Merge tag 'upstream-4.0-rc5' of git://git.infradead.org/linux-ubifs
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 21 Mar 2015 17:36:44 +0000 (10:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 21 Mar 2015 17:36:44 +0000 (10:36 -0700)
Pull UBI fix from Artem Bityutskiy:
 "This fixes a bug introduced during the v4.0 merge window where we
  forgot to put braces where they should be"

* tag 'upstream-4.0-rc5' of git://git.infradead.org/linux-ubifs:
  UBI: fix missing brace control flow
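
The bug class behind this fix: in C, only the first statement after an if without braces is conditional, so a missing brace silently changes control flow. A hypothetical, self-contained illustration of the pitfall (not the actual UBI code):

	#include <stdio.h>

	/* Buggy: the indentation suggests both lines are guarded by the if,
	 * but only the printf is; the return runs unconditionally. */
	static int buggy(int err)
	{
		if (err)
			printf("warning: step failed\n");
			return -1;	/* always taken */
		return 0;		/* unreachable */
	}

	/* Fixed: braces make the intended control flow explicit. */
	static int fixed(int err)
	{
		if (err) {
			printf("warning: step failed\n");
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		printf("buggy(0)=%d, fixed(0)=%d\n", buggy(0), fixed(0));
		return 0;
	}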

1436 files changed:
Documentation/CodeOfConflict [new file with mode: 0644]
Documentation/DocBook/kgdb.tmpl
Documentation/cgroups/unified-hierarchy.txt
Documentation/clk.txt
Documentation/device-mapper/dm-crypt.txt
Documentation/devicetree/bindings/arm/exynos/power_domain.txt
Documentation/devicetree/bindings/arm/sti.txt
Documentation/devicetree/bindings/clock/exynos7-clock.txt
Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
Documentation/devicetree/bindings/clock/qcom,lcc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qoriq-clock.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/clock/ti,cdce706.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/fapll.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-imx.txt
Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
Documentation/devicetree/bindings/i2c/i2c-ocores.txt
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
Documentation/devicetree/bindings/i2c/trivial-devices.txt
Documentation/devicetree/bindings/mips/cavium/cib.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
Documentation/devicetree/bindings/net/apm-xgene-enet.txt
Documentation/devicetree/bindings/power/power_domain.txt
Documentation/devicetree/bindings/serial/8250.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/of-serial.txt [deleted file]
Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
Documentation/devicetree/bindings/submitting-patches.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
Documentation/filesystems/Locking
Documentation/filesystems/dlmfs.txt
Documentation/filesystems/ocfs2.txt
Documentation/filesystems/overlayfs.txt
Documentation/i2c/functionality
Documentation/input/alps.txt
Documentation/power/suspend-and-interrupts.txt
Documentation/x86/zero-page.txt
MAINTAINERS
Makefile
arch/arc/include/asm/processor.h
arch/arc/include/asm/stacktrace.h [new file with mode: 0644]
arch/arc/kernel/process.c
arch/arc/kernel/stacktrace.c
arch/arc/kernel/unaligned.c
arch/arc/mm/fault.c
arch/arm/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am335x-bone.dts
arch/arm/boot/dts/am335x-lxm.dts
arch/arm/boot/dts/am33xx-clocks.dtsi
arch/arm/boot/dts/am437x-idk-evm.dts
arch/arm/boot/dts/am43xx-clocks.dtsi
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm.dts
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/exynos3250.dtsi
arch/arm/boot/dts/exynos4-cpu-thermal.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos4210-trats.dts
arch/arm/boot/dts/exynos4210-universal_c210.dts
arch/arm/boot/dts/exynos4210.dtsi
arch/arm/boot/dts/exynos4212.dtsi
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos4412.dtsi
arch/arm/boot/dts/exynos4x12.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5420-trip-points.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5440-trip-points.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5440.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5-core-thermal.dtsi
arch/arm/boot/dts/omap5-gpu-thermal.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/omap54xx-clocks.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/configs/at91_dt_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/sama5_defconfig
arch/arm/configs/sunxi_defconfig
arch/arm/configs/vexpress_defconfig
arch/arm/crypto/aesbs-core.S_shipped
arch/arm/crypto/bsaes-armv7.pl
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/debug/at91.S
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/setup.c
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/kvm/trace.h
arch/arm/mach-asm9260/Kconfig
arch/arm/mach-at91/Kconfig
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/generic.h
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm.h
arch/arm/mach-at91/pm_slowclock.S
arch/arm/mach-axxia/axxia.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-bcm/brcmstb.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/da8xx-dt.c
arch/arm/mach-davinci/mux.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-hisi/hisilicon.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mmdc.c
arch/arm/mach-ixp4xx/include/mach/io.h
arch/arm/mach-keystone/keystone.c
arch/arm/mach-keystone/pm_domain.c
arch/arm/mach-mmp/time.c
arch/arm/mach-msm/board-halibut.c
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-mvebu/system-controller.c
arch/arm/mach-nspire/nspire.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/cclock3xxx_data.c [deleted file]
arch/arm/mach-omap2/clock.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/clock_common_data.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/dpll44xx.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-omap2/prm.h
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-omap2/prm44xx.c
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-prima2/Kconfig
arch/arm/mach-prima2/common.c
arch/arm/mach-prima2/platsmp.c
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/lpd270.c
arch/arm/mach-realview/core.c
arch/arm/mach-realview/realview_eb.c
arch/arm/mach-rockchip/Kconfig
arch/arm/mach-rockchip/pm.h
arch/arm/mach-s5pv210/s5pv210.c
arch/arm/mach-sa1100/neponset.c
arch/arm/mach-sa1100/pleb.c
arch/arm/mach-shmobile/setup-emev2.c
arch/arm/mach-socfpga/core.h
arch/arm/mach-socfpga/socfpga.c
arch/arm/mach-sti/Kconfig
arch/arm/mach-sti/board-dt.c
arch/arm/mach-tegra/tegra.c
arch/arm/mach-ux500/pm_domains.c
arch/arm/mach-versatile/versatile_dt.c
arch/arm/mach-vexpress/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/pageattr.c
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/arm/foundation-v8.dts
arch/arm64/boot/dts/arm/juno.dts
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/crypto/Makefile
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpuidle.h
arch/arm64/include/asm/insn.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/proc-fns.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/efi.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/head.S
arch/arm64/kernel/insn.c
arch/arm64/kernel/process.c
arch/arm64/kernel/psci-call.S [new file with mode: 0644]
arch/arm64/kernel/psci.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/init.c
arch/arm64/mm/pageattr.c
arch/blackfin/include/asm/bfin_rotary.h [deleted file]
arch/blackfin/mach-bf527/boards/ad7160eval.c
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/c6x/include/asm/pgtable.h
arch/frv/include/asm/pgtable.h
arch/m32r/include/asm/pgtable-2level.h
arch/m68k/include/asm/pgtable_mm.h
arch/metag/include/asm/processor.h
arch/microblaze/kernel/entry.S
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/alchemy/common/clock.c
arch/mips/alchemy/common/setup.c
arch/mips/bcm3384/irq.c
arch/mips/boot/Makefile
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/csrc-octeon.c
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/setup.c
arch/mips/configs/malta_qemu_32r6_defconfig [new file with mode: 0644]
arch/mips/fw/arc/misc.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/atomic.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/compiler.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/cpu-type.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/edac.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/gio_device.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/local.h
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
arch/mips/include/asm/mach-cavium-octeon/war.h
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
arch/mips/include/asm/mips-r2-to-r6-emul.h [new file with mode: 0644]
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/module.h
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
arch/mips/include/asm/octeon/cvmx-rst-defs.h [new file with mode: 0644]
arch/mips/include/asm/octeon/octeon-model.h
arch/mips/include/asm/octeon/octeon.h
arch/mips/include/asm/pci.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/sgialib.h
arch/mips/include/asm/siginfo.h [deleted file]
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/spram.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/siginfo.h
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/elf.c
arch/mips/kernel/entry.S
arch/mips/kernel/genex.S
arch/mips/kernel/idle.c
arch/mips/kernel/mips-r2-to-r6-emul.c [new file with mode: 0644]
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/spram.c
arch/mips/kernel/syscall.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/kvm/tlb.c
arch/mips/kvm/trace.h
arch/mips/lib/Makefile
arch/mips/lib/memcpy.S
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-r4k.c
arch/mips/mm/fault.c
arch/mips/mm/page.c
arch/mips/mm/sc-mips.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c
arch/mips/mm/uasm-mips.c
arch/mips/mm/uasm.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/pci/pci-bcm1480.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pcie-octeon.c
arch/mips/sgi-ip22/ip22-gio.c
arch/mips/sgi-ip27/ip27-reset.c
arch/mips/sgi-ip32/ip32-reset.c
arch/mn10300/include/asm/pgtable.h
arch/nios2/include/asm/ptrace.h
arch/nios2/include/asm/ucontext.h [deleted file]
arch/nios2/include/uapi/asm/Kbuild
arch/nios2/include/uapi/asm/elf.h
arch/nios2/include/uapi/asm/ptrace.h
arch/nios2/include/uapi/asm/sigcontext.h
arch/nios2/kernel/signal.c
arch/nios2/mm/fault.c
arch/parisc/include/asm/pgtable.h
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/include/asm/iommu.h
arch/powerpc/include/asm/irq_work.h [new file with mode: 0644]
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/iommu.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/topology.h
arch/s390/kernel/cache.c
arch/s390/kernel/early.c
arch/s390/kernel/jump_label.c
arch/s390/kernel/module.c
arch/s390/kernel/processor.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/mm/mmap.c
arch/s390/pci/pci.c
arch/s390/pci/pci_mmio.c
arch/sparc/Kconfig
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/starfire.h
arch/sparc/kernel/entry.h
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/starfire.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/traps_64.c
arch/sparc/mm/init_64.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/efi_stub_64.S
arch/x86/boot/compressed/efi_thunk_64.S [new file with mode: 0644]
arch/x86/crypto/aesni-intel_glue.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/fpu-internal.h
arch/x86/include/asm/imr.h [new file with mode: 0644]
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/xsave.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/microcode/intel_early.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/irq.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/kvm.c
arch/x86/kernel/traps.c
arch/x86/kernel/uprobes.c
arch/x86/kernel/xsave.c
arch/x86/kvm/emulate.c
arch/x86/kvm/i8259.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/Kconfig
arch/x86/mm/init.c
arch/x86/mm/mmap.c
arch/x86/pci/acpi.c
arch/x86/platform/Makefile
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/efi_thunk_64.S
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-quark/Makefile [new file with mode: 0644]
arch/x86/platform/intel-quark/imr.c [new file with mode: 0644]
arch/x86/platform/intel-quark/imr_selftest.c [new file with mode: 0644]
arch/x86/vdso/vdso32/sigreturn.S
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/spinlock.c
block/blk-throttle.c
drivers/acpi/acpi_lpss.c
drivers/acpi/ec.c
drivers/acpi/resource.c
drivers/acpi/video.c
drivers/android/binder.c
drivers/ata/sata_fsl.c
drivers/base/power/domain.c
drivers/base/power/wakeup.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-irq.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/btusb.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_ibmvtpm.h
drivers/char/virtio_console.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/at91/clk-programmable.c
drivers/clk/at91/pmc.c
drivers/clk/at91/pmc.h
drivers/clk/bcm/clk-kona.c
drivers/clk/clk-asm9260.c [new file with mode: 0644]
drivers/clk/clk-cdce706.c [new file with mode: 0644]
drivers/clk/clk-composite.c
drivers/clk/clk-divider.c
drivers/clk/clk-gate.c
drivers/clk/clk-mux.c
drivers/clk/clk-ppc-corenet.c [deleted file]
drivers/clk/clk-qoriq.c [new file with mode: 0644]
drivers/clk/clk.c
drivers/clk/clk.h
drivers/clk/clkdev.c
drivers/clk/hisilicon/clk-hi3620.c
drivers/clk/mmp/clk-mix.c
drivers/clk/pxa/Makefile
drivers/clk/pxa/clk-pxa.c
drivers/clk/pxa/clk-pxa3xx.c [new file with mode: 0644]
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/clk-pll.c
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-regmap-divider.c [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-divider.h [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-mux.c [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-mux.h [new file with mode: 0644]
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/qcom/gcc-msm8960.c
drivers/clk/qcom/lcc-ipq806x.c [new file with mode: 0644]
drivers/clk/qcom/lcc-msm8960.c [new file with mode: 0644]
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/samsung/clk-exynos3250.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos4415.c
drivers/clk/samsung/clk-exynos7.c
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/shmobile/Makefile
drivers/clk/shmobile/clk-div6.c
drivers/clk/shmobile/clk-r8a73a4.c [new file with mode: 0644]
drivers/clk/shmobile/clk-rcar-gen2.c
drivers/clk/st/clk-flexgen.c
drivers/clk/st/clkgen-mux.c
drivers/clk/sunxi/Makefile
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/sunxi/clk-sun9i-core.c
drivers/clk/sunxi/clk-sun9i-mmc.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/tegra/Makefile
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-periph.c
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk.c
drivers/clk/ti/Makefile
drivers/clk/ti/clk-3xxx-legacy.c [new file with mode: 0644]
drivers/clk/ti/clk-3xxx.c
drivers/clk/ti/clk-44xx.c
drivers/clk/ti/clk-54xx.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clk-816x.c [new file with mode: 0644]
drivers/clk/ti/clk.c
drivers/clk/ti/clock.h [new file with mode: 0644]
drivers/clk/ti/composite.c
drivers/clk/ti/divider.c
drivers/clk/ti/dpll.c
drivers/clk/ti/fapll.c [new file with mode: 0644]
drivers/clk/ti/gate.c
drivers/clk/ti/interface.c
drivers/clk/ti/mux.c
drivers/clk/ux500/clk-prcc.c
drivers/clk/ux500/clk-prcmu.c
drivers/clk/zynq/clkc.c
drivers/clocksource/Kconfig
drivers/clocksource/mtk_timer.c
drivers/clocksource/pxa_timer.c
drivers/clocksource/time-efm32.c
drivers/clocksource/timer-sun5i.c
drivers/cpufreq/Kconfig.powerpc
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/cpuidle.c
drivers/dma-buf/fence.c
drivers/dma-buf/reservation.c
drivers/dma/at_xdmac.c
drivers/dma/dw/core.c
drivers/dma/ioat/dma_v3.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/qcom_bam_dma.c
drivers/dma/sh/shdmac.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/gpio/gpio-tps65912.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fifo_underrun.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-microsoft.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/hid-tivo.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hwmon/ads7828.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-bcm-iproc.c [new file with mode: 0644]
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-baytrail.c [new file with mode: 0644]
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-pmcmsp.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/ide/ide-tape.c
drivers/iio/adc/mcp3422.c
drivers/iio/adc/qcom-spmi-iadc.c
drivers/iio/common/ssp_sensors/ssp_dev.c
drivers/iio/dac/ad5686.c
drivers/iio/humidity/dht11.c
drivers/iio/humidity/si7020.c
drivers/iio/imu/adis16400_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/light/Kconfig
drivers/iio/magnetometer/Kconfig
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_common.h
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_diag.c
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_eeprom.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_intr.c
drivers/infiniband/hw/qib/qib_keys.c
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/hw/qib/qib_mmap.c
drivers/infiniband/hw/qib/qib_mr.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_sd7220.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/qib/qib_twsi.c
drivers/infiniband/hw/qib/qib_tx.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/hw/qib/qib_verbs_mcast.c
drivers/infiniband/hw/qib/qib_wc_x86_64.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/joystick/adi.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/keyboard/tc3589x-keypad.c
drivers/input/misc/bfin_rotary.c
drivers/input/misc/mma8450.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/cyapa_gen3.c
drivers/input/mouse/cyapa_gen5.c
drivers/input/mouse/cypress_ps2.c
drivers/input/mouse/cypress_ps2.h
drivers/input/mouse/focaltech.c
drivers/input/mouse/focaltech.h
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/psmouse.h
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/touchscreen/Kconfig
drivers/iommu/Kconfig
drivers/iommu/exynos-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/omap-iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mips-gic.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/icn/icn.c
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/misc/mei/init.c
drivers/mmc/core/pwrseq_simple.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/pxa3xx_nand.c
drivers/net/Kconfig
drivers/net/appletalk/Kconfig
drivers/net/can/Kconfig
drivers/net/can/dev.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/dsa/bcm_sf2.h
drivers/net/ethernet/8390/axnet_cs.c
drivers/net/ethernet/8390/pcnet_cs.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/macvtap.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/usb/Kconfig
drivers/net/usb/asix_devices.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/hso.c
drivers/net/usb/plusb.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/cosa.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/brcm80211/brcmfmac/vendor.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/Kconfig
drivers/of/base.c
drivers/of/overlay.c
drivers/of/unittest.c
drivers/pci/host/pci-versatile.c
drivers/pci/host/pci-xgene.c
drivers/pci/pci-sysfs.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/pinctrl/sunxi/pinctrl-sunxi.h
drivers/platform/x86/Kconfig
drivers/pnp/resource.c
drivers/regulator/core.c
drivers/regulator/da9210-regulator.c
drivers/regulator/rk808-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91sam9.c
drivers/rtc/rtc-ds1685.c
drivers/rtc/rtc-s3c.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk_cluster.c
drivers/scsi/am53c974.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/hpsa.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/sg.c
drivers/scsi/wd719x.c
drivers/sh/pm_runtime.c
drivers/spi/spi-atmel.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw-pci.c
drivers/spi/spi-dw.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-pl022.c
drivers/spi/spi-ti-qspi.c
drivers/staging/comedi/drivers/adv_pci1710.c
drivers/staging/comedi/drivers/comedi_isadma.c
drivers/staging/comedi/drivers/vmk80xx.c
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/resolver/ad2s1200.c
drivers/staging/lustre/lustre/llite/dcache.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/namei.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_core.h [deleted file]
drivers/target/iscsi/iscsi_target_datain_values.c
drivers/target/iscsi/iscsi_target_device.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nodeattrib.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_stat.h [deleted file]
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tq.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/thermal/int340x_thermal/int340x_thermal_zone.c
drivers/thermal/intel_powerclamp.c
drivers/thermal/rcar_thermal.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/thermal_core.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/tty/bfin_jtag_comm.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/of_serial.c
drivers/tty/serial/sprd_serial.c
drivers/tty/tty_io.c
drivers/tty/tty_ioctl.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/devio.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_phonet.c
drivers/usb/gadget/function/f_sourcesink.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/uvc_v4l2.c
drivers/usb/gadget/function/uvc_video.c
drivers/usb/gadget/legacy/g_ffs.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/isp1760/isp1760-hcd.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/omap2430.c
drivers/usb/renesas_usbhs/Kconfig
drivers/usb/serial/bus.c
drivers/usb/serial/ch341.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/mxuport.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/unusual_uas.h
drivers/usb/storage/usb.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vfio/pci/vfio_pci_private.h
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/core/fbmon.c
drivers/video/fbdev/omap2/dss/display-sysfs.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/watchdog/at91sam9_wdt.c
drivers/xen/Makefile
drivers/xen/events/events_base.c
drivers/xen/preempt.c [new file with mode: 0644]
drivers/xen/privcmd.c
drivers/xen/xen-pciback/conf_space.c
drivers/xen/xen-pciback/conf_space.h
drivers/xen/xen-pciback/conf_space_header.c
drivers/xen/xen-scsiback.c
fs/9p/vfs_inode.c
fs/aio.c
fs/autofs4/dev-ioctl.c
fs/autofs4/expire.c
fs/autofs4/root.c
fs/bad_inode.c
fs/binfmt_elf.c
fs/btrfs/ctree.c
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/xattr.c
fs/cachefiles/daemon.c
fs/cachefiles/interface.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/ceph/dir.c
fs/ceph/file.c
fs/coda/dir.c
fs/configfs/configfs_internal.h
fs/configfs/dir.c
fs/configfs/file.c
fs/configfs/inode.c
fs/coredump.c
fs/dcache.c
fs/debugfs/inode.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/exportfs/expfs.c
fs/ext4/ext4.h
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/gfs2/dir.c
fs/hfsplus/dir.c
fs/hppfs/hppfs.c
fs/internal.h
fs/jbd2/recovery.c
fs/jffs2/dir.c
fs/jffs2/super.c
fs/libfs.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/client.c
fs/nfs/delegation.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/filelayout/filelayout.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs3proc.c
fs/nfs/nfs3xdr.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfs/proc.c
fs/nfs/write.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsfh.c
fs/nfsd/vfs.c
fs/nilfs2/btree.c
fs/nilfs2/segment.c
fs/notify/fanotify/fanotify.c
fs/ocfs2/ocfs2.h
fs/ocfs2/ocfs2_fs.h
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/posix_acl.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/task_mmu.c
fs/reiserfs/xattr.c
fs/super.c
fs/xfs/Makefile
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_iops.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_pnfs.c [new file with mode: 0644]
fs/xfs/xfs_pnfs.h [new file with mode: 0644]
fs/xfs/xfs_qm.c
include/drm/drm_mm.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/dt-bindings/clock/alphascale,asm9260.h [new file with mode: 0644]
include/dt-bindings/clock/exynos4.h
include/dt-bindings/clock/exynos7-clk.h
include/dt-bindings/clock/qcom,gcc-ipq806x.h
include/dt-bindings/clock/qcom,lcc-ipq806x.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,lcc-msm8960.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car-common.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car.h
include/dt-bindings/pinctrl/am33xx.h
include/dt-bindings/pinctrl/am43xx.h
include/kvm/arm_vgic.h
include/linux/clk-private.h [deleted file]
include/linux/clk-provider.h
include/linux/clk.h
include/linux/clk/sunxi.h [deleted file]
include/linux/clk/tegra.h
include/linux/clk/ti.h
include/linux/compiler.h
include/linux/cpuidle.h
include/linux/dcache.h
include/linux/hid-sensor-hub.h
include/linux/i2c.h
include/linux/interrupt.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqchip/mips-gic.h
include/linux/irqdesc.h
include/linux/kasan.h
include/linux/kdb.h
include/linux/mlx4/qp.h
include/linux/module.h
include/linux/moduleloader.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/nvme.h
include/linux/of_platform.h
include/linux/pinctrl/consumer.h
include/linux/platform_data/bfin_rotary.h [new file with mode: 0644]
include/linux/rhashtable.h
include/linux/sched.h
include/linux/serial_core.h
include/linux/skbuff.h
include/linux/spi/spi.h
include/linux/sunrpc/metrics.h
include/linux/thermal.h
include/linux/uio.h
include/linux/usb/serial.h
include/linux/vfio.h
include/linux/vmalloc.h
include/linux/workqueue.h
include/net/caif/cfpkt.h
include/net/dst.h
include/net/netfilter/nf_tables.h
include/net/vxlan.h
include/soc/at91/at91sam9_ddrsdr.h
include/target/iscsi/iscsi_target_core.h [new file with mode: 0644]
include/target/iscsi/iscsi_target_stat.h [new file with mode: 0644]
include/target/iscsi/iscsi_transport.h
include/target/target_core_base.h
include/uapi/linux/nvme.h
include/uapi/linux/prctl.h
include/uapi/linux/serial.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/vfio.h
include/uapi/linux/virtio_blk.h
include/uapi/linux/virtio_scsi.h
include/uapi/rdma/ib_user_verbs.h
include/video/omapdss.h
include/xen/xen-ops.h
include/xen/xenbus.h
kernel/cpuset.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_io.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/irq/manage.c
kernel/irq/pm.c
kernel/livepatch/core.c
kernel/locking/rtmutex.c
kernel/module.c
kernel/printk/console_cmdline.h
kernel/printk/printk.c
kernel/rcu/tree_plugin.h
kernel/sched/auto_group.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/sys.c
kernel/time/ntp.c
kernel/trace/ftrace.c
kernel/workqueue.c
lib/Makefile
lib/iov_iter.c [new file with mode: 0644]
lib/rhashtable.c
lib/seq_buf.c
lib/test_rhashtable.c
mm/Makefile
mm/cma.c
mm/huge_memory.c
mm/hugetlb.c
mm/iov_iter.c [deleted file]
mm/kasan/kasan.c
mm/memcontrol.c
mm/memory.c
mm/mlock.c
mm/nommu.c
mm/page_alloc.c
mm/shmem.c
mm/vmalloc.c
net/9p/trans_virtio.c
net/bridge/br.c
net/bridge/br_if.c
net/caif/caif_socket.c
net/caif/cffrml.c
net/caif/cfpkt_skbuff.c
net/can/af_can.c
net/compat.c
net/core/dev.c
net/core/ethtool.c
net/core/gen_stats.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sysctl_net_core.c
net/decnet/dn_route.c
net/hsr/hsr_device.c
net/hsr/hsr_main.c
net/hsr/hsr_slave.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_input.c
net/ipv4/xfrm4_output.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ping.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/irda/ircomm/ircomm_tty.c
net/irda/irnet/irnet_ppp.c
net/mac80211/chan.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_compat.c
net/netfilter/nft_hash.c
net/netfilter/xt_recent.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/openvswitch/datapath.c
net/openvswitch/flow_netlink.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/rds/iw_rdma.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-error.c
net/rxrpc/ar-recvmsg.c
net/sched/act_bpf.c
net/sched/cls_u32.c
net/sched/ematch.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/cache.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/tipc/link.c
net/tipc/socket.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/reg.c
net/xfrm/xfrm_policy.c
scripts/gdb/linux/__init__.py [new file with mode: 0644]
security/apparmor/include/apparmor.h
security/apparmor/lsm.c
security/apparmor/path.c
security/inode.c
security/selinux/hooks.c
security/smack/smack_lsm.c
security/tomoyo/file.c
sound/core/control.c
sound/core/pcm_native.c
sound/core/seq/seq_midi_emul.c
sound/drivers/opl3/opl3_midi.c
sound/firewire/amdtp.c
sound/firewire/bebob/bebob.c
sound/firewire/bebob/bebob_stream.c
sound/firewire/dice/dice-stream.c
sound/firewire/dice/dice.c
sound/firewire/fireworks/fireworks.c
sound/firewire/fireworks/fireworks_stream.c
sound/firewire/iso-resources.c
sound/firewire/oxfw/oxfw-stream.c
sound/firewire/oxfw/oxfw.c
sound/isa/msnd/msnd_pinnacle_mixer.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_proc.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/rme9652/hdspm.c
sound/soc/atmel/sam9g20_wm8731.c
sound/soc/cirrus/Kconfig
sound/soc/codecs/Kconfig
sound/soc/codecs/adav80x.c
sound/soc/codecs/ak4641.c
sound/soc/codecs/ak4671.c
sound/soc/codecs/cs4271.c
sound/soc/codecs/da732x.c
sound/soc/codecs/es8328.c
sound/soc/codecs/max98357a.c
sound/soc/codecs/pcm1681.c
sound/soc/codecs/rt286.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/rt5677.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sn95031.c
sound/soc/codecs/sta32x.c
sound/soc/codecs/tas5086.c
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8903.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8955.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm9712.c
sound/soc/codecs/wm9713.c
sound/soc/fsl/fsl_spdif.c
sound/soc/fsl/fsl_ssi.c
sound/soc/generic/simple-card.c
sound/soc/intel/sst-atom-controls.h
sound/soc/intel/sst-haswell-dsp.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst-haswell-pcm.c
sound/soc/intel/sst/sst.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/omap/omap-hdmi-audio.c
sound/soc/omap/omap-mcbsp.c
sound/soc/omap/omap-pcm.c
sound/soc/samsung/Kconfig
sound/soc/sh/rcar/core.c
sound/soc/soc-core.c
sound/usb/clock.c
sound/usb/line6/driver.c
sound/usb/line6/driver.h
sound/usb/line6/playback.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/quirks.h
tools/perf/bench/mem-memcpy.c
tools/perf/config/Makefile.arch
tools/perf/config/feature-checks/Makefile
tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
tools/perf/util/annotate.c
tools/perf/util/cloexec.c
tools/perf/util/evlist.h
tools/perf/util/symbol-elf.c
tools/power/cpupower/Makefile
tools/testing/selftests/exec/execveat.c
tools/thermal/tmon/.gitignore [new file with mode: 0644]
tools/thermal/tmon/Makefile
tools/thermal/tmon/tmon.8
tools/thermal/tmon/tmon.c
tools/thermal/tmon/tui.c
virt/kvm/arm/vgic-v2.c
virt/kvm/arm/vgic-v3.c
virt/kvm/arm/vgic.c
virt/kvm/kvm_main.c

diff --git a/Documentation/CodeOfConflict b/Documentation/CodeOfConflict
new file mode 100644 (file)
index 0000000..1684d0b
--- /dev/null
@@ -0,0 +1,27 @@
+Code of Conflict
+----------------
+
+The Linux kernel development effort is a very personal process compared
+to "traditional" ways of developing software.  Your code and ideas
+behind it will be carefully reviewed, often resulting in critique and
+criticism.  The review will almost always require improvements to the
+code before it can be included in the kernel.  Know that this happens
+because everyone involved wants to see the best possible solution for
+the overall success of Linux.  This development process has been proven
+to create the most robust operating system kernel ever, and we do not
+want to do anything to cause the quality of submission and eventual
+result to ever decrease.
+
+If however, anyone feels personally abused, threatened, or otherwise
+uncomfortable due to this process, that is not acceptable.  If so,
+please contact the Linux Foundation's Technical Advisory Board at
+<tab@lists.linux-foundation.org>, or the individual members, and they
+will work to resolve the issue to the best of their ability.  For more
+information on who is on the Technical Advisory Board and what their
+role is, please see:
+       http://www.linuxfoundation.org/programs/advisory-councils/tab
+
+As a reviewer of code, please strive to keep things civil and focused on
+the technical issues involved.  We are all humans, and frustrations can
+be high on both sides of the process.  Try to keep in mind the immortal
+words of Bill and Ted, "Be excellent to each other."
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 2428cc04dbc84b5f7243558296f6f36fa9621fb4..f3abca7ec53d62e4bd416da9f53395aef9269855 100644 (file)
    may be configured as a kernel built-in or a kernel loadable module.
    You can only make use of <constant>kgdbwait</constant> and early
    debugging if you build kgdboc into the kernel as a built-in.
+   </para>
    <para>Optionally you can elect to activate kms (Kernel Mode
    Setting) integration.  When you use kms with kgdboc and you have a
    video driver that has atomic mode setting hooks, it is possible to
    crashes or doing analysis of memory with kdb while allowing the
    full graphics console applications to run.
    </para>
-   </para>
    <sect2 id="kgdbocArgs">
    <title>kgdboc arguments</title>
    <para>Usage: <constant>kgdboc=[kms][[,]kbd][[,]serial_device][,baud]</constant></para>
    </listitem>
    </orderedlist>
    </para>
-   </sect3>
    <para>NOTE: Kgdboc does not support interrupting the target via the
    gdb remote protocol.  You must manually send a sysrq-g unless you
    have a proxy that splits console output to a terminal program.
     as well as on the initial connect, or to use a debugger proxy that
     allows an unmodified gdb to do the debugging.
    </para>
+   </sect3>
    </sect2>
    </sect1>
    <sect1 id="kgdbwait">
    </para>
    </listitem>
    </orderedlist>
+  </para>
    <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
    active system console.  An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
    </para>
    <para>It is possible to use this option with kgdboc on a tty that is not a system console.
    </para>
-  </para>
   </sect1>
    <sect1 id="kgdbreboot">
    <title>Run time parameter: kgdbreboot</title>
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 71daa35ec2d9201d2a4647e0a088af3ab36bbae3..eb102fb722134758a39d7d2e17c1dd00fdc0d799 100644 (file)
@@ -404,8 +404,8 @@ supported and the interface files "release_agent" and
   be understood as an underflow into the highest possible value, -2 or
   -10M etc. do not work, so it's not consistent.
 
-  memory.low, memory.high, and memory.max will use the string
-  "infinity" to indicate and set the highest possible value.
+  memory.low, memory.high, and memory.max will use the string "max" to
+  indicate and set the highest possible value.
 
 5. Planned Changes
 
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 4ff84623d5e16eb42c17f90eb1851eea19b9d82f..0e4f90aa1c136eaa40c9d93e3586bbffdd180de4 100644 (file)
@@ -73,6 +73,8 @@ the operations defined in clk.h:
                                                unsigned long *parent_rate);
                long            (*determine_rate)(struct clk_hw *hw,
                                                unsigned long rate,
+                                               unsigned long min_rate,
+                                               unsigned long max_rate,
                                                unsigned long *best_parent_rate,
                                                struct clk_hw **best_parent_clk);
                int             (*set_parent)(struct clk_hw *hw, u8 index);
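
A minimal sketch of how a driver-side .determine_rate callback might use the two new min_rate/max_rate arguments, assuming the clk_ops style shown above (the function and clock names are illustrative, not taken from an in-tree driver):

	#include <linux/clk-provider.h>

	/* Sketch only: clamp the requested rate into [min_rate, max_rate]
	 * before deciding what the hardware will actually produce. */
	static long foo_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
					   unsigned long min_rate,
					   unsigned long max_rate,
					   unsigned long *best_parent_rate,
					   struct clk_hw **best_parent_clk)
	{
		if (rate < min_rate)
			rate = min_rate;
		if (rate > max_rate)
			rate = max_rate;

		/* A real driver would round 'rate' to an achievable frequency
		 * and may update *best_parent_rate / *best_parent_clk when
		 * re-parenting gets closer to the request. */
		return rate;
	}
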
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index c81839b52c4dd0102d6444c464359690f395778e..ad697781f9ac478477cfed76978b047685eda2b6 100644 (file)
@@ -51,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
     Otherwise #opt_params is the number of following arguments.
 
     Example of optional parameters section:
-        1 allow_discards
+        3 allow_discards same_cpu_crypt submit_from_crypt_cpus
 
 allow_discards
     Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,19 @@ allow_discards
     used space etc.) if the discarded blocks can be located easily on the
     device later.
 
+same_cpu_crypt
+    Perform encryption using the same cpu that IO was submitted on.
+    The default is to use an unbound workqueue so that encryption work
+    is automatically balanced between available CPUs.
+
+submit_from_crypt_cpus
+    Disable offloading writes to a separate thread after encryption.
+    There are some situations where offloading write bios from the
+    encryption threads to a single thread degrades performance
+    significantly.  The default is to offload write bios to the same
+    thread because it benefits CFQ to have writes submitted using the
+    same context.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index f4445e5a2bbb7db23a7c32075f2efb5dbb9dec8d..1e097037349c326a22e3f07abb3e3aca9d78d0ce 100644 (file)
@@ -22,6 +22,8 @@ Optional Properties:
        - pclkN, clkN: Pairs of parent of input clock and input clock to the
                devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
                are supported currently.
+- power-domains: phandle pointing to the parent power domain, for more details
+                see Documentation/devicetree/bindings/power/power_domain.txt
 
 Node of a device using power domains must have a power-domains property
 defined with a phandle to respective power domain.
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt
index d70ec358736c48f376020183869d5b582bf81bed..8d27f6b084c7bfd07fda2ffce59c18e9a76390f2 100644 (file)
@@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih407";
 
+Boards with the ST STiH410 SoC shall have the following properties:
+Required root node property:
+compatible = "st,stih410";
+
 Boards with the ST STiH418 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih418";
diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
index 6d3d5f80c1c3186ba35a7c28dd950e2f754e3027..6bf1e7493f61febb762a212d6306dcaa03fb00c4 100644 (file)
@@ -34,6 +34,8 @@ Required Properties for Clock Controller:
        - "samsung,exynos7-clock-peris"
        - "samsung,exynos7-clock-fsys0"
        - "samsung,exynos7-clock-fsys1"
+       - "samsung,exynos7-clock-mscl"
+       - "samsung,exynos7-clock-aud"
 
  - reg: physical base address of the controller and the length of
        memory mapped region.
@@ -53,6 +55,7 @@ Input clocks for top0 clock controller:
        - dout_sclk_bus1_pll
        - dout_sclk_cc_pll
        - dout_sclk_mfc_pll
+       - dout_sclk_aud_pll
 
 Input clocks for top1 clock controller:
        - fin_pll
@@ -76,6 +79,14 @@ Input clocks for peric1 clock controller:
        - sclk_uart1
        - sclk_uart2
        - sclk_uart3
+       - sclk_spi0
+       - sclk_spi1
+       - sclk_spi2
+       - sclk_spi3
+       - sclk_spi4
+       - sclk_i2s1
+       - sclk_pcm1
+       - sclk_spdif
 
 Input clocks for peris clock controller:
        - fin_pll
@@ -91,3 +102,7 @@ Input clocks for fsys1 clock controller:
        - dout_aclk_fsys1_200
        - dout_sclk_mmc0
        - dout_sclk_mmc1
+
+Input clocks for aud clock controller:
+       - fin_pll
+       - fout_aud_pll
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
index ded5d6212c84dfc0c6e9a5774b1f8e8ce975b0cb..c6620bc9670364315bcc8687ec81c016e7a89719 100644 (file)
@@ -1,4 +1,4 @@
-NVIDIA Tegra124 Clock And Reset Controller
+NVIDIA Tegra124 and Tegra132 Clock And Reset Controller
 
 This binding uses the common clock binding:
 Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -7,14 +7,16 @@ The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
 for muxing and gating Tegra's clocks, and setting their rates.
 
 Required properties :
-- compatible : Should be "nvidia,tegra124-car"
+- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car"
 - reg : Should contain CAR registers location and length
 - clocks : Should contain phandle and clock specifiers for two clocks:
   the 32 KHz "32k_in", and the board-specific oscillator "osc".
 - #clock-cells : Should be 1.
   In clock consumers, this cell represents the clock ID exposed by the
-  CAR. The assignments may be found in header file
-  <dt-bindings/clock/tegra124-car.h>.
+  CAR. The assignments may be found in the header files
+  <dt-bindings/clock/tegra124-car-common.h> (which covers IDs common
+  to Tegra124 and Tegra132) and <dt-bindings/clock/tegra124-car.h>
+  (for Tegra124-specific clocks).
 - #reset-cells : Should be 1.
   In clock consumers, this cell represents the bit number in the CAR's
   array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
new file mode 100644 (file)
index 0000000..dd755be
--- /dev/null
@@ -0,0 +1,21 @@
+Qualcomm LPASS Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+                       "qcom,lcc-msm8960"
+                       "qcom,lcc-apq8064"
+                       "qcom,lcc-ipq8064"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Example:
+       clock-controller@28000000 {
+               compatible = "qcom,lcc-ipq8064";
+               reg = <0x28000000 0x1000>;
+               #clock-cells = <1>;
+               #reset-cells = <1>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 266ff9d232293a976de9e6a851e1bd7d1f2de7d0..df4a259a6898c5d7fe62fd5ad7e3ef323b5719b6 100644 (file)
@@ -1,6 +1,6 @@
-* Clock Block on Freescale CoreNet Platforms
+* Clock Block on Freescale QorIQ Platforms
 
-Freescale CoreNet chips take primary clocking input from the external
+Freescale QorIQ chips take primary clocking input from the external
 SYSCLK signal. The SYSCLK input (frequency) is multiplied using
 multiple phase locked loops (PLL) to create a variety of frequencies
 which can then be passed to a variety of internal logic, including
@@ -29,6 +29,7 @@ Required properties:
        * "fsl,t4240-clockgen"
        * "fsl,b4420-clockgen"
        * "fsl,b4860-clockgen"
+       * "fsl,ls1021a-clockgen"
        Chassis clock strings include:
        * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
        * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
index 2e18676bd4b56503ce42ec4b4d368c32041fe1d0..0a80fa70ca265c0f2c7b7657d7270f47d6f53a45 100644 (file)
@@ -11,6 +11,7 @@ Required Properties:
 
   - compatible: Must be one of the following
     - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
+    - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
     - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
     - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
     - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
new file mode 100644 (file)
index 0000000..ece9239
--- /dev/null
@@ -0,0 +1,33 @@
+* Renesas R8A73A4 Clock Pulse Generator (CPG)
+
+The CPG generates core clocks for the R8A73A4 SoC. It includes five PLLs
+and several fixed ratio dividers.
+
+Required Properties:
+
+  - compatible: Must be "renesas,r8a73a4-cpg-clocks"
+
+  - reg: Base address and length of the memory resource used by the CPG
+
+  - clocks: Reference to the parent clocks ("extal1" and "extal2")
+
+  - #clock-cells: Must be 1
+
+  - clock-output-names: The names of the clocks. Supported clocks are "main",
+    "pll0", "pll1", "pll2", "pll2s", "pll2h", "z", "z2", "i", "m3", "b",
+    "m1", "m2", "zx", "zs", and "hp".
+
+
+Example
+-------
+
+        cpg_clocks: cpg_clocks@e6150000 {
+                compatible = "renesas,r8a73a4-cpg-clocks";
+                reg = <0 0xe6150000 0 0x10000>;
+                clocks = <&extal1_clk>, <&extal2_clk>;
+                #clock-cells = <1>;
+                clock-output-names = "main", "pll0", "pll1", "pll2",
+                                     "pll2s", "pll2h", "z", "z2",
+                                     "i", "m3", "b", "m1", "m2",
+                                     "zx", "zs", "hp";
+        };
index e6ad35b894f919f537dbf58cbdea6fffd9cddfc9..b02944fba9de4f8696d9f6ac7845acb96937aeed 100644 (file)
@@ -8,15 +8,18 @@ Required Properties:
   - compatible: Must be one of
     - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG
     - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
+    - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
     - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
     - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG
 
   - reg: Base address and length of the memory resource used by the CPG
 
-  - clocks: Reference to the parent clock
+  - clocks: References to the parent clocks: first to the EXTAL clock, second
+    to the USB_EXTAL clock
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are "main",
-    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1" and "z"
+    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
+    "adsp"
 
 
 Example
@@ -26,8 +29,9 @@ Example
                compatible = "renesas,r8a7790-cpg-clocks",
                             "renesas,rcar-gen2-cpg-clocks";
                reg = <0 0xe6150000 0 0x1000>;
-               clocks = <&extal_clk>;
+               clocks = <&extal_clk &usb_extal_clk>;
                #clock-cells = <1>;
                clock-output-names = "main", "pll0, "pll1", "pll3",
-                                    "lb", "qspi", "sdh", "sd0", "sd1", "z";
+                                    "lb", "qspi", "sdh", "sd0", "sd1", "z",
+                                    "rcan", "adsp";
        };
index 67b2b99f2b339f0a35a8fddfd9127779542ca76b..60b44285250d3b3e9e580f9d71e51239df3b1c76 100644 (file)
@@ -26,7 +26,7 @@ Required properties:
        "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
        "allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
        "allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
-       "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
+       "allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31
        "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
        "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
        "allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
@@ -55,9 +55,11 @@ Required properties:
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
        "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
-       "allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10
-       "allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10
+       "allwinner,sun4i-a10-mmc-clk" - for the MMC clock
+       "allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80
+       "allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80
        "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
+       "allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80
        "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
        "allwinner,sun7i-a20-out-clk" - for the external output clocks
        "allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
@@ -73,7 +75,9 @@ Required properties for all clocks:
 - #clock-cells : from common clock binding; shall be set to 0 except for
        the following compatibles where it shall be set to 1:
        "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
+       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+       "allwinner,*-usb-clk", "allwinner,*-mmc-clk",
+       "allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
        If the clock module only has one output, the name shall be the
        module name.
@@ -81,6 +85,10 @@ Required properties for all clocks:
 And "allwinner,*-usb-clk" clocks also require:
 - reset-cells : shall be set to 1
 
+The "allwinner,sun9i-a80-mmc-config-clk" clock also requires:
+- #reset-cells : shall be set to 1
+- resets : shall be the reset control phandle for the mmc block.
+
 For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate
 dummy clocks at 25 MHz and 125 MHz, respectively. See example.
 
@@ -95,6 +103,14 @@ For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
 is the normal PLL6 output, or "pll6". The second output is rate doubled
 PLL6, or "pll6x2".
 
+The "allwinner,*-mmc-clk" clocks have three different outputs: the
+main clock, with the ID 0, and the output and sample clocks, with the
+IDs 1 and 2, respectively.
+
+The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output
+per mmc controller. The number of outputs is determined by the size of
+the address block, which is related to the overall mmc block.
+
 For example:
 
 osc24M: clk@01c20050 {
@@ -138,11 +154,11 @@ cpu: cpu@01c20054 {
 };
 
 mmc0_clk: clk@01c20088 {
-       #clock-cells = <0>;
-       compatible = "allwinner,sun4i-mod0-clk";
+       #clock-cells = <1>;
+       compatible = "allwinner,sun4i-a10-mmc-clk";
        reg = <0x01c20088 0x4>;
        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-       clock-output-names = "mmc0";
+       clock-output-names = "mmc0", "mmc0_output", "mmc0_sample";
 };
 
 mii_phy_tx_clk: clk@2 {
@@ -170,3 +186,16 @@ gmac_clk: clk@01c20164 {
        clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
        clock-output-names = "gmac";
 };
+
+mmc_config_clk: clk@01c13000 {
+       compatible = "allwinner,sun9i-a80-mmc-config-clk";
+       reg = <0x01c13000 0x10>;
+       clocks = <&ahb0_gates 8>;
+       clock-names = "ahb";
+       resets = <&ahb0_resets 8>;
+       reset-names = "ahb";
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+       clock-output-names = "mmc0_config", "mmc1_config",
+                            "mmc2_config", "mmc3_config";
+};
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
new file mode 100644 (file)
index 0000000..616836e
--- /dev/null
@@ -0,0 +1,42 @@
+Bindings for Texas Instruments CDCE706 programmable 3-PLL clock
+synthesizer/multiplier/divider.
+
+Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+
+I2C device node required properties:
+- compatible: shall be "ti,cdce706".
+- reg: i2c device address, shall be in range [0x68...0x6b].
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+  handles, shall be reference clock(s) connected to CLK_IN0
+  and CLK_IN1 pins.
+- clock-names: shall be clk_in0 and/or clk_in1. Use clk_in0
+  in case of crystal oscillator or differential signal input
+  configuration. Use clk_in0 and clk_in1 in case of independent
+  single-ended LVCMOS inputs configuration.
+
+Example:
+
+       clocks {
+               clk54: clk54 {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <54000000>;
+               };
+       };
+       ...
+       i2c0: i2c-master@0d090000 {
+               ...
+               cdce706: clock-synth@69 {
+                       compatible = "ti,cdce706";
+                       #clock-cells = <1>;
+                       reg = <0x69>;
+                       clocks = <&clk54>;
+                       clock-names = "clk_in0";
+               };
+       };
+       ...
+       simple-audio-card,codec {
+               ...
+               clocks = <&cdce706 4>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
new file mode 100644 (file)
index 0000000..c19b3f2
--- /dev/null
@@ -0,0 +1,33 @@
+Binding for Texas Instruments FAPLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a
+register-mapped FAPLL with usually two selectable input clocks
+(reference clock and bypass clock), and one or more child
+synthesizers.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,dm816-fapll-clock"
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of parent clocks (clk-ref and clk-bypass)
+- reg : address and length of the register set for controlling the FAPLL.
+
+Examples:
+       main_fapll: main_fapll {
+               #clock-cells = <1>;
+               compatible = "ti,dm816-fapll-clock";
+               reg = <0x400 0x40>;
+               clocks = <&sys_clkin_ck &sys_clkin_ck>;
+               clock-indices = <1>, <2>, <3>, <4>, <5>,
+                               <6>, <7>;
+               clock-output-names = "main_pll_clk1",
+                                    "main_pll_clk2",
+                                    "main_pll_clk3",
+                                    "main_pll_clk4",
+                                    "main_pll_clk5",
+                                    "main_pll_clk6",
+                                    "main_pll_clk7";
+       };
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
new file mode 100644 (file)
index 0000000..81f982c
--- /dev/null
@@ -0,0 +1,37 @@
+Broadcom iProc I2C controller
+
+Required properties:
+
+- compatible:
+    Must be "brcm,iproc-i2c"
+
+- reg:
+    Define the base and range of the I/O address space that contains the iProc
+    I2C controller registers
+
+- interrupts:
+    Should contain the I2C interrupt
+
+- clock-frequency:
+    This is the I2C bus clock frequency. Needs to be either 100000 or 400000
+
+- #address-cells:
+    Always 1 (for I2C addresses)
+
+- #size-cells:
+    Always 0
+
+Example:
+       i2c0: i2c@18008000 {
+               compatible = "brcm,iproc-i2c";
+               reg = <0x18008000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+
+               codec: wm8750@1a {
+                       compatible = "wlf,wm8750";
+                       reg = <0x1a>;
+               };
+       };
index 52d37fd8d3e5c151471a70fb45ff2d4a09317a18..ce4311d726ae5e4a414e6240cc8832e876a9c24e 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
   - "fsl,vf610-i2c" for I2C compatible with the one integrated on Vybrid vf610 SoC
 - reg : Should contain I2C/HS-I2C registers location and length
 - interrupts : Should contain I2C/HS-I2C interrupt
+- clocks : Should contain the I2C/HS-I2C clock specifier
 
 Optional properties:
 - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz.
index 34a3fb6f8488b26a9c5def9866898d807ee7df55..cf53d5fba20a0934c631b8320e538cbc5440e155 100644 (file)
@@ -16,6 +16,9 @@ Required Properties:
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the reset input.
+  - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
+    children in idle state. This is necessary, for example, if there are
+    several multiplexers on the bus and the devices behind them use the same
+    I2C addresses.
 
 
 Example:
index 1637c298a1b337bf83612cb12129001ecd11da24..17bef9a34e507541ea3f201039b04e199e551fa3 100644 (file)
@@ -4,24 +4,60 @@ Required properties:
 - compatible      : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
 - reg             : bus address start and address range size of device
 - interrupts      : interrupt number
-- clock-frequency : frequency of bus clock in Hz
+- clocks          : handle to the controller clock; see the note below.
+                    Mutually exclusive with opencores,ip-clock-frequency
+- opencores,ip-clock-frequency: frequency of the controller clock in Hz;
+                    see the note below. Mutually exclusive with clocks
 - #address-cells  : should be <1>
 - #size-cells     : should be <0>
 
 Optional properties:
+- clock-frequency : frequency of bus clock in Hz; see the note below.
+                    Defaults to 100 KHz when the property is not specified
 - reg-shift       : device register offsets are shifted by this value
 - reg-io-width    : io register width in bytes (1, 2 or 4)
 - regstep         : deprecated, use reg-shift above
 
-Example:
+Note
+clock-frequency property is meant to control the bus frequency for i2c bus
+drivers, but it was incorrectly used to specify i2c controller input clock
+frequency. So the following rules are set to fix this situation:
+- if clock-frequency is present and neither opencores,ip-clock-frequency nor
+  clocks are, then clock-frequency specifies i2c controller clock frequency.
+  This is to keep backwards compatibility with setups using old DTB. i2c bus
+  frequency is fixed at 100 KHz.
+- if clocks is present it specifies i2c controller clock. clock-frequency
+  property specifies i2c bus frequency.
+- if opencores,ip-clock-frequency is present it specifies i2c controller
+  clock frequency. clock-frequency property specifies i2c bus frequency.
 
+Examples:
+
+       i2c0: ocores@a0000000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "opencores,i2c-ocores";
+               reg = <0xa0000000 0x8>;
+               interrupts = <10>;
+               opencores,ip-clock-frequency = <20000000>;
+
+               reg-shift = <0>;        /* 8 bit registers */
+               reg-io-width = <1>;     /* 8 bit read/write */
+
+               dummy@60 {
+                       compatible = "dummy";
+                       reg = <0x60>;
+               };
+       };
+or
        i2c0: ocores@a0000000 {
                #address-cells = <1>;
                #size-cells = <0>;
                compatible = "opencores,i2c-ocores";
                reg = <0xa0000000 0x8>;
                interrupts = <10>;
-               clock-frequency = <20000000>;
+               clocks = <&osc>;
+               clock-frequency = <400000>; /* i2c bus frequency 400 KHz */
 
                reg-shift = <0>;        /* 8 bit registers */
                reg-io-width = <1>;     /* 8 bit read/write */
index dde6c22ce91a13df20eaab99ebd6dbf684983768..f0d71bc52e64be39cea42a7cc04b1e05662ad641 100644 (file)
@@ -21,6 +21,17 @@ Required on RK3066, RK3188 :
 Optional properties :
 
  - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+ - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise
+       (t(r) in the I2C specification). If not specified this is assumed to be
+       the maximum the specification allows (1000 ns for Standard-mode,
+       300 ns for Fast-mode) which might cause slightly slower communication.
+ - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall
+       (t(f) in the I2C specification). If not specified this is assumed to
+       be the maximum the specification allows (300 ns) which might cause
+       slightly slower communication.
+ - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall
+       (t(f) in the I2C specification). If not specified we'll use the SCL
+       value since they are the same in nearly all cases.
 
 Example:
 
@@ -39,4 +50,7 @@ i2c0: i2c@2002d000 {
 
        clock-names = "i2c";
        clocks = <&cru PCLK_I2C0>;
+
+       i2c-scl-rising-time-ns = <800>;
+       i2c-scl-falling-time-ns = <100>;
 };
index 4dcd88d5f7ca453503120236a2ee3670ac7a55e8..aaa8325004d23ae6313223594817f392c1a31359 100644 (file)
@@ -61,9 +61,8 @@ fsl,sgtl5000          SGTL5000: Ultra Low-Power Audio Codec
 gmt,g751               G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
 infineon,slb9635tt     Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt     Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isl,isl12057           Intersil ISL12057 I2C RTC Chip
-isil,isl29028           (deprecated, use isl)
-isl,isl29028            Intersil ISL29028 Ambient Light and Proximity Sensor
+isil,isl12057          Intersil ISL12057 I2C RTC Chip
+isil,isl29028          Intersil ISL29028 Ambient Light and Proximity Sensor
 maxim,ds1050           5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237          Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625          9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
new file mode 100644 (file)
index 0000000..f39a1aa
--- /dev/null
@@ -0,0 +1,43 @@
+* Cavium Interrupt Bus widget
+
+Properties:
+- compatible: "cavium,octeon-7130-cib"
+
+  Compatibility with cn70XX SoCs.
+
+- interrupt-controller:  This is an interrupt controller.
+
+- reg: Two elements consisting of the addresses of the RAW and EN
+  registers of the CIB block
+
+- cavium,max-bits: The index (zero based) of the highest numbered bit
+  in the CIB block.
+
+- interrupt-parent:  Always the CIU on the SoC.
+
+- interrupts: The CIU line to which the CIB block is connected.
+
+- #interrupt-cells: Must be <2>.  The first cell is the bit within the
+   CIB.  The second cell specifies the triggering semantics of the
+   line.
+
+Example:
+
+       interrupt-controller@107000000e000 {
+               compatible = "cavium,octeon-7130-cib";
+               reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+                     <0x10700 0x0000e100 0x0 0x8>; /* EN */
+               cavium,max-bits = <23>;
+
+               interrupt-controller;
+               interrupt-parent = <&ciu>;
+               interrupts = <1 24>;
+               /* Interrupts are specified by two parts:
+                * 1) Bit number in the CIB* registers
+                * 2) Triggering (1 - edge rising
+                *                2 - edge falling
+                *                4 - level active high
+                *                8 - level active low)
+                */
+               #interrupt-cells = <2>;
+       };
index 91b3a34671508bdf11a1f368dde9588fc34961df..4bf41d8338046ef5aef2e47e4ee1dccf727acfe2 100644 (file)
@@ -10,8 +10,8 @@ Absolute maximum transfer rate is 200MB/s
 Required properties:
  - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc"
  - reg : mmc controller base registers
- - clocks : a list with 2 phandle + clock specifier pairs
- - clock-names : must contain "ahb" and "mmc"
+ - clocks : a list with 4 phandle + clock specifier pairs
+ - clock-names : must contain "ahb", "mmc", "output" and "sample"
  - interrupts : mmc controller interrupt
 
 Optional properties:
@@ -25,8 +25,8 @@ Examples:
        mmc0: mmc@01c0f000 {
                compatible = "allwinner,sun5i-a13-mmc";
                reg = <0x01c0f000 0x1000>;
-               clocks = <&ahb_gates 8>, <&mmc0_clk>;
-               clock-names = "ahb", "mod";
+               clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>;
+               clock-names = "ahb", "mod", "output", "sample";
                interrupts = <0 32 4>;
                status = "disabled";
        };
index 33df3932168e1b8941a5de1565b1cf02f96fb551..8db32384a4866e56094aa96e86ac8765ccdf519c 100644 (file)
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
        xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
                amd,serdes-cdr-rate = <2>, <2>, <7>;
                amd,serdes-pq-skew = <10>, <10>, <30>;
                amd,serdes-tx-amp = <15>, <15>, <10>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
        };
index cfcc52705ed8093ba11b4f549dd1b02a8949fb40..6151999c5dcae6e31f60dc155b433723550cf693 100644 (file)
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in
 APM X-Gene SoC.
 
 Required properties for all the ethernet interfaces:
-- compatible: Should be "apm,xgene-enet"
+- compatible: Should state binding information from the following list,
+  - "apm,xgene-enet":    RGMII based 1G interface
+  - "apm,xgene1-sgenet": SGMII based 1G interface
+  - "apm,xgene1-xgenet": XFI based 10G interface
 - reg: Address and length of the register set for the device. It contains the
   information of registers in the same order as described by reg-names
 - reg-names: Should contain the register set names
index 98c16672ab5f49e06cd6dd516f3c1a302e65beec..0f8ed3710c66e9e24450a77ba1d86fd9d2a01ea3 100644 (file)
@@ -19,6 +19,16 @@ Required properties:
    providing multiple PM domains (e.g. power controllers), but can be any value
    as specified by device tree binding documentation of particular provider.
 
+Optional properties:
+ - power-domains : A phandle and PM domain specifier as defined by bindings of
+                   the power controller specified by phandle.
+   Some power domains might be powered from another power domain (or have
+   other hardware specific dependencies). For representing such a dependency
+   a standard PM domain consumer binding is used. When provided, all domains
+   created by the given provider should be subdomains of the domain
+   specified by this binding. More details about power domain specifier are
+   available in the next section.
+
 Example:
 
        power: power-controller@12340000 {
@@ -30,6 +40,25 @@ Example:
 The node above defines a power controller that is a PM domain provider and
 expects one cell as its phandle argument.
 
+Example 2:
+
+       parent: power-controller@12340000 {
+               compatible = "foo,power-controller";
+               reg = <0x12340000 0x1000>;
+               #power-domain-cells = <1>;
+       };
+
+       child: power-controller@12340000 {
+               compatible = "foo,power-controller";
+               reg = <0x12341000 0x1000>;
+               power-domains = <&parent 0>;
+               #power-domain-cells = <1>;
+       };
+
+The nodes above define two power controllers: 'parent' and 'child'.
+Domains created by the 'child' power controller are subdomains of '0' power
+domain provided by the 'parent' power controller.
+
 ==PM domain consumers==
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
new file mode 100644 (file)
index 0000000..91d5ab0
--- /dev/null
@@ -0,0 +1,66 @@
+* UART (Universal Asynchronous Receiver/Transmitter)
+
+Required properties:
+- compatible : one of:
+       - "ns8250"
+       - "ns16450"
+       - "ns16550a"
+       - "ns16550"
+       - "ns16750"
+       - "ns16850"
+       - For Tegra20, must contain "nvidia,tegra20-uart"
+       - For other Tegra, must contain '"nvidia,<chip>-uart",
+         "nvidia,tegra20-uart"' where <chip> is tegra30, tegra114, tegra124,
+         tegra132, or tegra210.
+       - "nxp,lpc3220-uart"
+       - "ralink,rt2880-uart"
+       - "ibm,qpace-nwp-serial"
+       - "altr,16550-FIFO32"
+       - "altr,16550-FIFO64"
+       - "altr,16550-FIFO128"
+       - "fsl,16550-FIFO64"
+       - "fsl,ns16550"
+       - "serial" if the port type is unknown.
+- reg : offset and length of the register set for the device.
+- interrupts : should contain uart interrupt.
+- clock-frequency : the input clock frequency for the UART
+        or
+  clocks phandle to refer to the clk used as per Documentation/devicetree
+  /bindings/clock/clock-bindings.txt
+
+Optional properties:
+- current-speed : the current active speed of the UART.
+- reg-offset : offset to apply to the mapbase from the start of the registers.
+- reg-shift : quantity to shift the register offsets by.
+- reg-io-width : the size (in bytes) of the IO accesses that should be
+  performed on the device.  There are some systems that require 32-bit
+  accesses to the UART (e.g. TI davinci).
+- used-by-rtas : set to indicate that the port is in use by the OpenFirmware
+  RTAS and should not be registered.
+- no-loopback-test: set to indicate that the port does not implement loopback
+  test mode
+- fifo-size: the fifo size of the UART.
+- auto-flow-control: one way to enable automatic flow control support. The
+  driver is allowed to detect support for the capability even without this
+  property.
+
+Note:
+* fsl,ns16550:
+  ------------
+  Freescale DUART is very similar to the PC16552D (and to a
+  pair of NS16550A), albeit with some nonstandard behavior such as
+  erratum A-004737 (relating to incorrect BRK handling).
+
+  Represents a single port that is compatible with the DUART found
+  on many Freescale chips (examples include mpc8349, mpc8548,
+  mpc8641d, p4080 and ls2085a).
+
+Example:
+
+       uart@80230000 {
+               compatible = "ns8250";
+               reg = <0x80230000 0x100>;
+               clock-frequency = <3686400>;
+               interrupts = <10>;
+               reg-shift = <2>;
+       };
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
new file mode 100644 (file)
index 0000000..ebcbb62
--- /dev/null
@@ -0,0 +1,19 @@
+ETRAX FS UART
+
+Required properties:
+- compatible : "axis,etraxfs-uart"
+- reg: offset and length of the register set for the device.
+- interrupts: device interrupt
+
+Optional properties:
+- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD
+  line respectively.
+
+Example:
+
+serial@b0026000 {
+       compatible = "axis,etraxfs-uart";
+       reg = <0xb0026000 0x1000>;
+       interrupts = <68>;
+       status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/of-serial.txt
deleted file mode 100644 (file)
index 91d5ab0..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-* UART (Universal Asynchronous Receiver/Transmitter)
-
-Required properties:
-- compatible : one of:
-       - "ns8250"
-       - "ns16450"
-       - "ns16550a"
-       - "ns16550"
-       - "ns16750"
-       - "ns16850"
-       - For Tegra20, must contain "nvidia,tegra20-uart"
-       - For other Tegra, must contain '"nvidia,<chip>-uart",
-         "nvidia,tegra20-uart"' where <chip> is tegra30, tegra114, tegra124,
-         tegra132, or tegra210.
-       - "nxp,lpc3220-uart"
-       - "ralink,rt2880-uart"
-       - "ibm,qpace-nwp-serial"
-       - "altr,16550-FIFO32"
-       - "altr,16550-FIFO64"
-       - "altr,16550-FIFO128"
-       - "fsl,16550-FIFO64"
-       - "fsl,ns16550"
-       - "serial" if the port type is unknown.
-- reg : offset and length of the register set for the device.
-- interrupts : should contain uart interrupt.
-- clock-frequency : the input clock frequency for the UART
-        or
-  clocks phandle to refer to the clk used as per Documentation/devicetree
-  /bindings/clock/clock-bindings.txt
-
-Optional properties:
-- current-speed : the current active speed of the UART.
-- reg-offset : offset to apply to the mapbase from the start of the registers.
-- reg-shift : quantity to shift the register offsets by.
-- reg-io-width : the size (in bytes) of the IO accesses that should be
-  performed on the device.  There are some systems that require 32-bit
-  accesses to the UART (e.g. TI davinci).
-- used-by-rtas : set to indicate that the port is in use by the OpenFirmware
-  RTAS and should not be registered.
-- no-loopback-test: set to indicate that the port does not implements loopback
-  test mode
-- fifo-size: the fifo size of the UART.
-- auto-flow-control: one way to enable automatic flow control support. The
-  driver is allowed to detect support for the capability even without this
-  property.
-
-Note:
-* fsl,ns16550:
-  ------------
-  Freescale DUART is very similar to the PC16552D (and to a
-  pair of NS16550A), albeit with some nonstandard behavior such as
-  erratum A-004737 (relating to incorrect BRK handling).
-
-  Represents a single port that is compatible with the DUART found
-  on many Freescale chips (examples include mpc8349, mpc8548,
-  mpc8641d, p4080 and ls2085a).
-
-Example:
-
-       uart@80230000 {
-               compatible = "ns8250";
-               reg = <0x80230000 0x100>;
-               clock-frequency = <3686400>;
-               interrupts = <10>;
-               reg-shift = <2>;
-       };
index 7f76214f728aa6028f133a94d04f183ccb3b1c38..289c40ed747042a70b13f9912dc0b5c443f7827e 100644 (file)
@@ -21,6 +21,18 @@ Optional properties:
 - reg-io-width : the size (in bytes) of the IO accesses that should be
   performed on the device.  If this property is not present then single byte
   accesses are used.
+- dcd-override : Override the DCD modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- dsr-override : Override the DSR modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- cts-override : Override the CTS modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- ri-override : Override the RI modem status signal. This signal will always be
+  reported as inactive instead of being obtained from the modem status register.
+  Define this if your serial port does not use this pin.
 
 Example:
 
@@ -31,6 +43,10 @@ Example:
                interrupts = <10>;
                reg-shift = <2>;
                reg-io-width = <4>;
+               dcd-override;
+               dsr-override;
+               cts-override;
+               ri-override;
        };
 
 Example with one clock:
index 56742bc70218bfd5d4035732301f1df6c6f4927e..7d44eae7ab0b951d2ea77889cee1c6733978dd0b 100644 (file)
@@ -12,6 +12,9 @@ I. For patch submitters
 
        devicetree@vger.kernel.org
 
+     and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
+     all of the DT maintainers.
+
   3) The Documentation/ portion of the patch should come in the series before
      the code implementing the binding.
 
index 389ca1347a771cb049994db077fc38d2d04b7020..fae26d014aaf25c83f499889969e1cb017491f34 100644 (file)
@@ -20,6 +20,7 @@ amlogic       Amlogic, Inc.
 ams    AMS AG
 amstaos        AMS-Taos Inc.
 apm    Applied Micro Circuits Corporation (APM)
+arasan Arasan Chip Systems
 arm    ARM Ltd.
 armadeus       ARMadeus Systems SARL
 asahi-kasei    Asahi Kasei Corp.
@@ -27,6 +28,7 @@ atmel Atmel Corporation
 auo    AU Optronics Corporation
 avago  Avago Technologies
 avic   Shanghai AVIC Optoelectronics Co., Ltd.
+axis   Axis Communications AB
 bosch  Bosch Sensortec GmbH
 brcm   Broadcom Corporation
 buffalo        Buffalo, Inc.
index f90e294d7631f9b538ba3e5e72c2ab1b096a5479..a4d869744f5958f7bd0311868ef3c7b1c3956386 100644 (file)
@@ -26,6 +26,11 @@ Optional properties:
 - atmel,disable : Should be present if you want to disable the watchdog.
 - atmel,idle-halt : Should be present if you want to stop the watchdog when
        entering idle state.
+       CAUTION: This property should be used with care: it stops the watchdog
+       from counting while the CPU is in idle state, so the watchdog reset
+       time depends on mean CPU usage, and the watchdog will never fire if
+       the CPU stops working while it is in idle state, which is probably
+       not what you want.
 - atmel,dbg-halt : Should be present if you want to stop the watchdog when
        entering debug state.
 
index 2ca3d17eee56380cec075d5279c2703fa2b1ed9e..f91926f2f4824dee2c6785dcaf4cedd84df838e9 100644 (file)
@@ -164,8 +164,6 @@ the block device inode.  See there for more details.
 
 --------------------------- file_system_type ---------------------------
 prototypes:
-       int (*get_sb) (struct file_system_type *, int,
-                      const char *, void *, struct vfsmount *);
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
        void (*kill_sb) (struct super_block *);
index 1b528b2ad809b8418cb352be8755de286b78882f..fcf4d509d1186f728b77e398c438af0850d9511c 100644 (file)
@@ -5,8 +5,8 @@ system.
 
 dlmfs is built with OCFS2 as it requires most of its infrastructure.
 
-Project web page:    http://oss.oracle.com/projects/ocfs2
-Tools web page:      http://oss.oracle.com/projects/ocfs2-tools
+Project web page:    http://ocfs2.wiki.kernel.org
+Tools web page:      https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
 
 All code copyright 2005 Oracle except when otherwise noted.
index 28f8c08201e29e0e90e2a489997f7a6b1d78dabb..4c49e5410595c0ee86447ed6312d58315e2e974c 100644 (file)
@@ -8,8 +8,8 @@ also make it attractive for non-clustered use.
 You'll want to install the ocfs2-tools package in order to at least
 get "mount.ocfs2" and "ocfs2_hb_ctl".
 
-Project web page:    http://oss.oracle.com/projects/ocfs2
-Tools web page:      http://oss.oracle.com/projects/ocfs2-tools
+Project web page:    http://ocfs2.wiki.kernel.org
+Tools git tree:      https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
 
 All code copyright 2005 Oracle except when otherwise noted.
index a27c950ece61b0d312fbba2a3757fab71c5b3616..6db0e5d1da07ee52d13f5997b84b3b10ef43e2dd 100644 (file)
@@ -159,6 +159,22 @@ overlay filesystem (though an operation on the name of the file such as
 rename or unlink will of course be noticed and handled).
 
 
+Multiple lower layers
+---------------------
+
+Multiple lower layers can now be given using the colon (":") as a
+separator character between the directory names.  For example:
+
+  mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged
+
+As the example shows, "upperdir=" and "workdir=" may be omitted.  In
+that case the overlay will be read-only.
+
+The specified lower directories will be stacked beginning from the
+rightmost one and going left.  In the above example lower1 will be the
+top, lower2 the middle and lower3 the bottom layer.
+
+
 Non-standard behavior
 ---------------------
 
@@ -196,3 +212,15 @@ Changes to the underlying filesystems while part of a mounted overlay
 filesystem are not allowed.  If the underlying filesystem is changed,
 the behavior of the overlay is undefined, though it will not result in
 a crash or deadlock.
+
+Testsuite
+---------
+
+There's a testsuite developed by David Howells at:
+
+  git://git.infradead.org/users/dhowells/unionmount-testsuite.git
+
+Run as root:
+
+  # cd unionmount-testsuite
+  # ./run --ov
index 4556a3eb87c454f3db1c9efbd9c057f454c135d6..4aae8ed15873d1d3c1cc8fc9d80d3ef9b42e5654 100644 (file)
@@ -12,7 +12,7 @@ FUNCTIONALITY CONSTANTS
 -----------------------
 
 For the most up-to-date list of functionality constants, please check
-<linux/i2c.h>!
+<uapi/linux/i2c.h>!
 
   I2C_FUNC_I2C                    Plain i2c-level commands (Pure SMBus
                                   adapters typically can not do these)
index 90bca6f988e115e046583c13b9d157ebc38bd280..a63e5e013a8cddee63b1d3520dd1c2c73e80dc31 100644 (file)
@@ -3,8 +3,8 @@ ALPS Touchpad Protocol
 
 Introduction
 ------------
-Currently the ALPS touchpad driver supports five protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+Currently the ALPS touchpad driver supports seven protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4, 5, 6 and 7.
 
 Since roughly mid-2010 several new ALPS touchpads have been released and
 integrated into a variety of laptops and netbooks.  These new touchpads
@@ -240,3 +240,67 @@ For mt, the format is:
  byte 3:    0  x23  x22   x21 x20  x19  x18   x17
  byte 4:    0   x9   x8    x7  x6   x5   x4    x3
  byte 5:    0  x16  x15   x14 x13  x12  x11   x10
+
+ALPS Absolute Mode - Protocol Version 6
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0   X6   X5   X4   X3   X2   X1   X0
+ byte 2:    0   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+ byte 3:    ?   Y7   X7    ?    ?    M    R    L
+ byte 4:   Z7   Z6   Z5   Z4   Z3   Z2   Z1   Z0
+ byte 5:    0    1    1    1    1    1    1    1
+
+For touchpad packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0    0    0    0   x3   x2   x1   x0
+ byte 2:    0    0    0    0   y3   y2   y1   y0
+ byte 3:    ?   x7   x6   x5   x4    ?    r    l
+ byte 4:    ?   y7   y6   y5   y4    ?    ?    ?
+ byte 5:   z7   z6   z5   z4   z3   z2   z1   z0
+
+(v6 touchpad does not have middle button)
+
+ALPS Absolute Mode - Protocol Version 7
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    0    1    0    0    1    0    0    0
+ byte 1:    1    1    *    *    1    M    R    L
+ byte 2:   X7    1   X5   X4   X3   X2   X1   X0
+ byte 3:   Z6    1   Y6   X6    1   Y2   Y1   Y0
+ byte 4:   Y7    0   Y5   Y4   Y3    1    1    0
+ byte 5:  T&P    0   Z5   Z4   Z3   Z2   Z1   Z0
+
+For touchpad packet, the format is:
+
+         packet-fmt     b7     b6     b5     b4     b3     b2     b1     b0
+ byte 0: TWO & MULTI     L      1      R      M      1   Y0-2   Y0-1   Y0-0
+ byte 0: NEW             L      1   X1-5      1      1   Y0-2   Y0-1   Y0-0
+ byte 1:             Y0-10   Y0-9   Y0-8   Y0-7   Y0-6   Y0-5   Y0-4   Y0-3
+ byte 2:             X0-11      1  X0-10   X0-9   X0-8   X0-7   X0-6   X0-5
+ byte 3:             X1-11      1   X0-4   X0-3      1   X0-2   X0-1   X0-0
+ byte 4: TWO         X1-10    TWO   X1-9   X1-8   X1-7   X1-6   X1-5   X1-4
+ byte 4: MULTI       X1-10    TWO   X1-9   X1-8   X1-7   X1-6   Y1-5      1
+ byte 4: NEW         X1-10    TWO   X1-9   X1-8   X1-7   X1-6      0      0
+ byte 5: TWO & NEW   Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6   Y1-5   Y1-4
+ byte 5: MULTI       Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6    F-1    F-0
+
+ L:         Left button
+ R / M:     Non-clickpads: Right / Middle button
+            Clickpads: When > 2 fingers are down, and some fingers
+            are in the button area, then the 2 coordinates reported
+            are for fingers outside the button area and these report
+            extra fingers being present in the right / left button
+            area. Note these fingers are not added to the F field!
+            so if a TWO packet is received and R = 1 then there are
+            3 fingers down, etc.
+ TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
+            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+               in NEW fmt
+ F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
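
As a reading aid for the byte tables above, the following is a minimal
sketch (in C) of how a v6 trackstick packet could be decoded. It is derived
only from the layout documented here, not from the actual alps.c driver;
the struct and function names are invented for illustration.

	#include <stdint.h>

	struct v6_stick_report {
		int x, y, z;			/* raw coordinates / pressure */
		int left, right, middle;	/* button states */
	};

	/* Decode a 6-byte v6 trackstick packet per the table above. */
	static struct v6_stick_report decode_v6_trackstick(const uint8_t p[6])
	{
		struct v6_stick_report r;

		r.x = (p[1] & 0x7f) | ((p[3] & 0x20) << 2);	/* X0-X6 + X7 */
		r.y = (p[2] & 0x7f) | ((p[3] & 0x40) << 1);	/* Y0-Y6 + Y7 */
		r.z = p[4];					/* Z0-Z7 */
		r.left   =  p[3]       & 0x01;
		r.right  = (p[3] >> 1) & 0x01;
		r.middle = (p[3] >> 2) & 0x01;

		return r;
	}
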
index 2f9c5a5fcb25ff2d91aa196123952b01d0908e34..8afb29a8604a552c9dc1b1794466bf20933dca35 100644 (file)
@@ -40,8 +40,10 @@ but also to IPIs and to some other special-purpose interrupts.
 
 The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
 requesting a special-purpose interrupt.  It causes suspend_device_irqs() to
-leave the corresponding IRQ enabled so as to allow the interrupt to work all
-the time as expected.
+leave the corresponding IRQ enabled so as to allow the interrupt to work as
+expected during the suspend-resume cycle, but does not guarantee that the
+interrupt will wake the system from a suspended state -- for such cases it is
+necessary to use enable_irq_wake().
 
 Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
 user of it.  Thus, if the IRQ is shared, all of the interrupt handlers installed
@@ -110,8 +112,9 @@ any special interrupt handling logic for it to work.
 IRQF_NO_SUSPEND and enable_irq_wake()
 -------------------------------------
 
-There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
-flag on the same IRQ.
+There are very few valid reasons to use both enable_irq_wake() and the
+IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the
+same device.
 
 First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
 interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
@@ -120,4 +123,13 @@ handlers are not invoked after suspend_device_irqs()).
 
 Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
 to individual interrupt handlers, so sharing an IRQ between a system wakeup
-interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
+interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally
+make sense.
+
+In rare cases an IRQ can be shared between a wakeup device driver and an
+IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver
+must be able to discern spurious IRQs from genuine wakeup events (signalling
+the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to
+ensure that the IRQ will function as a wakeup source, and must request the IRQ
+with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If
+these requirements are not met, it is not valid to use IRQF_COND_SUSPEND.
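
To make the rules above concrete, here is a minimal, hypothetical sketch of
a wakeup-capable driver sharing an IRQ with an IRQF_NO_SUSPEND user. Only
devm_request_irq(), IRQF_SHARED, IRQF_COND_SUSPEND, enable_irq_wake() and
pm_system_wakeup() come from the text above; the foo_* device and helpers
are invented for illustration.

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>
	#include <linux/suspend.h>

	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
	{
		struct foo_device *foo = dev_id;

		/* The line is shared: discern spurious IRQs first. */
		if (!foo_irq_pending(foo))
			return IRQ_NONE;

		/* Signal genuine wakeup events to the PM core. */
		if (foo_is_wakeup_event(foo))
			pm_system_wakeup();

		foo_ack_irq(foo);
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_device *foo = foo_setup(pdev);
		int irq = platform_get_irq(pdev, 0);
		int ret;

		/* IRQF_COND_SUSPEND: we meet the requirements listed above. */
		ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler,
				       IRQF_SHARED | IRQF_COND_SUSPEND,
				       "foo", foo);
		if (ret)
			return ret;

		/* Ensure the IRQ actually functions as a wakeup source. */
		return enable_irq_wake(irq);
	}
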
index 199f453cb4de10016030c2dd230fb9a3a3125cee..82fbdbc1e0b0626611462132d38fc29c5ee27078 100644 (file)
@@ -3,7 +3,7 @@ protocol of kernel. These should be filled by bootloader or 16-bit
 real-mode setup code of the kernel. References/settings to it mainly
 are in:
 
-  arch/x86/include/asm/bootparam.h
+  arch/x86/include/uapi/asm/bootparam.h
 
 
 Offset Proto   Name            Meaning
index 7cfcee4e2bea2f7402c2e6bce16df6943aeb2c5c..9a76a404ea96dd2cb674200444664ea05adfea8a 100644 (file)
@@ -1030,6 +1030,16 @@ F:       arch/arm/mach-mxs/
 F:     arch/arm/boot/dts/imx*
 F:     arch/arm/configs/imx*_defconfig
 
+ARM/FREESCALE VYBRID ARM ARCHITECTURE
+M:     Shawn Guo <shawn.guo@linaro.org>
+M:     Sascha Hauer <kernel@pengutronix.de>
+R:     Stefan Agner <stefan@agner.ch>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+F:     arch/arm/mach-imx/*vf610*
+F:     arch/arm/boot/dts/vf*
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+M:     Gregory Clement <gregory.clement@free-electrons.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-dove/
@@ -1730,7 +1741,7 @@ S:        Maintained
 F:     drivers/net/ethernet/atheros/
 
 ATM
-M:     Chas Williams <chas@cmf.nrl.navy.mil>
+M:     Chas Williams <3chas3@gmail.com>
 L:     linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 W:     http://linux-atm.sourceforge.net
@@ -2065,7 +2076,7 @@ F:        include/net/bluetooth/
 BONDING DRIVER
 M:     Jay Vosburgh <j.vosburgh@gmail.com>
 M:     Veaceslav Falico <vfalico@gmail.com>
-M:     Andy Gospodarek <andy@greyhouse.net>
+M:     Andy Gospodarek <gospo@cumulusnetworks.com>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -2107,7 +2118,6 @@ F:        drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Christian Daudt <bcm@fixthebug.org>
-M:     Matt Porter <mporter@linaro.org>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 T:     git git://github.com/broadcom/mach-bcm
@@ -2369,8 +2379,9 @@ F:        arch/x86/include/asm/tce.h
 
 CAN NETWORK LAYER
 M:     Oliver Hartkopp <socketcan@hartkopp.net>
+M:     Marc Kleine-Budde <mkl@pengutronix.de>
 L:     linux-can@vger.kernel.org
-W:     http://gitorious.org/linux-can
+W:     https://github.com/linux-can
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:     Maintained
@@ -2386,7 +2397,7 @@ CAN NETWORK DRIVERS
 M:     Wolfgang Grandegger <wg@grandegger.com>
 M:     Marc Kleine-Budde <mkl@pengutronix.de>
 L:     linux-can@vger.kernel.org
-W:     http://gitorious.org/linux-can
+W:     https://github.com/linux-can
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:     Maintained
@@ -3937,7 +3948,7 @@ S:        Maintained
 F:     drivers/staging/fbtft/
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
-M:     Robert Love <robert.w.love@intel.com>
+M:     Vasu Dev <vasu.dev@intel.com>
 L:     fcoe-devel@open-fcoe.org
 W:     www.Open-FCoE.org
 S:     Supported
@@ -7213,8 +7224,7 @@ ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:     Mark Fasheh <mfasheh@suse.com>
 M:     Joel Becker <jlbec@evilplan.org>
 L:     ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
-W:     http://oss.oracle.com/projects/ocfs2/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
+W:     http://ocfs2.wiki.kernel.org
 S:     Supported
 F:     Documentation/filesystems/ocfs2.txt
 F:     Documentation/filesystems/dlmfs.txt
@@ -8481,6 +8491,14 @@ S:       Supported
 L:     netdev@vger.kernel.org
 F:     drivers/net/ethernet/samsung/sxgbe/
 
+SAMSUNG THERMAL DRIVER
+M:     Lukasz Majewski <l.majewski@samsung.com>
+L:     linux-pm@vger.kernel.org
+L:     linux-samsung-soc@vger.kernel.org
+S:     Supported
+T:     https://github.com/lmajewski/linux-samsung-thermal.git
+F:     drivers/thermal/samsung/
+
 SAMSUNG USB2 PHY DRIVER
 M:     Kamil Debski <k.debski@samsung.com>
 L:     linux-kernel@vger.kernel.org
@@ -8567,7 +8585,7 @@ S:        Maintained
 F:     drivers/scsi/sr*
 
 SCSI RDMA PROTOCOL (SRP) INITIATOR
-M:     Bart Van Assche <bvanassche@acm.org>
+M:     Bart Van Assche <bart.vanassche@sandisk.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.openfabrics.org
@@ -9719,6 +9737,11 @@ L:       linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
+TI CDCE706 CLOCK DRIVER
+M:     Max Filippov <jcmvbkbc@gmail.com>
+S:     Maintained
+F:     drivers/clk/clk-cdce706.c
+
 TI CLOCK DRIVER
 M:     Tero Kristo <t-kristo@ti.com>
 L:     linux-omap@vger.kernel.org
index 19e256ae26790212c75925912e73b1a0603c1192..e734965b160403185cf1f1342055ce0a4a956a77 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
-VERSION = 3
-PATCHLEVEL = 19
+VERSION = 4
+PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Diseased Newt
+EXTRAVERSION = -rc4
+NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 4e547296831d62ea673239dcb224c69ee02cd782..52312cb5dbe21490b48e21343ab8f82b7eecfc0e 100644 (file)
@@ -47,9 +47,6 @@ struct thread_struct {
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
-/* Return saved PC of a blocked thread  */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
 
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t);
 #define release_segments(mm)        do { } while (0)
 
 #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
  * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
-#define KSTK_ESP(tsk)   (tsk->thread.ksp)
+#define TSK_K_ESP(tsk)         (tsk->thread.ksp)
 
-#define KSTK_REG(tsk, off)     (*((unsigned int *)(KSTK_ESP(tsk) + \
+#define TSK_K_REG(tsk, off)    (*((unsigned int *)(TSK_K_ESP(tsk) + \
                                        sizeof(struct callee_regs) + off)))
 
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk)       TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk)          TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk)   TSK_K_BLINK(tsk)
 
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
                         unsigned long usp);
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644 (file)
index 0000000..b29b606
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk:               NULL for current task, specific task otherwise
+ * @regs:              pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ *                     If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ *                     use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn:       Callback invoked for each frame unwound
+ *                     Returns 0 to continue unwinding, -1 to stop
+ * @arg:               Arg to callback
+ *
+ * Returns the address of the first function in the stack
+ *
+ * Semantics:
+ *  - synchronous unwinding (e.g. dump_stack): @tsk  NULL, @regs  NULL
+ *  - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs  NULL
+ *  - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+       struct task_struct *tsk, struct pt_regs *regs,
+       int (*consumer_fn) (unsigned int, void *),
+       void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
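
As a usage illustration of the callback contract documented above, a
hypothetical consumer that prints each unwound frame might look like the
sketch below. Only arc_unwind_core() and its documented semantics come from
this header; the callback and wrapper names are invented.

	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <asm/stacktrace.h>

	/* Called once per unwound frame; return 0 to continue, -1 to stop. */
	static int print_frame(unsigned int address, void *arg)
	{
		unsigned int *depth = arg;

		pr_info("  [%2u] %pS\n", (*depth)++, (void *)address);
		return 0;
	}

	static void dump_task_stack(struct task_struct *tsk)
	{
		unsigned int depth = 0;

		/* tsk == NULL and regs == NULL would unwind the current context. */
		arc_unwind_core(tsk, NULL, print_frame, &depth);
	}
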
index fdd89715d2d3783f8a1302513c10a15d138ded91..98c00a2d4dd9a57f1c503ac2ebb6d63a3f1a76b4 100644 (file)
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
        return 0;
 }
 
-/*
- * API: expected by schedular Code: If thread is sleeping where is that.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- * So we hard code that anyways.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-       struct pt_regs *regs = task_pt_regs(t);
-       unsigned long blink = 0;
-
-       /*
-        * If the thread being queried for in not itself calling this, then it
-        * implies it is not executing, which in turn implies it is sleeping,
-        * which in turn implies it got switched OUT by the schedular.
-        * In that case, it's kernel mode blink can reliably retrieved as per
-        * the picture above (right above pt_regs).
-        */
-       if (t != current && t->state != TASK_RUNNING)
-               blink = *((unsigned int *)regs - 1);
-
-       return blink;
-}
-
 int elf_check_arch(const struct elf32_hdr *x)
 {
        unsigned int eflags;
index 9ce47cfe23037fa12f463a350819731422aadd9b..92320d6f737cf5149d0968f5da3cd73a47af9848 100644 (file)
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
                                   struct pt_regs *regs,
                                   struct unwind_frame_info *frame_info)
 {
+       /*
+        * synchronous unwinding (e.g. dump_stack)
+        *  - uses current values of SP and friends
+        */
        if (tsk == NULL && regs == NULL) {
                unsigned long fp, sp, blink, ret;
                frame_info->task = current;
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
                frame_info->regs.r63 = ret;
                frame_info->call_frame = 0;
        } else if (regs == NULL) {
+               /*
+                * Asynchronous unwinding of sleeping task
+                *  - Gets SP etc from task's pt_regs (saved bottom of kernel
+                *    mode stack of task)
+                */
 
                frame_info->task = tsk;
 
-               frame_info->regs.r27 = KSTK_FP(tsk);
-               frame_info->regs.r28 = KSTK_ESP(tsk);
-               frame_info->regs.r31 = KSTK_BLINK(tsk);
+               frame_info->regs.r27 = TSK_K_FP(tsk);
+               frame_info->regs.r28 = TSK_K_ESP(tsk);
+               frame_info->regs.r31 = TSK_K_BLINK(tsk);
                frame_info->regs.r63 = (unsigned int)__switch_to;
 
                /* In the prologue of __switch_to, first FP is saved on stack
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
                frame_info->call_frame = 0;
 
        } else {
+               /*
+                * Asynchronous unwinding of intr/exception
+                *  - Just uses the pt_regs passed
+                */
                frame_info->task = tsk;
 
                frame_info->regs.r27 = regs->fp;
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
 
 #endif
 
-static noinline unsigned int
+notrace noinline unsigned int
 arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
                int (*consumer_fn) (unsigned int, void *), void *arg)
 {
index 7ff5b5c183bb026716295c13f7b123de1d67a96f..74db59b6f39269f072ce606700375ae14ab9ec8c 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <asm/disasm.h>
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
                }
        }
 
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
        return 0;
 
 fault:
index 563cb27e37f55f3f99badc9b99e64aca6ee397b7..6a2e006cbcce1f1cd69866e0f0f9f94463d73dcb 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/perf_event.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
@@ -139,13 +140,20 @@ good_area:
                        return;
        }
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
        if (likely(!(fault & VM_FAULT_ERROR))) {
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        /* To avoid updating stats twice for retry case */
-                       if (fault & VM_FAULT_MAJOR)
+                       if (fault & VM_FAULT_MAJOR) {
                                tsk->maj_flt++;
-                       else
+                               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                             regs, address);
+                       } else {
                                tsk->min_flt++;
+                               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                             regs, address);
+                       }
 
                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
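
Together with the alignment-fault hook in the unaligned.c hunk above, these hooks make the fault counts visible on ARC through the generic software events, e.g. "perf stat -e page-faults,minor-faults,major-faults,alignment-faults". A minimal user-space sketch, assuming only the standard perf_event_open(2) interface; the sw_counter() helper is a hypothetical name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* perf_event_open() has no glibc wrapper, so go through syscall(2) */
static int sw_counter(unsigned long long config)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;         /* kernel software events */
        attr.config = config;                   /* e.g. PERF_COUNT_SW_PAGE_FAULTS */

        /* this process, any CPU, no group leader, no flags */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
        int fd = sw_counter(PERF_COUNT_SW_PAGE_FAULTS_MIN);
        long long count;

        /* ... run the workload of interest here ... */

        if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
                printf("minor page faults: %lld\n", count);
        return 0;
}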
index 7f99cd652203ce5705b22f5f0fc24f0574a82840..eb7bb511f853d975d67c0ee26c1167b2d5054279 100644 (file)
@@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN)               += berlin
 machine-$(CONFIG_ARCH_CLPS711X)                += clps711x
 machine-$(CONFIG_ARCH_CNS3XXX)         += cns3xxx
 machine-$(CONFIG_ARCH_DAVINCI)         += davinci
+machine-$(CONFIG_ARCH_DIGICOLOR)       += digicolor
 machine-$(CONFIG_ARCH_DOVE)            += dove
 machine-$(CONFIG_ARCH_EBSA110)         += ebsa110
 machine-$(CONFIG_ARCH_EFM32)           += efm32
index 6cc25ed912eeff57fc9bd06cc390c6ece3e56b14..c3255e0c90aa829fc792f02d1265d413f3c6e624 100644 (file)
 
 &usb0 {
        status = "okay";
+       dr_mode = "peripheral";
 };
 
 &usb1 {
        cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
        cd-inverted;
 };
+
+&aes {
+       status = "okay";
+};
+
+&sham {
+       status = "okay";
+};
index 83d40f7655e52503d76537a4ceca1c76e3023e97..6b849372042419e21ca67f142c53f62618e5191d 100644 (file)
 &mmc1 {
        vmmc-supply = <&ldo3_reg>;
 };
-
-&sham {
-       status = "okay";
-};
-
-&aes {
-       status = "okay";
-};
index 7266a00aab2ea515397a9046942749cb291eb6d4..5c5667a3624dee614047e6b63efde568c063f431 100644 (file)
        dual_emac_res_vlan = <3>;
 };
 
+&phy_sel {
+       rmii-clock-ext;
+};
+
 &mac {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&cpsw_default>;
index 712edce7d6fb12904f7063d38e57e36ab051346a..071b56aa0c7e05fd18201b25ba75836edb9aed5e 100644 (file)
@@ -99,7 +99,7 @@
        ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <0>;
                reg = <0x0664>;
        };
        ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <1>;
                reg = <0x0664>;
        };
        ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <2>;
                reg = <0x0664>;
        };
index f9a17e2ca8cb068dfa63034d43a1077fb0a606bb..0198f5a62b96cd8b8569d4ae5d6dfd2938fcd580 100644 (file)
                >;
        };
 
-       i2c1_pins_default: i2c1_pins_default {
-               pinctrl-single,pins = <
-                       0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */
-                       0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */
-               >;
-       };
-
-       i2c1_pins_sleep: i2c1_pins_sleep {
-               pinctrl-single,pins = <
-                       0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */
-                       0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */
-               >;
-       };
-
        mmc1_pins_default: pinmux_mmc1_pins_default {
                pinctrl-single,pins = <
                        0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
        status = "okay";
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&i2c0_pins_default>;
-       pinctrl-1 = <&i2c0_pins_default>;
+       pinctrl-1 = <&i2c0_pins_sleep>;
        clock-frequency = <400000>;
 
        at24@50 {
                pagesize = <64>;
                reg = <0x50>;
        };
-};
-
-&i2c1 {
-       status = "okay";
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&i2c1_pins_default>;
-       pinctrl-1 = <&i2c1_pins_default>;
-       clock-frequency = <400000>;
 
        tps: tps62362@60 {
                compatible = "ti,tps62362";
+               reg = <0x60>;
                regulator-name = "VDD_MPU";
                regulator-min-microvolt = <950000>;
                regulator-max-microvolt = <1330000>;
index c7dc9dab93a45eaf779497071cb9968027f5e163..cfb49686ab6af02ba7086e344869ee9ad457a2aa 100644 (file)
        ehrpwm0_tbclk: ehrpwm0_tbclk {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <0>;
                reg = <0x0664>;
        };
        ehrpwm1_tbclk: ehrpwm1_tbclk {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <1>;
                reg = <0x0664>;
        };
        ehrpwm2_tbclk: ehrpwm2_tbclk {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <2>;
                reg = <0x0664>;
        };
        ehrpwm3_tbclk: ehrpwm3_tbclk {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <4>;
                reg = <0x0664>;
        };
        ehrpwm4_tbclk: ehrpwm4_tbclk {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <5>;
                reg = <0x0664>;
        };
        ehrpwm5_tbclk: ehrpwm5_tbclk {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
-               clocks = <&dpll_per_m2_ck>;
+               clocks = <&l4ls_gclk>;
                ti,bit-shift = <6>;
                reg = <0x0664>;
        };
index 03750af3b49a41493035217c34c16984343b226c..6463f9ef2b548208bda288a78a07ab3353aa2220 100644 (file)
        pinctrl-0 = <&usb1_pins>;
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-       extcon = <&extcon_usb2>;
-};
-
 &usb2 {
        dr_mode = "peripheral";
 };
index fff0ee69aab495361898b503268fd300a02832c1..e7f0a4ae271c6a53244c3a3b0f6204cae45d6b0b 100644 (file)
 
                                        pinctrl_usart3_rts: usart3_rts-0 {
                                                atmel,pins =
-                                                       <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>;  /* PC8 periph B */
+                                                       <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>;
                                        };
 
                                        pinctrl_usart3_cts: usart3_cts-0 {
                                                atmel,pins =
-                                                       <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */
+                                                       <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;
                                        };
                                };
 
                        };
 
                        usb1: gadget@fffa4000 {
-                               compatible = "atmel,at91rm9200-udc";
+                               compatible = "atmel,at91sam9260-udc";
                                reg = <0xfffa4000 0x4000>;
                                interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>;
                                clocks = <&udc_clk>, <&udpck>;
                                atmel,watchdog-type = "hardware";
                                atmel,reset-type = "all";
                                atmel,dbg-halt;
-                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index e247b0b5fdab2fe1cb41e57fd51450ae723256dd..d55fdf2487ef53f7a3c68cded4e78fe42039f538 100644 (file)
                        };
 
                        usb1: gadget@fffa4000 {
-                               compatible = "atmel,at91rm9200-udc";
+                               compatible = "atmel,at91sam9261-udc";
                                reg = <0xfffa4000 0x4000>;
                                interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>;
-                               clocks = <&usb>, <&udc_clk>, <&udpck>;
-                               clock-names = "usb_clk", "udc_clk", "udpck";
+                               clocks = <&udc_clk>, <&udpck>;
+                               clock-names = "pclk", "hclk";
+                               atmel,matrix = <&matrix>;
                                status = "disabled";
                        };
 
                        };
 
                        matrix: matrix@ffffee00 {
-                               compatible = "atmel,at91sam9260-bus-matrix";
+                               compatible = "atmel,at91sam9260-bus-matrix", "syscon";
                                reg = <0xffffee00 0x200>;
                        };
 
index 1f67bb4c144eef2489891b63b64edda96402f100..fce301c4e9d6e22aeb37d3a2a4fe83bf6634ad78 100644 (file)
@@ -69,7 +69,7 @@
 
        sram1: sram@00500000 {
                compatible = "mmio-sram";
-               reg = <0x00300000 0x4000>;
+               reg = <0x00500000 0x4000>;
        };
 
        ahb {
                        };
 
                        usb1: gadget@fff78000 {
-                               compatible = "atmel,at91rm9200-udc";
+                               compatible = "atmel,at91sam9263-udc";
                                reg = <0xfff78000 0x4000>;
                                interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>;
                                clocks = <&udc_clk>, <&udpck>;
                                atmel,watchdog-type = "hardware";
                                atmel,reset-type = "all";
                                atmel,dbg-halt;
-                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index ee80aa9c0759c17f178965181fb28f2a562f6b13..488af63d5174c7e9dc0eb29319ccb35eebd8fc7c 100644 (file)
                                atmel,watchdog-type = "hardware";
                                atmel,reset-type = "all";
                                atmel,dbg-halt;
-                               atmel,idle-halt;
                                status = "disabled";
                        };
 
                        compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
                        reg = <0x00800000 0x100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+                       clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck";
                        status = "disabled";
                };
index c2666a7cb5b19b547d31f11d58d5af1c13d36acb..0c53a375ba99d214c1c0b5eabd85e77ec0cc72e6 100644 (file)
                                atmel,watchdog-type = "hardware";
                                atmel,reset-type = "all";
                                atmel,dbg-halt;
-                               atmel,idle-halt;
                                status = "disabled";
                        };
 
index 818dabdd8c0e08e3089092f27117c1927bc6122f..d221179d0f1aad8aaccde552f74962a3009c6274 100644 (file)
                                reg = <0x00500000 0x80000
                                       0xf803c000 0x400>;
                                interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
-                               clocks = <&usb>, <&udphs_clk>;
+                               clocks = <&utmi>, <&udphs_clk>;
                                clock-names = "hclk", "pclk";
                                status = "disabled";
 
                                atmel,watchdog-type = "hardware";
                                atmel,reset-type = "all";
                                atmel,dbg-halt;
-                               atmel,idle-halt;
                                status = "disabled";
                        };
 
                        compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
                        reg = <0x00700000 0x100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+                       clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ehci_clk", "uhpck";
                        status = "disabled";
                };
index 5126f9e77a9883ceb4f8af0f24a46d17096e084c..ff5fb6ab0b9748dbecd27fd7432c4f8306f5221d 100644 (file)
                };
        };
 
+       i2c0: i2c@18008000 {
+               compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+               reg = <0x18008000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+               status = "disabled";
+       };
+
+       i2c1: i2c@1800b000 {
+               compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+               reg = <0x1800b000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+               status = "disabled";
+       };
+
        uart0: serial@18020000 {
                compatible = "snps,dw-apb-uart";
                reg = <0x18020000 0x100>;
index d2d8e94e0aa2042b80dff2d34b49bad66e16345d..f46329c8ad75c00c068e269565bdb1b931083f47 100644 (file)
@@ -66,8 +66,9 @@
                        reg = <0x1d000 0x1000>;
                        cache-unified;
                        cache-level = <2>;
-                       cache-sets = <16>;
-                       cache-size = <0x80000>;
+                       cache-size = <524288>;
+                       cache-sets = <1024>;
+                       cache-line-size = <32>;
                        interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index 857d0289ad4d7c9ed81268a2d2a20c923b2177a0..d3a29c1b841727f58200a37ac14b03e96a18cdd2 100644 (file)
                        DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0)     /* SPI_D1 */
                >;
        };
+
+       usb0_pins: pinmux_usb0_pins {
+               pinctrl-single,pins = <
+                       DM816X_IOPAD(0x0d00, MUX_MODE0)                 /* USB0_DRVVBUS */
+               >;
+       };
+
+       usb1_pins: pinmux_usb1_pins {
+               pinctrl-single,pins = <
+                       DM816X_IOPAD(0x0d04, MUX_MODE0)                 /* USB1_DRVVBUS */
+               >;
+       };
 };
 
 &i2c1 {
 &mmc1 {
        vmmc-supply = <&vmmcsd_fixed>;
 };
+
+/* At least dm8168-evm rev c does not support multipoint; later revisions may */
+&usb0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&usb0_pins>;
+       mentor,multipoint = <0>;
+};
+
+&usb1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&usb1_pins>;
+       mentor,multipoint = <0>;
+};
index d98d0f7de380d95d956aa6826dd626c53d500be7..3c97b5f2addc12a86639ec7b8e63e92349831236 100644 (file)
 
                        /* Device Configuration Registers */
                        scm_conf: syscon@600 {
-                               compatible = "syscon";
+                               compatible = "syscon", "simple-bus";
                                reg = <0x600 0x110>;
                                #address-cells = <1>;
                                #size-cells = <1>;
+                               ranges = <0 0x600 0x110>;
+
+                               usb_phy0: usb-phy@20 {
+                                       compatible = "ti,dm8168-usb-phy";
+                                       reg = <0x20 0x8>;
+                                       reg-names = "phy";
+                                       clocks = <&main_fapll 6>;
+                                       clock-names = "refclk";
+                                       #phy-cells = <0>;
+                                       syscon = <&scm_conf>;
+                               };
+
+                               usb_phy1: usb-phy@28 {
+                                       compatible = "ti,dm8168-usb-phy";
+                                       reg = <0x28 0x8>;
+                                       reg-names = "phy";
+                                       clocks = <&main_fapll 6>;
+                                       clock-names = "refclk";
+                                       #phy-cells = <0>;
+                                       syscon = <&scm_conf>;
+                               };
                        };
 
                        scrm_clocks: clocks {
                                reg-names = "mc", "control";
                                interrupts = <18>;
                                interrupt-names = "mc";
-                               dr_mode = "otg";
+                               dr_mode = "host";
+                               interface-type = <0>;
+                               phys = <&usb_phy0>;
+                               phy-names = "usb2-phy";
                                mentor,multipoint = <1>;
                                mentor,num-eps = <16>;
                                mentor,ram-bits = <12>;
 
                        usb1: usb@47401800 {
                                compatible = "ti,musb-am33xx";
-                               status = "disabled";
                                reg = <0x47401c00 0x400
                                       0x47401800 0x200>;
                                reg-names = "mc", "control";
                                interrupts = <19>;
                                interrupt-names = "mc";
-                               dr_mode = "otg";
+                               dr_mode = "host";
+                               interface-type = <0>;
+                               phys = <&usb_phy1>;
+                               phy-names = "usb2-phy";
                                mentor,multipoint = <1>;
                                mentor,num-eps = <16>;
                                mentor,ram-bits = <12>;
index 746cddb1b8f538e1d1fed717b077998f4a46adc8..7563d7ce01bbc74ed71fab8cd07a578060325500 100644 (file)
 
        dcan1_pins_default: dcan1_pins_default {
                pinctrl-single,pins = <
-                       0x3d0   (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */
-                       0x3d4   (MUX_MODE15)            /* dcan1_rx.off */
-                       0x418   (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
+                       0x3d0   (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+                       0x418   (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
                >;
        };
 
        dcan1_pins_sleep: dcan1_pins_sleep {
                pinctrl-single,pins = <
-                       0x3d0   (MUX_MODE15)    /* dcan1_tx.off */
-                       0x3d4   (MUX_MODE15)    /* dcan1_rx.off */
-                       0x418   (MUX_MODE15)    /* wakeup0.off */
+                       0x3d0   (MUX_MODE15 | PULL_UP)  /* dcan1_tx.off */
+                       0x418   (MUX_MODE15 | PULL_UP)  /* wakeup0.off */
                >;
        };
 };
        };
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-       extcon = <&extcon_usb2>;
-};
-
 &usb1 {
        dr_mode = "peripheral";
        pinctrl-names = "default";
index 5827fedafd43d58a00169a4186cbb595d6f11234..127608d79033e32e76143b0707918134f6450283 100644 (file)
                                     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <127>;
+                       dma-channels = <32>;
+                       dma-requests = <127>;
                };
 
                gpio1: gpio@4ae10000 {
                                      <0x4A096800 0x40>; /* pll_ctrl */
                                reg-names = "phy_rx", "phy_tx", "pll_ctrl";
                                ctrl-module = <&omap_control_sata>;
-                               clocks = <&sys_clkin1>;
-                               clock-names = "sysclk";
+                               clocks = <&sys_clkin1>, <&sata_ref_clk>;
+                               clock-names = "sysclk", "refclk";
                                #phy-cells = <0>;
                        };
 
index 4d87117136108873d2410ffaa68010652fd2e356..40ed539ce4743a0f3eccc319e4a4c6be09472363 100644 (file)
 
        dcan1_pins_default: dcan1_pins_default {
                pinctrl-single,pins = <
-                       0x3d0   (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */
-                       0x3d4   (MUX_MODE15)            /* dcan1_rx.off */
-                       0x418   (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
+                       0x3d0   (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+                       0x418   (PULL_UP | MUX_MODE1)   /* wakeup0.dcan1_rx */
                >;
        };
 
        dcan1_pins_sleep: dcan1_pins_sleep {
                pinctrl-single,pins = <
-                       0x3d0   (MUX_MODE15)    /* dcan1_tx.off */
-                       0x3d4   (MUX_MODE15)    /* dcan1_rx.off */
-                       0x418   (MUX_MODE15)    /* wakeup0.off */
+                       0x3d0   (MUX_MODE15 | PULL_UP)  /* dcan1_tx.off */
+                       0x418   (MUX_MODE15 | PULL_UP)  /* wakeup0.off */
                >;
        };
 
        phy-supply = <&ldo4_reg>;
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-       extcon = <&extcon_usb2>;
-};
-
 &usb1 {
        dr_mode = "peripheral";
        pinctrl-names = "default";
index 4bdcbd61ce47eac73d287448b2163426d67c7305..99b09a44e2694129ece5426f8df2227719255a37 100644 (file)
                ti,invert-autoidle-bit;
        };
 
+       dpll_core_byp_mux: dpll_core_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x012c>;
+       };
+
        dpll_core_ck: dpll_core_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-core-clock";
-               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
                reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_dsp_byp_mux: dpll_dsp_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x0240>;
+       };
+
        dpll_dsp_ck: dpll_dsp_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+               clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
                reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_iva_byp_mux: dpll_iva_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x01ac>;
+       };
+
        dpll_iva_ck: dpll_iva_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+               clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
                reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_gpu_byp_mux: dpll_gpu_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x02e4>;
+       };
+
        dpll_gpu_ck: dpll_gpu_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
                reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_ddr_byp_mux: dpll_ddr_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x021c>;
+       };
+
        dpll_ddr_ck: dpll_ddr_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
                reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
        };
 
                ti,invert-autoidle-bit;
        };
 
+       dpll_gmac_byp_mux: dpll_gmac_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x02b4>;
+       };
+
        dpll_gmac_ck: dpll_gmac_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+               clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
                reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_eve_byp_mux: dpll_eve_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x0290>;
+       };
+
        dpll_eve_ck: dpll_eve_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+               clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
                reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_per_byp_mux: dpll_per_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x014c>;
+       };
+
        dpll_per_ck: dpll_per_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+               clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
                reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_usb_byp_mux: dpll_usb_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x018c>;
+       };
+
        dpll_usb_ck: dpll_usb_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-j-type-clock";
-               clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+               clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
                reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
        };
 
index 277b48b0b6f9cd155737550d0586e9d720cf75f4..ac6b0ae42caff5f9ad7d14307f2421ddbf35c689 100644 (file)
@@ -18,6 +18,7 @@
  */
 
 #include "skeleton.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
 #include <dt-bindings/clock/exynos3250.h>
 
 / {
                        interrupts = <0 216 0>;
                        clocks = <&cmu CLK_TMU_APBIF>;
                        clock-names = "tmu_apbif";
+                       #include "exynos4412-tmu-sensor-conf.dtsi"
                        status = "disabled";
                };
 
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
new file mode 100644 (file)
index 0000000..735cb2f
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Device tree sources for Exynos4 thermal zone
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+thermal-zones {
+       cpu_thermal: cpu-thermal {
+               thermal-sensors = <&tmu 0>;
+               polling-delay-passive = <0>;
+               polling-delay = <0>;
+               trips {
+                       cpu_alert0: cpu-alert-0 {
+                               temperature = <70000>; /* millicelsius */
+                               hysteresis = <10000>; /* millicelsius */
+                               type = "active";
+                       };
+                       cpu_alert1: cpu-alert-1 {
+                               temperature = <95000>; /* millicelsius */
+                               hysteresis = <10000>; /* millicelsius */
+                               type = "active";
+                       };
+                       cpu_alert2: cpu-alert-2 {
+                               temperature = <110000>; /* millicelsius */
+                               hysteresis = <10000>; /* millicelsius */
+                               type = "active";
+                       };
+                       cpu_crit0: cpu-crit-0 {
+                               temperature = <120000>; /* millicelsius */
+                               hysteresis = <0>; /* millicelsius */
+                               type = "critical";
+                       };
+               };
+               cooling-maps {
+                       map0 {
+                               trip = <&cpu_alert0>;
+                       };
+                       map1 {
+                               trip = <&cpu_alert1>;
+                       };
+               };
+       };
+};
+};
index 76173cacd4501fdd5af0ff061cf6f93e3bd922df..77ea547768f4fa24965ce945b6a7ea2adc12ba60 100644 (file)
@@ -38,6 +38,7 @@
                i2c5 = &i2c_5;
                i2c6 = &i2c_6;
                i2c7 = &i2c_7;
+               i2c8 = &i2c_8;
                csis0 = &csis_0;
                csis1 = &csis_1;
                fimc0 = &fimc_0;
                compatible = "samsung,exynos4210-pd";
                reg = <0x10023C20 0x20>;
                #power-domain-cells = <0>;
+               power-domains = <&pd_lcd0>;
        };
 
        pd_cam: cam-power-domain@10023C00 {
                status = "disabled";
        };
 
+       i2c_8: i2c@138E0000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "samsung,s3c2440-hdmiphy-i2c";
+               reg = <0x138E0000 0x100>;
+               interrupts = <0 93 0>;
+               clocks = <&clock CLK_I2C_HDMI>;
+               clock-names = "i2c";
+               status = "disabled";
+
+               hdmi_i2c_phy: hdmiphy@38 {
+                       compatible = "exynos4210-hdmiphy";
+                       reg = <0x38>;
+               };
+       };
+
        spi_0: spi@13920000 {
                compatible = "samsung,exynos4210-spi";
                reg = <0x13920000 0x100>;
                status = "disabled";
        };
 
+       tmu: tmu@100C0000 {
+               #include "exynos4412-tmu-sensor-conf.dtsi"
+       };
+
+       hdmi: hdmi@12D00000 {
+               compatible = "samsung,exynos4210-hdmi";
+               reg = <0x12D00000 0x70000>;
+               interrupts = <0 92 0>;
+               clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy",
+                       "mout_hdmi";
+               clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
+                       <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
+                       <&clock CLK_MOUT_HDMI>;
+               phy = <&hdmi_i2c_phy>;
+               power-domains = <&pd_tv>;
+               samsung,syscon-phandle = <&pmu_system_controller>;
+               status = "disabled";
+       };
+
+       mixer: mixer@12C10000 {
+               compatible = "samsung,exynos4210-mixer";
+               interrupts = <0 91 0>;
+               reg = <0x12C10000 0x2100>, <0x12c00000 0x300>;
+               power-domains = <&pd_tv>;
+               status = "disabled";
+       };
+
        ppmu_dmc0: ppmu_dmc0@106a0000 {
                compatible = "samsung,exynos-ppmu";
                reg = <0x106a0000 0x2000>;
index 3d6652a4b6cbafad4a935ee82430c0b998e8590e..32c5fd8f6269d9c5932de0d715e7763f99541057 100644 (file)
                status = "okay";
        };
 
+       tmu@100C0000 {
+               status = "okay";
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       cooling-maps {
+                               map0 {
+                                    /* Corresponds to 800MHz at freq_table */
+                                    cooling-device = <&cpu0 2 2>;
+                               };
+                               map1 {
+                                    /* Corresponds to 200MHz at freq_table */
+                                    cooling-device = <&cpu0 4 4>;
+                              };
+                      };
+               };
+       };
+
        camera {
                pinctrl-names = "default";
                pinctrl-0 = <>;
index b57e6b82ea203b521399fc13f56f3e3a99c10c17..d4f2b11319dd10d4d7b79fa295d55e63baccff9c 100644 (file)
                        assigned-clock-rates = <0>, <160000000>;
                };
        };
+
+       hdmi_en: voltage-regulator-hdmi-5v {
+               compatible = "regulator-fixed";
+               regulator-name = "HDMI_5V";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               gpio = <&gpe0 1 0>;
+               enable-active-high;
+       };
+
+       hdmi_ddc: i2c-ddc {
+               compatible = "i2c-gpio";
+               gpios = <&gpe4 2 0 &gpe4 3 0>;
+               i2c-gpio,delay-us = <100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               pinctrl-0 = <&i2c_ddc_bus>;
+               pinctrl-names = "default";
+               status = "okay";
+       };
+
+       mixer@12C10000 {
+               status = "okay";
+       };
+
+       hdmi@12D00000 {
+               hpd-gpio = <&gpx3 7 0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&hdmi_hpd>;
+               hdmi-en-supply = <&hdmi_en>;
+               vdd-supply = <&ldo3_reg>;
+               vdd_osc-supply = <&ldo4_reg>;
+               vdd_pll-supply = <&ldo3_reg>;
+               ddc = <&hdmi_ddc>;
+               status = "okay";
+       };
+
+       i2c@138E0000 {
+               status = "okay";
+       };
+};
+
+&pinctrl_1 {
+       hdmi_hpd: hdmi-hpd {
+               samsung,pins = "gpx3-7";
+               samsung,pin-pud = <0>;
+       };
+};
+
+&pinctrl_0 {
+       i2c_ddc_bus: i2c-ddc-bus {
+               samsung,pins = "gpe4-2", "gpe4-3";
+               samsung,pin-function = <2>;
+               samsung,pin-pud = <3>;
+               samsung,pin-drv = <0>;
+       };
 };
 
 &mdma1 {
index 67c832c9dcf140d6203d742810a374aff867ef69..be89f83f70e7750577441c7400567a57c89ec9cb 100644 (file)
@@ -21,6 +21,7 @@
 
 #include "exynos4.dtsi"
 #include "exynos4210-pinctrl.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
 
 / {
        compatible = "samsung,exynos4210", "samsung,exynos4";
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu@900 {
+               cpu0: cpu@900 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0x900>;
+                       cooling-min-level = <4>;
+                       cooling-max-level = <2>;
+                       #cooling-cells = <2>; /* min followed by max */
                };
 
                cpu@901 {
                reg = <0x03860000 0x1000>;
        };
 
-       tmu@100C0000 {
+       tmu: tmu@100C0000 {
                compatible = "samsung,exynos4210-tmu";
                interrupt-parent = <&combiner>;
                reg = <0x100C0000 0x100>;
                interrupts = <2 4>;
                clocks = <&clock CLK_TMU_APBIF>;
                clock-names = "tmu_apbif";
+               samsung,tmu_gain = <15>;
+               samsung,tmu_reference_voltage = <7>;
                status = "disabled";
        };
 
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&tmu 0>;
+
+                       trips {
+                             cpu_alert0: cpu-alert-0 {
+                                     temperature = <85000>; /* millicelsius */
+                             };
+                             cpu_alert1: cpu-alert-1 {
+                                     temperature = <100000>; /* millicelsius */
+                             };
+                             cpu_alert2: cpu-alert-2 {
+                                     temperature = <110000>; /* millicelsius */
+                             };
+                       };
+               };
+       };
+
        g2d@12800000 {
                compatible = "samsung,s5pv210-g2d";
                reg = <0x12800000 0x1000>;
                };
        };
 
+       mixer: mixer@12C10000 {
+               clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer",
+                       "sclk_mixer";
+               clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+                       <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>,
+                       <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>;
+       };
+
        ppmu_lcd1: ppmu_lcd1@12240000 {
                compatible = "samsung,exynos-ppmu";
                reg = <0x12240000 0x2000>;
index dd0a43ec56da905d64fc02f596b70d0ce92e67f3..5be03288f1ee6157cf35849c256743c9ff86b70d 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu@A00 {
+               cpu0: cpu@A00 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA00>;
+                       cooling-min-level = <13>;
+                       cooling-max-level = <7>;
+                       #cooling-cells = <2>; /* min followed by max */
                };
 
                cpu@A01 {
index de80b5bba20454b3e504fd26228d581bcb725190..adb4f6a97a1d5b19d67d486e7d36dd386884d1fd 100644 (file)
                                        regulator-always-on;
                                };
 
+                               ldo8_reg: ldo@8 {
+                                       regulator-compatible = "LDO8";
+                                       regulator-name = "VDD10_HDMI_1.0V";
+                                       regulator-min-microvolt = <1000000>;
+                                       regulator-max-microvolt = <1000000>;
+                               };
+
+                               ldo10_reg: ldo@10 {
+                                       regulator-compatible = "LDO10";
+                                       regulator-name = "VDDQ_MIPIHSI_1.8V";
+                                       regulator-min-microvolt = <1800000>;
+                                       regulator-max-microvolt = <1800000>;
+                               };
+
                                ldo11_reg: LDO11 {
                                        regulator-name = "VDD18_ABB1_1.8V";
                                        regulator-min-microvolt = <1800000>;
        ehci: ehci@12580000 {
                status = "okay";
        };
+
+       tmu@100C0000 {
+               vtmu-supply = <&ldo10_reg>;
+               status = "okay";
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       cooling-maps {
+                               map0 {
+                                    /* Corresponds to 800MHz at freq_table */
+                                    cooling-device = <&cpu0 7 7>;
+                               };
+                               map1 {
+                                    /* Corresponds to 200MHz at freq_table */
+                                    cooling-device = <&cpu0 13 13>;
+                              };
+                      };
+               };
+       };
+
+       mixer: mixer@12C10000 {
+               status = "okay";
+       };
+
+       hdmi@12D00000 {
+               hpd-gpio = <&gpx3 7 0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&hdmi_hpd>;
+               vdd-supply = <&ldo8_reg>;
+               vdd_osc-supply = <&ldo10_reg>;
+               vdd_pll-supply = <&ldo8_reg>;
+               ddc = <&hdmi_ddc>;
+               status = "okay";
+       };
+
+       hdmi_ddc: i2c@13880000 {
+               status = "okay";
+               pinctrl-names = "default";
+               pinctrl-0 = <&i2c2_bus>;
+       };
+
+       i2c@138E0000 {
+               status = "okay";
+       };
 };
 
 &pinctrl_1 {
                samsung,pin-pud = <0>;
                samsung,pin-drv = <0>;
        };
+
+       hdmi_hpd: hdmi-hpd {
+               samsung,pins = "gpx3-7";
+               samsung,pin-pud = <1>;
+       };
 };
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
new file mode 100644 (file)
index 0000000..e3f7934
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Device tree sources for Exynos4412 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <55>;
+samsung,tmu_min_efuse_value = <40>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
index 21f7480835868396061bc8216833eae37398398a..173ffa479ad3cb03eb6e6742663fafaccacf9d53 100644 (file)
                pulldown-ohm = <100000>; /* 100K */
                io-channels = <&adc 2>;  /* Battery temperature */
        };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       cooling-maps {
+                               map0 {
+                                    /* Corresponds to 800MHz at freq_table */
+                                    cooling-device = <&cpu0 7 7>;
+                               };
+                               map1 {
+                                    /* Corresponds to 200MHz at freq_table */
+                                    cooling-device = <&cpu0 13 13>;
+                              };
+                      };
+               };
+       };
 };
 
 &pmu_system_controller {
index 0f6ec93bb1d8a243d511dac4e2e42f6a794e9015..68ad43b391ae6122c3783b443cf23b5ccb7a2d04 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu@A00 {
+               cpu0: cpu@A00 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA00>;
+                       cooling-min-level = <13>;
+                       cooling-max-level = <7>;
+                       #cooling-cells = <2>; /* min followed by max */
                };
 
                cpu@A01 {
index f5e0ae780d6ce8dd25622abed741b03416570f03..6a6abe14fd9b59eed66e033ef43b970c6d4ce256 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "exynos4.dtsi"
 #include "exynos4x12-pinctrl.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
 
 / {
        aliases {
                clock-names = "tmu_apbif";
                status = "disabled";
        };
+
+       hdmi: hdmi@12D00000 {
+               compatible = "samsung,exynos4212-hdmi";
+       };
+
+       mixer: mixer@12C10000 {
+               compatible = "samsung,exynos4212-mixer";
+               clock-names = "mixer", "hdmi", "sclk_hdmi", "vp";
+               clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+                        <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>;
+       };
 };
index 9bb1b0b738f53d2e544baa582faeec5841b16783..adbde1adad95ddf0bde124e25b2b16ac41dba83b 100644 (file)
@@ -20,7 +20,7 @@
 #include <dt-bindings/clock/exynos5250.h>
 #include "exynos5.dtsi"
 #include "exynos5250-pinctrl.dtsi"
-
+#include "exynos4-cpu-thermal.dtsi"
 #include <dt-bindings/clock/exynos-audss-clk.h>
 
 / {
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu@0 {
+               cpu0: cpu@0 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a15";
                        reg = <0>;
                        clock-frequency = <1700000000>;
+                       cooling-min-level = <15>;
+                       cooling-max-level = <9>;
+                       #cooling-cells = <2>; /* min followed by max */
                };
                cpu@1 {
                        device_type = "cpu";
                #power-domain-cells = <0>;
        };
 
+       pd_disp1: disp1-power-domain@100440A0 {
+               compatible = "samsung,exynos4210-pd";
+               reg = <0x100440A0 0x20>;
+               #power-domain-cells = <0>;
+       };
+
        clock: clock-controller@10010000 {
                compatible = "samsung,exynos5250-clock";
                reg = <0x10010000 0x30000>;
                status = "disabled";
        };
 
-       tmu@10060000 {
+       tmu: tmu@10060000 {
                compatible = "samsung,exynos5250-tmu";
                reg = <0x10060000 0x100>;
                interrupts = <0 65 0>;
                clocks = <&clock CLK_TMU>;
                clock-names = "tmu_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <0>;
+                       polling-delay = <0>;
+                       thermal-sensors = <&tmu 0>;
+
+                       cooling-maps {
+                               map0 {
+                                    /* Corresponds to 800MHz at freq_table */
+                                    cooling-device = <&cpu0 9 9>;
+                               };
+                               map1 {
+                                    /* Corresponds to 200MHz at freq_table */
+                                    cooling-device = <&cpu0 15 15>;
+                              };
+                      };
+               };
        };
 
        serial@12C00000 {
        hdmi: hdmi {
                compatible = "samsung,exynos4212-hdmi";
                reg = <0x14530000 0x70000>;
+               power-domains = <&pd_disp1>;
                interrupts = <0 95 0>;
                clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
                         <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
        mixer {
                compatible = "samsung,exynos5250-mixer";
                reg = <0x14450000 0x10000>;
+               power-domains = <&pd_disp1>;
                interrupts = <0 94 0>;
-               clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>;
-               clock-names = "mixer", "sclk_hdmi";
+               clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+                        <&clock CLK_SCLK_HDMI>;
+               clock-names = "mixer", "hdmi", "sclk_hdmi";
        };
 
        dp_phy: video-phy@10040720 {
        };
 
        dp: dp-controller@145B0000 {
+               power-domains = <&pd_disp1>;
                clocks = <&clock CLK_DP>;
                clock-names = "dp";
                phys = <&dp_phy>;
        };
 
        fimd: fimd@14400000 {
+               power-domains = <&pd_disp1>;
                clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
                clock-names = "sclk_fimd", "fimd";
        };
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
new file mode 100644 (file)
index 0000000..5d31fc1
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Device tree sources for default Exynos5420 thermal zone definition
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+polling-delay-passive = <0>;
+polling-delay = <0>;
+trips {
+       cpu-alert-0 {
+               temperature = <85000>; /* millicelsius */
+               hysteresis = <10000>; /* millicelsius */
+               type = "active";
+       };
+       cpu-alert-1 {
+               temperature = <103000>; /* millicelsius */
+               hysteresis = <10000>; /* millicelsius */
+               type = "active";
+       };
+       cpu-alert-2 {
+               temperature = <110000>; /* millicelsius */
+               hysteresis = <10000>; /* millicelsius */
+               type = "active";
+       };
+       cpu-crit-0 {
+               temperature = <120000>; /* millicelsius */
+               hysteresis = <0>; /* millicelsius */
+               type = "critical";
+       };
+};
index 9dc2e9773b30c5f5aaa22e5b3d1c1968bc1dd226..c0e98cf3514fa1fec0031984f735b02ca054af3c 100644 (file)
                compatible = "samsung,exynos5420-mixer";
                reg = <0x14450000 0x10000>;
                interrupts = <0 94 0>;
-               clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>;
-               clock-names = "mixer", "sclk_hdmi";
+               clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+                        <&clock CLK_SCLK_HDMI>;
+               clock-names = "mixer", "hdmi", "sclk_hdmi";
                power-domains = <&disp_pd>;
        };
 
                interrupts = <0 65 0>;
                clocks = <&clock CLK_TMU>;
                clock-names = "tmu_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_cpu1: tmu@10064000 {
                interrupts = <0 183 0>;
                clocks = <&clock CLK_TMU>;
                clock-names = "tmu_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_cpu2: tmu@10068000 {
                interrupts = <0 184 0>;
                clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_cpu3: tmu@1006c000 {
                interrupts = <0 185 0>;
                clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_gpu: tmu@100a0000 {
                interrupts = <0 215 0>;
                clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
+       };
+
+       thermal-zones {
+               cpu0_thermal: cpu0-thermal {
+                       thermal-sensors = <&tmu_cpu0>;
+                       #include "exynos5420-trip-points.dtsi"
+               };
+               cpu1_thermal: cpu1-thermal {
+                      thermal-sensors = <&tmu_cpu1>;
+                      #include "exynos5420-trip-points.dtsi"
+               };
+               cpu2_thermal: cpu2-thermal {
+                      thermal-sensors = <&tmu_cpu2>;
+                      #include "exynos5420-trip-points.dtsi"
+               };
+               cpu3_thermal: cpu3-thermal {
+                      thermal-sensors = <&tmu_cpu3>;
+                      #include "exynos5420-trip-points.dtsi"
+               };
+               gpu_thermal: gpu-thermal {
+                      thermal-sensors = <&tmu_gpu>;
+                      #include "exynos5420-trip-points.dtsi"
+               };
        };
 
         watchdog: watchdog@101D0000 {
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
new file mode 100644 (file)
index 0000000..7b2fba0
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Device tree sources for Exynos5440 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <5>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <0x5d2d>;
+samsung,tmu_min_efuse_value = <16>;
+samsung,tmu_max_efuse_value = <76>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <70>;
+samsung,tmu_default_temp_offset = <25>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
new file mode 100644 (file)
index 0000000..48adfa8
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for default Exynos5440 thermal zone definition
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+polling-delay-passive = <0>;
+polling-delay = <0>;
+trips {
+       cpu-alert-0 {
+               temperature = <100000>; /* millicelsius */
+               hysteresis = <0>; /* millicelsius */
+               type = "active";
+       };
+       cpu-crit-0 {
+               temperature = <105000>; /* millicelsius */
+               hysteresis = <0>; /* millicelsius */
+               type = "critical";
+       };
+};
index 8f3373cd7b878b79f2e499518a7ec774ffd06db8..59d9416b3b03f042cd05c412736c1fd432440747 100644 (file)
                interrupts = <0 58 0>;
                clocks = <&clock CLK_B_125>;
                clock-names = "tmu_apbif";
+               #include "exynos5440-tmu-sensor-conf.dtsi"
        };
 
        tmuctrl_1: tmuctrl@16011C {
                interrupts = <0 58 0>;
                clocks = <&clock CLK_B_125>;
                clock-names = "tmu_apbif";
+               #include "exynos5440-tmu-sensor-conf.dtsi"
        };
 
        tmuctrl_2: tmuctrl@160120 {
                interrupts = <0 58 0>;
                clocks = <&clock CLK_B_125>;
                clock-names = "tmu_apbif";
+               #include "exynos5440-tmu-sensor-conf.dtsi"
+       };
+
+       thermal-zones {
+               cpu0_thermal: cpu0-thermal {
+                       thermal-sensors = <&tmuctrl_0>;
+                       #include "exynos5440-trip-points.dtsi"
+               };
+               cpu1_thermal: cpu1-thermal {
+                       thermal-sensors = <&tmuctrl_1>;
+                       #include "exynos5440-trip-points.dtsi"
+               };
+               cpu2_thermal: cpu2-thermal {
+                       thermal-sensors = <&tmuctrl_2>;
+                       #include "exynos5440-trip-points.dtsi"
+               };
        };
 
        sata@210000 {
index f1cd2147421d2e0f82e2a921cc5c449d8ff6d1b4..a626e6dd8022c04defdbc56171147c04027aa48b 100644 (file)
@@ -35,6 +35,7 @@
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio3 22 0>;
                        enable-active-high;
+                       vin-supply = <&swbst_reg>;
                };
 
                reg_usb_h1_vbus: regulator@1 {
@@ -45,6 +46,7 @@
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio1 29 0>;
                        enable-active-high;
+                       vin-supply = <&swbst_reg>;
                };
 
                reg_audio: regulator@2 {
index fda4932faefda2f0b847547f15954a94347d5c40..945887d3fdb35a6588155590474479901a45671f 100644 (file)
@@ -52,6 +52,7 @@
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio4 0 0>;
                        enable-active-high;
+                       vin-supply = <&swbst_reg>;
                };
 
                reg_usb_otg2_vbus: regulator@1 {
@@ -62,6 +63,7 @@
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio4 2 0>;
                        enable-active-high;
+                       vin-supply = <&swbst_reg>;
                };
 
                reg_aud3v: regulator@2 {
index 59d1c297bb30f17aaab458f94e6ba52363c635cb..578fa2a54dce35482ba2d7b7fd1d42ce67a762ac 100644 (file)
@@ -87,8 +87,8 @@
                                     <14>,
                                     <15>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <64>;
+                       dma-channels = <32>;
+                       dma-requests = <64>;
                };
 
                i2c1: i2c@48070000 {
index 60403273f83e6918483641884707c8f5bd546d1e..db80f9d376fadf569655fc9660526b16b0c10739 100644 (file)
        model = "Nokia N900";
        compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
 
+       aliases {
+               i2c0;
+               i2c1 = &i2c1;
+               i2c2 = &i2c2;
+               i2c3 = &i2c3;
+       };
+
        cpus {
                cpu@0 {
                        cpu0-supply = <&vcc>;
                compatible = "smsc,lan91c94";
                interrupt-parent = <&gpio2>;
                interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;  /* gpio54 */
-               reg = <1 0x300 0xf>;            /* 16 byte IO range at offset 0x300 */
+               reg = <1 0 0xf>;                /* 16 byte IO range */
                bank-width = <2>;
                pinctrl-names = "default";
                pinctrl-0 = <&ethernet_pins>;
index 01b71111bd558738595fde6deaa71b05c4c08e9a..f4f78c40b56450160ba566cc25bcb7fac9ca489b 100644 (file)
                                     <14>,
                                     <15>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <96>;
+                       dma-channels = <32>;
+                       dma-requests = <96>;
                };
 
                omap3_pmx_core: pinmux@48002030 {
index 074147cebae49f965b254915240b667bffcf9d21..87401d9f4d8b02314323e5a46ee38e5b0c6e4523 100644 (file)
                                     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <127>;
+                       dma-channels = <32>;
+                       dma-requests = <127>;
                };
 
                gpio1: gpio@4a310000 {
index 19212ac6eef054e62e52454c8548a196ca08cafa..de8a3d456cf7de0e3252074e9fb4e3fcb2eca80a 100644 (file)
@@ -13,7 +13,7 @@
 
 core_thermal: core_thermal {
        polling-delay-passive = <250>; /* milliseconds */
-       polling-delay = <1000>; /* milliseconds */
+       polling-delay = <500>; /* milliseconds */
 
                        /* sensor       ID */
        thermal-sensors = <&bandgap     2>;
index 1b87aca88b77130991bb4155331983792116ddeb..bc3090f2e84b3bea10779becd939d5e9555fbffe 100644 (file)
@@ -13,7 +13,7 @@
 
 gpu_thermal: gpu_thermal {
        polling-delay-passive = <250>; /* milliseconds */
-       polling-delay = <1000>; /* milliseconds */
+       polling-delay = <500>; /* milliseconds */
 
                        /* sensor       ID */
        thermal-sensors = <&bandgap     1>;
index b321fdf42c9f3c51e3a59d4b51b27a08f4fcaba4..4a485b63a1413bf8cfda0f027ed5e7a903c68066 100644 (file)
                                     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
-                       #dma-channels = <32>;
-                       #dma-requests = <127>;
+                       dma-channels = <32>;
+                       dma-requests = <127>;
                };
 
                gpio1: gpio@4ae10000 {
                                      <0x4A096800 0x40>; /* pll_ctrl */
                                reg-names = "phy_rx", "phy_tx", "pll_ctrl";
                                ctrl-module = <&omap_control_sata>;
-                               clocks = <&sys_clkin>;
-                               clock-names = "sysclk";
+                               clocks = <&sys_clkin>, <&sata_ref_clk>;
+                               clock-names = "sysclk", "refclk";
                                #phy-cells = <0>;
                        };
                };
        };
 };
 
+&cpu_thermal {
+       polling-delay = <500>; /* milliseconds */
+};
+
 /include/ "omap54xx-clocks.dtsi"
index 58c27466f01262a6f9ecee2058fb11dff9b5df3f..83b425fb3ac20eb1421cede5283b6a3befcdd391 100644 (file)
                ti,index-starts-at-one;
        };
 
+       dpll_core_byp_mux: dpll_core_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+               ti,bit-shift = <23>;
+               reg = <0x012c>;
+       };
+
        dpll_core_ck: dpll_core_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-core-clock";
-               clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+               clocks = <&sys_clkin>, <&dpll_core_byp_mux>;
                reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
        };
 
                clock-div = <1>;
        };
 
+       dpll_iva_byp_mux: dpll_iva_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x01ac>;
+       };
+
        dpll_iva_ck: dpll_iva_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+               clocks = <&sys_clkin>, <&dpll_iva_byp_mux>;
                reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
        };
 
        };
 };
 &cm_core_clocks {
+
+       dpll_per_byp_mux: dpll_per_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x014c>;
+       };
+
        dpll_per_ck: dpll_per_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-clock";
-               clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+               clocks = <&sys_clkin>, <&dpll_per_byp_mux>;
                reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
        };
 
                ti,index-starts-at-one;
        };
 
+       dpll_usb_byp_mux: dpll_usb_byp_mux {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+               ti,bit-shift = <23>;
+               reg = <0x018c>;
+       };
+
        dpll_usb_ck: dpll_usb_ck {
                #clock-cells = <0>;
                compatible = "ti,omap4-dpll-j-type-clock";
-               clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+               clocks = <&sys_clkin>, <&dpll_usb_byp_mux>;
                reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
        };
 
index 261311bdf65bcb601ee5824b57ea3dcf68920db2..367af53c1b8437d30059b32abff7dd3ed7a0c992 100644 (file)
                                atmel,watchdog-type = "hardware";
                                atmel,reset-type = "all";
                                atmel,dbg-halt;
-                               atmel,idle-halt;
                                status = "disabled";
                        };
 
                        compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
                        reg = <0x00700000 0x100000>;
                        interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+                       clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ehci_clk", "uhpck";
                        status = "disabled";
                };
index d986b41b965495bce29dc0d610a715ff4ffc0208..4303874889c69954fa894ff458bb9bfb492f33d4 100644 (file)
@@ -66,6 +66,7 @@
                gpio4 = &pioE;
                tcb0 = &tcb0;
                tcb1 = &tcb1;
+               i2c0 = &i2c0;
                i2c2 = &i2c2;
        };
        cpus {
                        compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
                        reg = <0x00600000 0x100000>;
                        interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+                       clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ehci_clk", "uhpck";
                        status = "disabled";
                };
 
                                        lcdck: lcdck {
                                                #clock-cells = <0>;
-                                               reg = <4>;
-                                               clocks = <&smd>;
+                                               reg = <3>;
+                                               clocks = <&mck>;
                                        };
 
                                        smdck: smdck {
                                                reg = <50>;
                                        };
 
-                                       lcd_clk: lcd_clk {
+                                       lcdc_clk: lcdc_clk {
                                                #clock-cells = <0>;
                                                reg = <51>;
                                        };
index 252c3d1bda501f0814b73f6fa5e267c177cbff75..9d87609567523efd0c0e75033e8bf9e73079d6bd 100644 (file)
                        reg-shift = <2>;
                        reg-io-width = <4>;
                        clocks = <&l4_sp_clk>;
+                       dmas = <&pdma 28>,
+                              <&pdma 29>;
+                       dma-names = "tx", "rx";
                };
 
                uart1: serial1@ffc03000 {
                        reg-shift = <2>;
                        reg-io-width = <4>;
                        clocks = <&l4_sp_clk>;
+                       dmas = <&pdma 30>,
+                              <&pdma 31>;
+                       dma-names = "tx", "rx";
                };
 
                rst: rstmgr@ffd05000 {
index 8ca3c1a2063deeb0eb426a7b320b269f77e4b04d..5c2925831f2038318258003ae6cd3736da71c0de 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <33>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <35>;
                        status = "disabled";
                };
index 905f84d141f03213947def68787c42f905e2b54b..2fd8988f310c6e25dc2d215a35ed1e633d95f83a 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <33>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
index 4910393d1b09013bcb929dc6325d8ddbd6a7d945..f8818f1edbbef27f16adadc139b8e46ae49823ad 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
index 47e557656993fe99f1ea46667a1c1d27e8a2648f..fa2f403ccf28adf4f6aa10c08978dd3b59b6e709 100644 (file)
                        clock-output-names = "axi";
                };
 
-               ahb1_mux: ahb1_mux@01c20054 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-                       reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
-                       clock-output-names = "ahb1_mux";
-               };
-
                ahb1: ahb1@01c20054 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-ahb-clk";
+                       compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&ahb1_mux>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
                        clock-output-names = "ahb1";
                };
 
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                spi0_clk: clk@01c200a0 {
                        #dma-cells = <1>;
 
                        /* DMA controller requires AHB1 clocked from PLL6 */
-                       assigned-clocks = <&ahb1_mux>;
+                       assigned-clocks = <&ahb1>;
                        assigned-clock-parents = <&pll6 0>;
                };
 
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 8>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 9>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 10>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb1_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 11>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
index 786d491542ac429567fd6db6142406858090f81c..3a8530b79f1c46200d2b7ee8bbe343198c7106d8 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
index dd34527293e465d4739cf4518bbd8b292a61989b..382ebd137ee4fbe97514362eee3f336d4e253047 100644 (file)
                };
 
                /* dummy clock until actually implemented */
-               pll6: pll6_clk {
+               pll5: pll5_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-clock";
-                       clock-frequency = <600000000>;
-                       clock-output-names = "pll6";
+                       clock-frequency = <0>;
+                       clock-output-names = "pll5";
+               };
+
+               pll6: clk@01c20028 {
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun6i-a31-pll6-clk";
+                       reg = <0x01c20028 0x4>;
+                       clocks = <&osc24M>;
+                       clock-output-names = "pll6", "pll6x2";
                };
 
                cpu: cpu_clk@01c20050 {
                        clock-output-names = "axi";
                };
 
-               ahb1_mux: ahb1_mux_clk@01c20054 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-                       reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
-                       clock-output-names = "ahb1_mux";
-               };
-
                ahb1: ahb1_clk@01c20054 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-ahb-clk";
+                       compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&ahb1_mux>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
                        clock-output-names = "apb2";
                };
 
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc0";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc1";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc2";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
+               };
+
+               mbus_clk: clk@01c2015c {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun8i-a23-mbus-clk";
+                       reg = <0x01c2015c 0x4>;
+                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clock-output-names = "mbus";
                };
        };
 
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 8>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 9>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 10>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
index f2670f638e97564ac8e3a01ea5bc9a1cbc24be6f..811e72bbe6429b6e6a18ac5c31c267c178d17714 100644 (file)
@@ -70,6 +70,7 @@ CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
+CONFIG_ARM_AT91_ETHER=y
 CONFIG_MACB=y
 # CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_DM9000=y
index e8a4c955241b88e38dfda2cc91d17b701ca6935a..06075b6d246364db1c57b1e8860e6f675d1857c5 100644 (file)
@@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y
 CONFIG_ARCH_STI=y
 CONFIG_ARCH_EXYNOS=y
 CONFIG_EXYNOS5420_MCPM=y
+CONFIG_ARCH_SHMOBILE_MULTI=y
+CONFIG_ARCH_EMEV2=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R8A73A4=y
+CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7779=y
+CONFIG_ARCH_R8A7790=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7794=y
+CONFIG_ARCH_SH73A0=y
+CONFIG_MACH_MARZEN=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
@@ -84,9 +95,11 @@ CONFIG_PCI_KEYSTONE=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
+CONFIG_PCI_RCAR_GEN2=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=16
 CONFIG_HIGHPTE=y
 CONFIG_CMA=y
 CONFIG_ARM_APPENDED_DTB=y
@@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
+CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y
 CONFIG_AHCI_TEGRA=y
 CONFIG_SATA_HIGHBANK=y
 CONFIG_SATA_MV=y
+CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_HIX5HD2_GMAC=y
 CONFIG_SUN4I_EMAC=y
@@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
 CONFIG_KS8851=y
 CONFIG_R8169=y
+CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
 CONFIG_MARVELL_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
+CONFIG_MICREL_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
@@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MPU3050=y
 CONFIG_INPUT_AXP20X_PEK=y
+CONFIG_INPUT_ADXL34X=m
 CONFIG_SERIO_AMBAKMI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=20
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
@@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
 CONFIG_I2C_CADENCE=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_GPIO=m
 CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_RIIC=y
 CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SH_MOBILE=y
 CONFIG_I2C_SIRF=y
-CONFIG_I2C_TEGRA=y
 CONFIG_I2C_ST=y
-CONFIG_SPI=y
+CONFIG_I2C_TEGRA=y
 CONFIG_I2C_XILINX=y
-CONFIG_SPI_DAVINCI=y
+CONFIG_I2C_RCAR=y
+CONFIG_SPI=y
 CONFIG_SPI_CADENCE=y
+CONFIG_SPI_DAVINCI=y
 CONFIG_SPI_OMAP24XX=y
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
+CONFIG_SPI_RSPI=y
+CONFIG_SPI_SH_MSIOF=m
+CONFIG_SPI_SH_HSPI=y
 CONFIG_SPI_SIRF=y
 CONFIG_SPI_SUN4I=y
 CONFIG_SPI_SUN6I=y
@@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_DAVINCI=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_EM=y
+CONFIG_GPIO_RCAR=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCF857X=y
 CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
 CONFIG_GPIO_SYSCON=y
@@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_KEYSTONE=y
 CONFIG_POWER_RESET_SUN6I=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
+CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_DAVINCI_WATCHDOG=y
 CONFIG_ST_THERMAL_SYSCFG=y
@@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_MESON_WATCHDOG=y
+CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AXP20X=y
@@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_BCM590XX=y
+CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_MFD_SYSCON=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_MAX8907=y
+CONFIG_REGULATOR_MAX8973=y
 CONFIG_REGULATOR_MAX77686=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_S2MPS11=y
@@ -324,18 +364,32 @@ CONFIG_REGULATOR_TWL4030=y
 CONFIG_REGULATOR_VEXPRESS=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_RENESAS_VSP1=m
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7180=m
 CONFIG_DRM=y
+CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
+CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
+CONFIG_FB_SH_MOBILE_MERAM=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_AS3711=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=y
@@ -343,6 +397,8 @@ CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_SH4_FSI=m
+CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_TEGRA=y
 CONFIG_SND_SOC_TEGRA_RT5640=y
 CONFIG_SND_SOC_TEGRA_WM8753=y
@@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
 CONFIG_SND_SOC_TEGRA_ALC5632=y
 CONFIG_SND_SOC_TEGRA_MAX98090=y
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_WM8978=m
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
@@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_CHIPIDEA=y
@@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
 CONFIG_USB_MXS_PHY=y
+CONFIG_USB_RCAR_PHY=m
+CONFIG_USB_RCAR_GEN2_PHY=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_ARMMMCI=y
@@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_MVSDIO=y
-CONFIG_MMC_SUNXI=y
+CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SH_MMCIF=y
+CONFIG_MMC_SUNXI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
@@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_RTC_DRV_MAX8907=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TWL4030=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
+CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_VT8500=y
@@ -436,6 +504,9 @@ CONFIG_DMADEVICES=y
 CONFIG_DW_DMAC=y
 CONFIG_MV_XOR=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_SH_DMAE=y
+CONFIG_RCAR_AUDMAC_PP=m
+CONFIG_RCAR_DMAC=y
 CONFIG_STE_DMA40=y
 CONFIG_SIRF_DMA=y
 CONFIG_TI_EDMA=y
@@ -468,6 +539,7 @@ CONFIG_IIO=y
 CONFIG_XILINX_XADC=y
 CONFIG_AK8975=y
 CONFIG_PWM=y
+CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
 CONFIG_PHY_HIX5HD2_SATA=y
index b7386524c356619ceb9889553144e1292d2e1904..8e108599e1af401451aeeb01a9e6b7a1752c7c0d 100644 (file)
@@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ECC_BCH=y
 CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_OMAP_BCH=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_OMAP2=y
@@ -248,6 +249,7 @@ CONFIG_TWL6040_CORE=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_TI_ABB=y
+CONFIG_REGULATOR_TPS62360=m
 CONFIG_REGULATOR_TPS65023=y
 CONFIG_REGULATOR_TPS6507X=y
 CONFIG_REGULATOR_TPS65217=y
@@ -374,7 +376,8 @@ CONFIG_PWM_TIEHRPWM=m
 CONFIG_PWM_TWL=m
 CONFIG_PWM_TWL_LED=m
 CONFIG_OMAP_USB2=m
-CONFIG_TI_PIPE3=m
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
index 41d856effe6cadcf928ddd6f6fd8efa6edbc951d..510c747c65b446b173fb241d15da8c16983a989e 100644 (file)
@@ -3,8 +3,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
index 38840a8129240f7e89af853521b9aa2bb398c40f..8f6a5702b69619eb6d332db0dbf45853b527faa4 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_PERF_EVENTS=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_SMP=y
+CONFIG_NR_CPUS=8
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
 CONFIG_HIGHPTE=y
index f489fdaa19b8ff127944d6bde8a7e0fa806848c4..37fe607a4ede57755a112da4f17fdd8d4ae44f8a 100644 (file)
@@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_MON=y
-CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_ISP1760=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_NEW_LEDS=y
index 71e5fc7cfb18f489f3adadb20c6f8049421fb079..1d1800f71c5b3d372d33ec1e43acdaa35e17d3be 100644 (file)
 # define VFP_ABI_FRAME 0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__  7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch  armv7-a
+.fpu   neon
+
 .text
 .syntax        unified         @ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -74,8 +78,6 @@
 .code   32
 #endif
 
-.fpu   neon
-
 .type  _bsaes_decrypt8,%function
 .align 4
 _bsaes_decrypt8:
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt:
        vld1.8  {q8}, [r0]                      @ initial tweak
        adr     r2, .Lxts_magic
 
+#ifndef        XTS_CHAIN_TWEAK
        tst     r9, #0xf                        @ if not multiple of 16
        it      ne                              @ Thumb2 thing, sanity check in ARM
        subne   r9, #0x10                       @ subtract another 16 bytes
+#endif
        subs    r9, #0x80
 
        blo     .Lxts_dec_short
index be068db960ee0006ac1aa25a4e69b95265404844..a4d3856e7d2477ec6379f1f10a9121e32f36a974 100644 (file)
@@ -701,14 +701,18 @@ $code.=<<___;
 # define VFP_ABI_FRAME 0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__  7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch  armv7-a
+.fpu   neon
+
 .text
 .syntax        unified         @ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -717,8 +721,6 @@ $code.=<<___;
 .code   32
 #endif
 
-.fpu   neon
-
 .type  _bsaes_decrypt8,%function
 .align 4
 _bsaes_decrypt8:
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt:
        vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
        adr     $magic, .Lxts_magic
 
+#ifndef        XTS_CHAIN_TWEAK
        tst     $len, #0xf                      @ if not multiple of 16
        it      ne                              @ Thumb2 thing, sanity check in ARM
        subne   $len, #0x10                     @ subtract another 16 bytes
+#endif
        subs    $len, #0x80
 
        blo     .Lxts_dec_short
index 37ca2a4c6f0944598cacb0fbdab33c716258ea4e..4cf48c3aca13e9afe7b852660afa4d5945994a93 100644 (file)
@@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
 })
 
+#define kvm_pgd_index(addr)                    pgd_index(addr)
+
 static inline bool kvm_page_empty(void *ptr)
 {
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
 }
 
-
 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
 
 #define KVM_PREALLOC_LEVEL     0
 
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
-       return 0;
+       return kvm->arch.pgd;
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm) { }
-
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-       return kvm->arch.pgd;
+       return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 struct kvm;
@@ -207,7 +206,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 
        bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
 
-       VM_BUG_ON(size & PAGE_MASK);
+       VM_BUG_ON(size & ~PAGE_MASK);
 
        if (!need_flush && !icache_is_pipt())
                goto vipt_cache;
index 80a6501b4d5068ceed534f43ba3b1d59083d0b2a..c3c45e628e33bb9bf80a96014f1b510ede78580c 100644 (file)
 #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */
 #endif
 
-/* Keep in sync with mach-at91/include/mach/hardware.h */
+#ifdef CONFIG_MMU
 #define AT91_IO_P2V(x) ((x) - 0x01000000)
+#else
+#define AT91_IO_P2V(x) (x)
+#endif
 
 #define AT91_DBGU_SR           (0x14)  /* Status Register */
 #define AT91_DBGU_THR          (0x1c)  /* Transmitter Holding Register */
index dd9acc95ebc0ef3df443ac9e48b3aa4b06d7b73b..61b53c46edfa7556827ae552082e79ece19df909 100644 (file)
@@ -231,7 +231,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 /*
  * PMU platform driver and devicetree bindings.
  */
-static struct of_device_id cpu_pmu_of_device_ids[] = {
+static const struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
index e55408e965596964ff8c8708dcfec529a559b1bc..1d60bebea4b8b7923748cd6eb2d25fc4299bfc32 100644 (file)
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void)
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
-               unsigned int mmfr0;
-
                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
-               asm("mrc        p15, 0, %0, c0, c1, 4"
-                   : "=r" (mmfr0));
+               unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
index 07e7eb1d7ab63b0417f8a54203620988b72228c4..5560f74f9eeef1e3e4d2c9c39fc672e539eee93f 100644 (file)
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
                kvm_guest_exit();
-               trace_kvm_exit(*vcpu_pc(vcpu));
+               trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
                /*
                 * We may have taken a host interrupt in HYP mode (ie
                 * while executing the guest). This interrupt is still
index 3e6859bc3e1170fc83489be9ba51a15c97b148a5..5656d79c5a44f4d2ca816e15b647abf29a114e0b 100644 (file)
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        phys_addr_t addr = start, end = start + size;
        phys_addr_t next;
 
-       pgd = pgdp + pgd_index(addr);
+       pgd = pgdp + kvm_pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
                if (!pgd_none(*pgd))
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
        phys_addr_t next;
        pgd_t *pgd;
 
-       pgd = kvm->arch.pgd + pgd_index(addr);
+       pgd = kvm->arch.pgd + kvm_pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
                stage2_flush_puds(kvm, pgd, addr, next);
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
                                     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+       free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+       unsigned int size = kvm_get_hwpgd_size();
+
+       return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:       The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
-       int ret;
        pgd_t *pgd;
+       void *hwpgd;
 
        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }
 
+       hwpgd = kvm_alloc_hwpgd();
+       if (!hwpgd)
+               return -ENOMEM;
+
+       /* When the kernel uses more levels of page tables than the
+        * guest, we allocate a fake PGD and pre-populate it to point
+        * to the next-level page table, which will be the real
+        * initial page table pointed to by the VTTBR.
+        *
+        * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+        * the PMD and the kernel will use folded pud.
+        * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+        * pages.
+        */
        if (KVM_PREALLOC_LEVEL > 0) {
+               int i;
+
                /*
                 * Allocate fake pgd for the page table manipulation macros to
                 * work.  This is not used by the hardware and we have no
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
                 */
                pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
                                       GFP_KERNEL | __GFP_ZERO);
+
+               if (!pgd) {
+                       kvm_free_hwpgd(hwpgd);
+                       return -ENOMEM;
+               }
+
+               /* Plug the HW PGD into the fake one. */
+               for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+                       if (KVM_PREALLOC_LEVEL == 1)
+                               pgd_populate(NULL, pgd + i,
+                                            (pud_t *)hwpgd + i * PTRS_PER_PUD);
+                       else if (KVM_PREALLOC_LEVEL == 2)
+                               pud_populate(NULL, pud_offset(pgd, 0) + i,
+                                            (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+               }
        } else {
                /*
                 * Allocate actual first-level Stage-2 page table used by the
                 * hardware for Stage-2 page table walks.
                 */
-               pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+               pgd = (pgd_t *)hwpgd;
        }
 
-       if (!pgd)
-               return -ENOMEM;
-
-       ret = kvm_prealloc_hwpgd(kvm, pgd);
-       if (ret)
-               goto out_err;
-
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;
        return 0;
-out_err:
-       if (KVM_PREALLOC_LEVEL > 0)
-               kfree(pgd);
-       else
-               free_pages((unsigned long)pgd, S2_PGD_ORDER);
-       return ret;
 }
 
 /**
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
                return;
 
        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-       kvm_free_hwpgd(kvm);
+       kvm_free_hwpgd(kvm_get_hwpgd(kvm));
        if (KVM_PREALLOC_LEVEL > 0)
                kfree(kvm->arch.pgd);
-       else
-               free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
        kvm->arch.pgd = NULL;
 }
 
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        pgd_t *pgd;
        pud_t *pud;
 
-       pgd = kvm->arch.pgd + pgd_index(addr);
+       pgd = kvm->arch.pgd + kvm_pgd_index(addr);
        if (WARN_ON(pgd_none(*pgd))) {
                if (!cache)
                        return NULL;
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
        pgd_t *pgd;
        phys_addr_t next;
 
-       pgd = kvm->arch.pgd + pgd_index(addr);
+       pgd = kvm->arch.pgd + kvm_pgd_index(addr);
        do {
                /*
                 * Release kvm_mmu_lock periodically if the memory region is
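
[Editor's note] The comment added in kvm_alloc_stage2_pgd above describes the idea: when the host kernel uses more page-table levels than the guest's stage-2 tables, a software-only top-level table is allocated and each of its entries is pre-populated to point at the corresponding slice of the one contiguous hardware table that the VTTBR really uses. A rough user-space analogy of that layout (plain pointers standing in for pgd/pud entries; the sizes are invented for illustration, not PTRS_PER_S2_PGD/PTRS_PER_PUD):

    #include <stdio.h>
    #include <stdlib.h>

    #define FAKE_ENTRIES   16      /* stand-in for the fake top-level size */
    #define SLICE_ENTRIES  512     /* stand-in for entries per next-level table */

    int main(void)
    {
            /* one contiguous "hardware" table, as kvm_alloc_hwpgd() provides */
            unsigned long *hw = calloc(FAKE_ENTRIES * SLICE_ENTRIES, sizeof(*hw));
            /* a software-only top level, as the KVM_PREALLOC_LEVEL > 0 path builds */
            unsigned long **fake = calloc(FAKE_ENTRIES, sizeof(*fake));

            if (!hw || !fake)
                    return 1;

            /* plug each fake entry into its slice of the real table */
            for (int i = 0; i < FAKE_ENTRIES; i++)
                    fake[i] = hw + (size_t)i * SLICE_ENTRIES;

            printf("fake[3] covers hw[%ld..%ld]\n",
                   (long)(fake[3] - hw), (long)(fake[3] - hw + SLICE_ENTRIES - 1));

            free(fake);
            free(hw);
            return 0;
    }
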
index 881874b1a036ce117e01c2c6e124b28fb2506b89..6817664b46b80419047066686a47a8bc7953ebeb 100644 (file)
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry,
 );
 
 TRACE_EVENT(kvm_exit,
-       TP_PROTO(unsigned long vcpu_pc),
-       TP_ARGS(vcpu_pc),
+       TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
+       TP_ARGS(exit_reason, vcpu_pc),
 
        TP_STRUCT__entry(
+               __field(        unsigned int,   exit_reason     )
                __field(        unsigned long,  vcpu_pc         )
        ),
 
        TP_fast_assign(
+               __entry->exit_reason            = exit_reason;
                __entry->vcpu_pc                = vcpu_pc;
        ),
 
-       TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+       TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+                 __entry->exit_reason,
+                 __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_guest_fault,
index 8423be76080eaa293e8f1d36f740548ddbad6973..52241207a82a3d3a802a90c1fe47e78150e8d5ba 100644 (file)
@@ -2,5 +2,7 @@ config MACH_ASM9260
        bool "Alphascale ASM9260"
        depends on ARCH_MULTI_V5
        select CPU_ARM926T
+       select ASM9260_TIMER
+       select GENERIC_CLOCKEVENTS
        help
          Support for Alphascale ASM9260 based platform.
index c6740e359a44e253abea81a8fac9549e7994804f..c74a44324e5bc3dc7cb4b5e0f31e64ca8c167f5f 100644 (file)
@@ -64,7 +64,6 @@ config SOC_SAMA5D4
        select SOC_SAMA5
        select CLKSRC_MMIO
        select CACHE_L2X0
-       select CACHE_PL310
        select HAVE_FB_ATMEL
        select HAVE_AT91_UTMI
        select HAVE_AT91_SMD
index 51761f8927b7a4959468ad0df8c4ddf86565303a..b00d09555f2b7662cb3262ace823a11ccb3fd5e5 100644 (file)
@@ -183,7 +183,7 @@ static struct clock_event_device clkevt = {
 void __iomem *at91_st_base;
 EXPORT_SYMBOL_GPL(at91_st_base);
 
-static struct of_device_id at91rm9200_st_timer_ids[] = {
+static const struct of_device_id at91rm9200_st_timer_ids[] = {
        { .compatible = "atmel,at91rm9200-st" },
        { /* sentinel */ }
 };
index a6e726a6e0b578dc0c6e6dfaa2c996d3baaa59bf..583369ffc284d5588b4c7f4c742cedbefb7614f3 100644 (file)
@@ -35,10 +35,10 @@ extern void __init at91sam9260_pm_init(void);
 extern void __init at91sam9g45_pm_init(void);
 extern void __init at91sam9x5_pm_init(void);
 #else
-void __init at91rm9200_pm_init(void) { }
-void __init at91sam9260_pm_init(void) { }
-void __init at91sam9g45_pm_init(void) { }
-void __init at91sam9x5_pm_init(void) { }
+static inline void __init at91rm9200_pm_init(void) { }
+static inline void __init at91sam9260_pm_init(void) { }
+static inline void __init at91sam9g45_pm_init(void) { }
+static inline void __init at91sam9x5_pm_init(void) { }
 #endif
 
 #endif /* _AT91_GENERIC_H */
index af8d8afc2e12f05d34a395f10408acdec2d8917f..aa4116e9452f725e0f63241cfc083576c665be19 100644 (file)
@@ -226,7 +226,7 @@ void at91_pm_set_standby(void (*at91_standby)(void))
        }
 }
 
-static struct of_device_id ramc_ids[] = {
+static const struct of_device_id ramc_ids[] __initconst = {
        { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
        { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
        { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
@@ -234,7 +234,7 @@ static struct of_device_id ramc_ids[] = {
        { /*sentinel*/ }
 };
 
-static void at91_dt_ramc(void)
+static __init void at91_dt_ramc(void)
 {
        struct device_node *np;
        const struct of_device_id *of_id;
@@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void)
        phys_addr_t sram_pbase;
        unsigned long sram_base;
        struct device_node *node;
-       struct platform_device *pdev;
+       struct platform_device *pdev = NULL;
 
-       node = of_find_compatible_node(NULL, NULL, "mmio-sram");
-       if (!node) {
-               pr_warn("%s: failed to find sram node!\n", __func__);
-               return;
+       for_each_compatible_node(node, NULL, "mmio-sram") {
+               pdev = of_find_device_by_node(node);
+               if (pdev) {
+                       of_node_put(node);
+                       break;
+               }
        }
 
-       pdev = of_find_device_by_node(node);
        if (!pdev) {
                pr_warn("%s: failed to find sram device!\n", __func__);
-               goto put_node;
+               return;
        }
 
        sram_pool = dev_get_gen_pool(&pdev->dev);
        if (!sram_pool) {
                pr_warn("%s: sram pool unavailable!\n", __func__);
-               goto put_node;
+               return;
        }
 
        sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz);
        if (!sram_base) {
                pr_warn("%s: unable to alloc ocram!\n", __func__);
-               goto put_node;
+               return;
        }
 
        sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
        slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false);
-
-put_node:
-       of_node_put(node);
 }
 #endif
 
index d2c89963af2d168179e01e72d51f6684decc8d5e..86c0aa819d2590aae5af7146e5e3a4ebeb086e35 100644 (file)
@@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void)
                "    mcr    p15, 0, %0, c7, c0, 4\n\t"
                "    str    %5, [%1, %2]"
                :
-               : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
+               : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
                  "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
                  "r" (lpr));
 }
index 556151e85ec4c71712373098ce731c68674a757b..931f0e302c035ecc33a138ec0bd61dc910321c66 100644 (file)
  */
 #undef SLOWDOWN_MASTER_CLOCK
 
-#define MCKRDY_TIMEOUT         1000
-#define MOSCRDY_TIMEOUT        1000
-#define PLLALOCK_TIMEOUT       1000
-#define PLLBLOCK_TIMEOUT       1000
-
 pmc    .req    r0
 sdramc .req    r1
 ramc1  .req    r2
@@ -41,60 +36,42 @@ tmp2        .req    r5
  * Wait until master clock is ready (after switching master clock source)
  */
        .macro wait_mckrdy
-       mov     tmp2, #MCKRDY_TIMEOUT
-1:     sub     tmp2, tmp2, #1
-       cmp     tmp2, #0
-       beq     2f
-       ldr     tmp1, [pmc, #AT91_PMC_SR]
+1:     ldr     tmp1, [pmc, #AT91_PMC_SR]
        tst     tmp1, #AT91_PMC_MCKRDY
        beq     1b
-2:
        .endm
 
 /*
  * Wait until master oscillator has stabilized.
  */
        .macro wait_moscrdy
-       mov     tmp2, #MOSCRDY_TIMEOUT
-1:     sub     tmp2, tmp2, #1
-       cmp     tmp2, #0
-       beq     2f
-       ldr     tmp1, [pmc, #AT91_PMC_SR]
+1:     ldr     tmp1, [pmc, #AT91_PMC_SR]
        tst     tmp1, #AT91_PMC_MOSCS
        beq     1b
-2:
        .endm
 
 /*
  * Wait until PLLA has locked.
  */
        .macro wait_pllalock
-       mov     tmp2, #PLLALOCK_TIMEOUT
-1:     sub     tmp2, tmp2, #1
-       cmp     tmp2, #0
-       beq     2f
-       ldr     tmp1, [pmc, #AT91_PMC_SR]
+1:     ldr     tmp1, [pmc, #AT91_PMC_SR]
        tst     tmp1, #AT91_PMC_LOCKA
        beq     1b
-2:
        .endm
 
 /*
  * Wait until PLLB has locked.
  */
        .macro wait_pllblock
-       mov     tmp2, #PLLBLOCK_TIMEOUT
-1:     sub     tmp2, tmp2, #1
-       cmp     tmp2, #0
-       beq     2f
-       ldr     tmp1, [pmc, #AT91_PMC_SR]
+1:     ldr     tmp1, [pmc, #AT91_PMC_SR]
        tst     tmp1, #AT91_PMC_LOCKB
        beq     1b
-2:
        .endm
 
        .text
 
+       .arm
+
 /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc,
  *                     void __iomem *ramc1, int memctrl)
  */
@@ -134,6 +111,16 @@ ddr_sr_enable:
        cmp     memctrl, #AT91_MEMCTRL_DDRSDR
        bne     sdr_sr_enable
 
+       /* LPDDR1 --> force DDR2 mode during self-refresh */
+       ldr     tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+       str     tmp1, .saved_sam9_mdr
+       bic     tmp1, tmp1, #~AT91_DDRSDRC_MD
+       cmp     tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+       ldreq   tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+       biceq   tmp1, tmp1, #AT91_DDRSDRC_MD
+       orreq   tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2
+       streq   tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+
        /* prepare for DDRAM self-refresh mode */
        ldr     tmp1, [sdramc, #AT91_DDRSDRC_LPR]
        str     tmp1, .saved_sam9_lpr
@@ -142,14 +129,26 @@ ddr_sr_enable:
 
        /* figure out if we use the second ram controller */
        cmp     ramc1, #0
-       ldrne   tmp2, [ramc1, #AT91_DDRSDRC_LPR]
-       strne   tmp2, .saved_sam9_lpr1
-       bicne   tmp2, #AT91_DDRSDRC_LPCB
-       orrne   tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
+       beq     ddr_no_2nd_ctrl
+
+       ldr     tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+       str     tmp2, .saved_sam9_mdr1
+       bic     tmp2, tmp2, #~AT91_DDRSDRC_MD
+       cmp     tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+       ldreq   tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+       biceq   tmp2, tmp2, #AT91_DDRSDRC_MD
+       orreq   tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2
+       streq   tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+
+       ldr     tmp2, [ramc1, #AT91_DDRSDRC_LPR]
+       str     tmp2, .saved_sam9_lpr1
+       bic     tmp2, #AT91_DDRSDRC_LPCB
+       orr     tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
 
        /* Enable DDRAM self-refresh mode */
+       str     tmp2, [ramc1, #AT91_DDRSDRC_LPR]
+ddr_no_2nd_ctrl:
        str     tmp1, [sdramc, #AT91_DDRSDRC_LPR]
-       strne   tmp2, [ramc1, #AT91_DDRSDRC_LPR]
 
        b       sdr_sr_done
 
@@ -208,6 +207,7 @@ sdr_sr_done:
        /* Turn off the main oscillator */
        ldr     tmp1, [pmc, #AT91_CKGR_MOR]
        bic     tmp1, tmp1, #AT91_PMC_MOSCEN
+       orr     tmp1, tmp1, #AT91_PMC_KEY
        str     tmp1, [pmc, #AT91_CKGR_MOR]
 
        /* Wait for interrupt */
@@ -216,6 +216,7 @@ sdr_sr_done:
        /* Turn on the main oscillator */
        ldr     tmp1, [pmc, #AT91_CKGR_MOR]
        orr     tmp1, tmp1, #AT91_PMC_MOSCEN
+       orr     tmp1, tmp1, #AT91_PMC_KEY
        str     tmp1, [pmc, #AT91_CKGR_MOR]
 
        wait_moscrdy
@@ -280,12 +281,17 @@ sdr_sr_done:
         */
        cmp     memctrl, #AT91_MEMCTRL_DDRSDR
        bne     sdr_en_restore
+       /* Restore MDR in case of LPDDR1 */
+       ldr     tmp1, .saved_sam9_mdr
+       str     tmp1, [sdramc, #AT91_DDRSDRC_MDR]
        /* Restore LPR on AT91 with DDRAM */
        ldr     tmp1, .saved_sam9_lpr
        str     tmp1, [sdramc, #AT91_DDRSDRC_LPR]
 
        /* if we use the second ram controller */
        cmp     ramc1, #0
+       ldrne   tmp2, .saved_sam9_mdr1
+       strne   tmp2, [ramc1, #AT91_DDRSDRC_MDR]
        ldrne   tmp2, .saved_sam9_lpr1
        strne   tmp2, [ramc1, #AT91_DDRSDRC_LPR]
 
@@ -319,5 +325,11 @@ ram_restored:
 .saved_sam9_lpr1:
        .word 0
 
+.saved_sam9_mdr:
+       .word 0
+
+.saved_sam9_mdr1:
+       .word 0
+
 ENTRY(at91_slow_clock_sz)
        .word .-at91_slow_clock
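
[Editor's note] The LPDDR1 handling added above in assembly amounts to a read-modify-write of the DDR controller's MDR register: save it, keep only the memory-device field, and if it reports low-power DDR, rewrite the field to DDR2 before entering self-refresh, restoring the saved value on resume. A C rendering of that sequence, with illustrative stand-in constants rather than the real AT91_DDRSDRC_MD* values:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative stand-ins for AT91_DDRSDRC_MD* (not the real values) */
    #define MD_MASK          0x7u
    #define MD_LOW_POWER_DDR 0x3u
    #define MD_DDR2          0x6u

    static uint32_t mdr = 0x103;      /* pretend the controller reports LPDDR1 */

    int main(void)
    {
            uint32_t saved = mdr;                      /* str tmp1, .saved_sam9_mdr */

            if ((mdr & MD_MASK) == MD_LOW_POWER_DDR) { /* bic + cmp                 */
                    mdr = (mdr & ~MD_MASK) | MD_DDR2;  /* biceq / orreq / streq     */
                    printf("forcing DDR2 mode: MDR %#x -> %#x\n", saved, mdr);
            }

            /* ... self-refresh, wait-for-interrupt, then restore on resume */
            mdr = saved;                               /* ldr/str of .saved_sam9_mdr */
            return 0;
    }
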
index 19e5a1d9539751c8fba373a88e3f31f558ffd773..4db76a493c5a68e066aa9f664112570dddef5cf3 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <asm/mach/arch.h>
 
-static const char *axxia_dt_match[] __initconst = {
+static const char *const axxia_dt_match[] __initconst = {
        "lsi,axm5516",
        "lsi,axm5516-sim",
        "lsi,axm5516-emu",
index aaeec78c3ec4d05df74531a36245d21c4b9e2dd8..8b11f44bb36e5a3dcfe59cf331e18730e71c9ec5 100644 (file)
@@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE
          This enables support for systems based on Broadcom mobile SoCs.
 
 config ARCH_BCM_281XX
-       bool "Broadcom BCM281XX SoC family"
+       bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7
        select ARCH_BCM_MOBILE
        select HAVE_SMP
        help
@@ -77,7 +77,7 @@ config ARCH_BCM_281XX
          variants.
 
 config ARCH_BCM_21664
-       bool "Broadcom BCM21664 SoC family"
+       bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7
        select ARCH_BCM_MOBILE
        select HAVE_SMP
        help
index 60a5afa06ed7f90428729c2daa1ef59beeeb1c0d..3a60f7ee3f0cc1583788f9cd3da81a5723354647 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
-static const char *brcmstb_match[] __initconst = {
+static const char *const brcmstb_match[] __initconst = {
        "brcm,bcm7445",
        "brcm,brcmstb",
        NULL
index 584e8d4e28926956bed6971713d8f2f3758f59a7..cd30f6f5f2ff15723a2abae07b471b03f65f9b11 100644 (file)
@@ -32,12 +32,14 @@ config ARCH_DAVINCI_DM646x
 
 config ARCH_DAVINCI_DA830
        bool "DA830/OMAP-L137/AM17x based system"
+       depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
        select ARCH_DAVINCI_DA8XX
        select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
        select CP_INTC
 
 config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
+       depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
        select ARCH_DAVINCI_DA8XX
        select CP_INTC
 
index f703d82f08a80d22adc3e8a1f45acbd7fbfa846c..438f68547f4c79ea4a12b0bf82bd85583c02be62 100644 (file)
@@ -20,7 +20,7 @@
 
 #define DA8XX_NUM_UARTS        3
 
-static struct of_device_id da8xx_irq_match[] __initdata = {
+static const struct of_device_id da8xx_irq_match[] __initconst = {
        { .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
        { }
 };
index a8eb909a2b6ccd94dd7f66c0c11370afc9503184..6a2ff0a654a5b53efce08a40c60a1d25f603b166 100644 (file)
@@ -30,7 +30,7 @@ static void __iomem *pinmux_base;
 /*
  * Sets the DAVINCI MUX register based on the table
  */
-int __init_or_module davinci_cfg_reg(const unsigned long index)
+int davinci_cfg_reg(const unsigned long index)
 {
        static DEFINE_SPINLOCK(mux_spin_lock);
        struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -101,7 +101,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
 }
 EXPORT_SYMBOL(davinci_cfg_reg);
 
-int __init_or_module davinci_cfg_reg_list(const short pins[])
+int davinci_cfg_reg_list(const short pins[])
 {
        int i, error = -EINVAL;
 
index 2013f73797ed6c24d8c6f5f5cd850b96cb1f9869..9e9dfdfad9d77fd670fd186d61907f503815cde3 100644 (file)
@@ -227,7 +227,7 @@ static void __init exynos_dt_machine_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static char const *exynos_dt_compat[] __initconst = {
+static char const *const exynos_dt_compat[] __initconst = {
        "samsung,exynos3",
        "samsung,exynos3250",
        "samsung,exynos4",
index 3f32c47a6d74e780f429e5a9331c0737c2cb64d7..d2e9f12d12f187e1e2c40f11748f75bbe0a283f2 100644 (file)
@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
  */
 void exynos_cpu_power_down(int cpu)
 {
-       if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
-               of_machine_is_compatible("samsung,exynos5800"))) {
+       if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
                /*
                 * Bypass power down for CPU0 during suspend. Check for
                 * the SYS_PWR_REG value to decide if we are suspending
index 20f267121b3e7876e4ab806ab6c2f655e9467499..37266a8264372a9d8ab898aec6389476f707869f 100644 (file)
@@ -161,6 +161,34 @@ no_clk:
                of_genpd_add_provider_simple(np, &pd->pd);
        }
 
+       /* Assign the child power domains to their parents */
+       for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+               struct generic_pm_domain *child_domain, *parent_domain;
+               struct of_phandle_args args;
+
+               args.np = np;
+               args.args_count = 0;
+               child_domain = of_genpd_get_from_provider(&args);
+               if (!child_domain)
+                       continue;
+
+               if (of_parse_phandle_with_args(np, "power-domains",
+                                        "#power-domain-cells", 0, &args) != 0)
+                       continue;
+
+               parent_domain = of_genpd_get_from_provider(&args);
+               if (!parent_domain)
+                       continue;
+
+               if (pm_genpd_add_subdomain(parent_domain, child_domain))
+                       pr_warn("%s failed to add subdomain: %s\n",
+                               parent_domain->name, child_domain->name);
+               else
+                       pr_info("%s has as child subdomain: %s.\n",
+                               parent_domain->name, child_domain->name);
+               of_node_put(np);
+       }
+
        return 0;
 }
 arch_initcall(exynos4_pm_init_power_domain);
index 666ec3e5b03fbc05592bf66d82b4bd8d311c41de..318d127df147c2515f5ecb6674bec31423d790b1 100644 (file)
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3;
 static u32 exynos_irqwake_intmask = 0xffffffff;
 
 static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
-       { 73, BIT(1) }, /* RTC alarm */
-       { 74, BIT(2) }, /* RTC tick */
+       { 105, BIT(1) }, /* RTC alarm */
+       { 106, BIT(2) }, /* RTC tick */
        { /* sentinel */ },
 };
 
@@ -587,7 +587,7 @@ static struct exynos_pm_data exynos5420_pm_data = {
        .cpu_suspend    = exynos5420_cpu_suspend,
 };
 
-static struct of_device_id exynos_pmu_of_device_ids[] = {
+static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
        {
                .compatible = "samsung,exynos3250-pmu",
                .data = &exynos3250_pm_data,
index 07a09570175d8b034082a3639ddfa01999bf59d3..231fba0d03e5135466aa55bd765f684b8900084e 100644 (file)
@@ -169,7 +169,7 @@ static void __init highbank_init(void)
                platform_device_register(&highbank_cpuidle_device);
 }
 
-static const char *highbank_match[] __initconst = {
+static const char *const highbank_match[] __initconst = {
        "calxeda,highbank",
        "calxeda,ecx-2000",
        NULL,
index 76b907078b58d365a1acaa73ac85e98e9862833f..c6bd7c7bd4aa4949203505247f0fd9db21ea0c5b 100644 (file)
@@ -45,7 +45,7 @@ static void __init hi3620_map_io(void)
        iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc));
 }
 
-static const char *hi3xxx_compat[] __initconst = {
+static const char *const hi3xxx_compat[] __initconst = {
        "hisilicon,hi3620-hi4511",
        NULL,
 };
@@ -55,7 +55,7 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)")
        .dt_compat      = hi3xxx_compat,
 MACHINE_END
 
-static const char *hix5hd2_compat[] __initconst = {
+static const char *const hix5hd2_compat[] __initconst = {
        "hisilicon,hix5hd2",
        NULL,
 };
@@ -64,7 +64,7 @@ DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)")
        .dt_compat      = hix5hd2_compat,
 MACHINE_END
 
-static const char *hip04_compat[] __initconst = {
+static const char *const hip04_compat[] __initconst = {
        "hisilicon,hip04-d01",
        NULL,
 };
@@ -73,7 +73,7 @@ DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)")
        .dt_compat      = hip04_compat,
 MACHINE_END
 
-static const char *hip01_compat[] __initconst = {
+static const char *const hip01_compat[] __initconst = {
        "hisilicon,hip01",
        "hisilicon,hip01-ca9x2",
        NULL,
index 4ad6e473cf83ab82e3a769ddcae8956762f47be9..9de3412af4063ccd4ae7b8f2a176e9300020947a 100644 (file)
@@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void)
         * set bit IOMUXC_GPR1[21].  Or the PTP clock must be from pad
         * (external OSC), and we need to clear the bit.
         */
-       clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
-                                      IMX6Q_GPR1_ENET_CLK_SEL_PAD;
+       clksel = clk_is_match(ptp_clk, enet_ref) ?
+                               IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
+                               IMX6Q_GPR1_ENET_CLK_SEL_PAD;
        gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (!IS_ERR(gpr))
                regmap_update_bits(gpr, IOMUXC_GPR1,
index a377f95033aecc6edaab43a7d012df3997b27017..0411f0664c15c0bce0469b9cd073205834ba366a 100644 (file)
@@ -68,7 +68,7 @@ int imx_mmdc_get_ddr_type(void)
        return ddr_type;
 }
 
-static struct of_device_id imx_mmdc_dt_ids[] = {
+static const struct of_device_id imx_mmdc_dt_ids[] = {
        { .compatible = "fsl,imx6q-mmdc", },
        { /* sentinel */ }
 };
index 6a722860e34dc2a4fb0d79f095656067d191bd4a..b024390199639ca31201730ccdbcfceae577be2b 100644 (file)
@@ -245,8 +245,10 @@ static inline void outb(u8 value, u32 addr)
 }
 
 #define outsb outsb
-static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
+static inline void outsb(u32 io_addr, const void *p, u32 count)
 {
+       const u8 *vaddr = p;
+
        while (count--)
                outb(*vaddr++, io_addr);
 }
@@ -262,8 +264,9 @@ static inline void outw(u16 value, u32 addr)
 }
 
 #define outsw outsw
-static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
+static inline void outsw(u32 io_addr, const void *p, u32 count)
 {
+       const u16 *vaddr = p;
        while (count--)
                outw(cpu_to_le16(*vaddr++), io_addr);
 }
@@ -275,8 +278,9 @@ static inline void outl(u32 value, u32 addr)
 }
 
 #define outsl outsl
-static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
+static inline void outsl(u32 io_addr, const void *p, u32 count)
 {
+       const u32 *vaddr = p;
        while (count--)
                outl(cpu_to_le32(*vaddr++), io_addr);
 }
@@ -294,8 +298,9 @@ static inline u8 inb(u32 addr)
 }
 
 #define insb insb
-static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
+static inline void insb(u32 io_addr, void *p, u32 count)
 {
+       u8 *vaddr = p;
        while (count--)
                *vaddr++ = inb(io_addr);
 }
@@ -313,8 +318,9 @@ static inline u16 inw(u32 addr)
 }
 
 #define insw insw
-static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
+static inline void insw(u32 io_addr, void *p, u32 count)
 {
+       u16 *vaddr = p;
        while (count--)
                *vaddr++ = le16_to_cpu(inw(io_addr));
 }
@@ -330,8 +336,9 @@ static inline u32 inl(u32 addr)
 }
 
 #define insl insl
-static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
+static inline void insl(u32 io_addr, void *p, u32 count)
 {
+       u32 *vaddr = p;
        while (count--)
                *vaddr++ = le32_to_cpu(inl(io_addr));
 }
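
[Editor's note] The ioport hunk above changes the string accessors to take `void *`/`const void *` and cast to the width-specific pointer inside, matching the generic prototypes so callers can pass any buffer type without casts. A stand-alone sketch of the same pattern, writing into a memory buffer instead of a real I/O port (the *_demo helper names are invented here):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t port_buf[64];      /* stands in for the I/O port */
    static size_t port_pos;

    static void outb_demo(uint8_t value)
    {
            port_buf[port_pos++] = value;
    }

    /* same shape as the patched outsb(): take const void *, cast inside */
    static void outsb_demo(const void *p, size_t count)
    {
            const uint8_t *vaddr = p;

            while (count--)
                    outb_demo(*vaddr++);
    }

    int main(void)
    {
            const char msg[] = "hello";
            uint16_t words[2] = { 0x1234, 0x5678 };

            outsb_demo(msg, strlen(msg));     /* no cast needed for a char buffer */
            outsb_demo(words, sizeof(words)); /* ...nor for a u16 buffer          */

            printf("wrote %zu bytes\n", port_pos);
            return 0;
    }
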
index 7f352de2609909ad6c5087aaa203f744876fd4b7..06620875813ae93c76dee1628c94d5439e94783e 100644 (file)
@@ -103,7 +103,7 @@ static void __init keystone_init_meminfo(void)
        pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
 }
 
-static const char *keystone_match[] __initconst = {
+static const char *const keystone_match[] __initconst = {
        "ti,keystone",
        NULL,
 };
index ef6041e7e6754c5daf8c07d7f91309f32ce4f4b1..41bebfd296dcbacac7a9d0c60747d5b1c01c73d4 100644 (file)
@@ -61,7 +61,7 @@ static struct pm_clk_notifier_block platform_domain_notifier = {
        .pm_domain = &keystone_pm_domain,
 };
 
-static struct of_device_id of_keystone_table[] = {
+static const struct of_device_id of_keystone_table[] = {
        {.compatible = "ti,keystone"},
        { /* end of list */ },
 };
index 2756351dbb35acac76190f3342a92e006d77c9d6..10bfa03e58d4777a03377391a63f8a69ec2e3c78 100644 (file)
@@ -213,7 +213,7 @@ void __init timer_init(int irq)
 }
 
 #ifdef CONFIG_OF
-static struct of_device_id mmp_timer_dt_ids[] = {
+static const struct of_device_id mmp_timer_dt_ids[] = {
        { .compatible = "mrvl,mmp-timer", },
        {}
 };
index 61bfe584a9d7fad4a7204d3ddb2a0a42afe3a23b..fc832040c6e979f139e272c73a47d167a65d8df5 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
        [1] = {
                .start  = MSM_GPIO_TO_INT(49),
                .end    = MSM_GPIO_TO_INT(49),
-               .flags  = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static struct platform_device *devices[] __initdata = {
index 4c748616ef47eb9de63d751072e8d779fc3ffe44..10016a3bc69826351830ea53f822c7a8e3c4f033 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/err.h>
 #include <linux/clkdev.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
                .flags = IORESOURCE_MEM,
        },
        [1] = {
-               .flags = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static int __init msm_init_smc91x(void)
index b5895f040caaf73a9f304f91c0fca2533a9dcc86..e46e9ea1e187ecfbbeee56c68a882c68076f28b5 100644 (file)
@@ -51,7 +51,7 @@ enum {
        COHERENCY_FABRIC_TYPE_ARMADA_380,
 };
 
-static struct of_device_id of_coherency_table[] = {
+static const struct of_device_id of_coherency_table[] = {
        {.compatible = "marvell,coherency-fabric",
         .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
        {.compatible = "marvell,armada-375-coherency-fabric",
index d8ab605a44fa7f80a87edbd2ffeadd386903ea16..8b9f5e202ccf67d34681a2e1966fc424680a417f 100644 (file)
@@ -104,7 +104,7 @@ static void __iomem *pmsu_mp_base;
 
 static void *mvebu_cpu_resume;
 
-static struct of_device_id of_pmsu_table[] = {
+static const struct of_device_id of_pmsu_table[] = {
        { .compatible = "marvell,armada-370-pmsu", },
        { .compatible = "marvell,armada-370-xp-pmsu", },
        { .compatible = "marvell,armada-380-pmsu", },
index a068cb5c2ce809c285d254a3f6fb924bb9f4ed30..c6c132acd7a61a052e27dce226d5b8bb62648307 100644 (file)
@@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
                return -ENODEV;
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
 void mvebu_armada375_smp_wa_init(void)
 {
        u32 dev, rev;
index 3d24ebf120953d936fa5785fdcd199fcfbf8f1ba..3445a5686805e082d6464aa35df2bb0389b3e276 100644 (file)
@@ -27,7 +27,7 @@
 #include "mmio.h"
 #include "clcd.h"
 
-static const char *nspire_dt_match[] __initconst = {
+static const char *const nspire_dt_match[] __initconst = {
        "ti,nspire",
        "ti,nspire-cx",
        "ti,nspire-tp",
index 00d5d8f9f150ed8e8a475c9103bdc74da2f3cdf1..b83f18fcec9b3c693944a6a2d0aafccf8e660a75 100644 (file)
@@ -190,7 +190,7 @@ obj-$(CONFIG_SOC_OMAP2430)          += clock2430.o
 obj-$(CONFIG_ARCH_OMAP3)               += $(clock-common) clock3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock34xx.o clkt34xx_dpll3m2.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock3517.o clock36xx.o
-obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o cclock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += clkt_iclk.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common)
 obj-$(CONFIG_ARCH_OMAP4)               += dpll3xxx.o dpll44xx.o
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
deleted file mode 100644 (file)
index e79c80b..0000000
+++ /dev/null
@@ -1,3688 +0,0 @@
-/*
- * OMAP3 clock data
- *
- * Copyright (C) 2007-2012 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com>
- * With many device clock fixes by Kevin Hilman and Jouni Högander
- * DPLL bypass clock support added by Roman Tereshonkov
- *
- */
-
-/*
- * Virtual clocks are introduced as convenient tools.
- * They are sources for other clocks and not supposed
- * to be requested from drivers directly.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/clk-private.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock3xxx.h"
-#include "clock34xx.h"
-#include "clock36xx.h"
-#include "clock3517.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-#include "prm3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "control.h"
-
-/*
- * clocks
- */
-
-#define OMAP_CM_REGADDR                OMAP34XX_CM_REGADDR
-
-/* Maximum DPLL multiplier, divider values for OMAP3 */
-#define OMAP3_MAX_DPLL_MULT            2047
-#define OMAP3630_MAX_JTYPE_DPLL_MULT   4095
-#define OMAP3_MAX_DPLL_DIV             128
-
-DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0);
-
-static const char *osc_sys_ck_parent_names[] = {
-       "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck",
-       "virt_38_4m_ck", "virt_16_8m_ck",
-};
-
-DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0,
-              OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT,
-              OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0,
-                  OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT,
-                  OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct dpll_data dpll3_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-       .mult_mask      = OMAP3430_CORE_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_CORE_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_CORE_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_CORE_DPLL_MASK,
-       .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_CORE_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_CORE_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_CORE_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll3_ck;
-
-static const char *dpll3_ck_parent_names[] = {
-       "sys_ck",
-       "sys_ck",
-};
-
-static const struct clk_ops dpll3_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll3_ck_hw = {
-       .hw = {
-               .clk = &dpll3_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll3_dd,
-       .clkdm_name     = "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-                  OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk core_ck;
-
-static const char *core_ck_parent_names[] = {
-       "dpll3_m2_ck",
-};
-
-static const struct clk_ops core_ck_ops = {};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL);
-DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
-
-DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0,
-                  OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk security_l4_ick2;
-
-static const char *security_l4_ick2_parent_names[] = {
-       "l4_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL);
-DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops);
-
-static struct clk aes1_ick;
-
-static const char *aes1_ick_parent_names[] = {
-       "security_l4_ick2",
-};
-
-static const struct clk_ops aes1_ick_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes1_ick_hw = {
-       .hw = {
-               .clk = &aes1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_AES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk core_l4_ick;
-
-static const struct clk_ops core_l4_ick_ops = {
-       .init           = &omap2_init_clk_clkdm,
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk aes2_ick;
-
-static const char *aes2_ick_parent_names[] = {
-       "core_l4_ick",
-};
-
-static const struct clk_ops aes2_ick_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes2_ick_hw = {
-       .hw = {
-               .clk = &aes2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_AES2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk dpll1_fck;
-
-static struct dpll_data dpll1_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-       .mult_mask      = OMAP3430_MPU_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_MPU_DPLL_DIV_MASK,
-       .clk_bypass     = &dpll1_fck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_MPU_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
-       .enable_mask    = OMAP3430_EN_MPU_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_MPU_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-       .autoidle_mask  = OMAP3430_AUTO_MPU_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-       .idlest_mask    = OMAP3430_ST_MPU_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll1_ck;
-
-static const struct clk_ops dpll1_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .set_parent     = &omap3_noncore_dpll_set_parent,
-       .set_rate_and_parent    = &omap3_noncore_dpll_set_rate_and_parent,
-       .determine_rate = &omap3_noncore_dpll_determine_rate,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll1_ck_hw = {
-       .hw = {
-               .clk = &dpll1_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll1_dd,
-       .clkdm_name     = "dpll1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
-                  OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk mpu_ck;
-
-static const char *mpu_ck_parent_names[] = {
-       "dpll1_x2m2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm");
-DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-                  OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH,
-                  0x0, NULL);
-
-static struct clk cam_ick;
-
-static struct clk_hw_omap cam_ick_hw = {
-       .hw = {
-               .clk = &cam_ick,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_CAM_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-/* DPLL4 */
-/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
-/* Type: DPLL */
-static struct dpll_data dpll4_dd;
-
-static struct dpll_data dpll4_dd_34xx __initdata = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-       .mult_mask      = OMAP3430_PERIPH_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_PERIPH_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_PERIPH_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_PERIPH_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_PERIPH_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct dpll_data dpll4_dd_3630 __initdata = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-       .mult_mask      = OMAP3630_PERIPH_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_PERIPH_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_PERIPH_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_PERIPH_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_PERIPH_CLK_MASK,
-       .dco_mask       = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
-       .sddiv_mask     = OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
-       .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-       .flags          = DPLL_J_TYPE
-};
-
-static struct clk dpll4_ck;
-
-static const struct clk_ops dpll4_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .set_rate       = &omap3_dpll4_set_rate,
-       .set_parent     = &omap3_noncore_dpll_set_parent,
-       .set_rate_and_parent    = &omap3_dpll4_set_rate_and_parent,
-       .determine_rate = &omap3_noncore_dpll_determine_rate,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll4_ck_hw = {
-       .hw = {
-               .clk = &dpll4_ck,
-       },
-       .dpll_data      = &dpll4_dd,
-       .ops            = &clkhwops_omap3_dpll,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops);
-
-static const struct clk_div_table dpll4_mx_ck_div_table[] = {
-       { .div = 1, .val = 1 },
-       { .div = 2, .val = 2 },
-       { .div = 3, .val = 3 },
-       { .div = 4, .val = 4 },
-       { .div = 5, .val = 5 },
-       { .div = 6, .val = 6 },
-       { .div = 7, .val = 7 },
-       { .div = 8, .val = 8 },
-       { .div = 9, .val = 9 },
-       { .div = 10, .val = 10 },
-       { .div = 11, .val = 11 },
-       { .div = 12, .val = 12 },
-       { .div = 13, .val = 13 },
-       { .div = 14, .val = 14 },
-       { .div = 15, .val = 15 },
-       { .div = 16, .val = 16 },
-       { .div = 17, .val = 17 },
-       { .div = 18, .val = 18 },
-       { .div = 19, .val = 19 },
-       { .div = 20, .val = 20 },
-       { .div = 21, .val = 21 },
-       { .div = 22, .val = 22 },
-       { .div = 23, .val = 23 },
-       { .div = 24, .val = 24 },
-       { .div = 25, .val = 25 },
-       { .div = 26, .val = 26 },
-       { .div = 27, .val = 27 },
-       { .div = 28, .val = 28 },
-       { .div = 29, .val = 29 },
-       { .div = 30, .val = 30 },
-       { .div = 31, .val = 31 },
-       { .div = 32, .val = 32 },
-       { .div = 0 },
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m5x2_ck;
-
-static const char *dpll4_m5x2_ck_parent_names[] = {
-       "dpll4_m5_ck",
-};
-
-static const struct clk_ops dpll4_m5x2_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .set_rate       = &omap3_clkoutx2_set_rate,
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-       .round_rate     = &omap3_clkoutx2_round_rate,
-};
-
-static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
-       .disable        = &omap2_dflt_clk_disable,
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-};
-
-static struct clk_hw_omap dpll4_m5x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m5x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_CAM_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
-                       dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m5x2_ck_3630 = {
-       .name           = "dpll4_m5x2_ck",
-       .hw             = &dpll4_m5x2_ck_hw.hw,
-       .parent_names   = dpll4_m5x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-static struct clk cam_mclk;
-
-static const char *cam_mclk_parent_names[] = {
-       "dpll4_m5x2_ck",
-};
-
-static struct clk_hw_omap cam_mclk_hw = {
-       .hw = {
-               .clk = &cam_mclk,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_CAM_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-static struct clk cam_mclk = {
-       .name           = "cam_mclk",
-       .hw             = &cam_mclk_hw.hw,
-       .parent_names   = cam_mclk_parent_names,
-       .num_parents    = ARRAY_SIZE(cam_mclk_parent_names),
-       .ops            = &aes2_ick_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-static const struct clksel_rate clkout2_src_core_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_sys_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_96m_rates[] = {
-       { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
-                  OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m2x2_ck;
-
-static const char *dpll4_m2x2_ck_parent_names[] = {
-       "dpll4_m2_ck",
-};
-
-static struct clk_hw_omap dpll4_m2x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m2x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_96M_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m2x2_ck_3630 = {
-       .name           = "dpll4_m2x2_ck",
-       .hw             = &dpll4_m2x2_ck_hw.hw,
-       .parent_names   = dpll4_m2x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m2x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-static struct clk omap_96m_alwon_fck;
-
-static const char *omap_96m_alwon_fck_parent_names[] = {
-       "dpll4_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names,
-                 core_ck_ops);
-
-static struct clk cm_96m_fck;
-
-static const char *cm_96m_fck_parent_names[] = {
-       "omap_96m_alwon_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL);
-DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops);
-
-static const struct clksel_rate clkout2_src_54m_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH,
-                  0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m3x2_ck;
-
-static const char *dpll4_m3x2_ck_parent_names[] = {
-       "dpll4_m3_ck",
-};
-
-static struct clk_hw_omap dpll4_m3x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m3x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_TV_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m3x2_ck_3630 = {
-       .name           = "dpll4_m3x2_ck",
-       .hw             = &dpll4_m3x2_ck_hw.hw,
-       .parent_names   = dpll4_m3x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m3x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-static const char *omap_54m_fck_parent_names[] = {
-       "dpll4_m3x2_ck", "sys_altclk",
-};
-
-DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT,
-              OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL);
-
-static const struct clksel clkout2_src_clksel[] = {
-       { .parent = &core_ck, .rates = clkout2_src_core_rates },
-       { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
-       { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
-       { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
-       { .parent = NULL },
-};
-
-static const char *clkout2_src_ck_parent_names[] = {
-       "core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck",
-};
-
-static const struct clk_ops clkout2_src_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm",
-                        clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL,
-                        OMAP3430_CLKOUT2SOURCE_MASK,
-                        OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT,
-                        NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops);
-
-static const struct clksel_rate omap_48m_cm96m_rates[] = {
-       { .div = 2, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate omap_48m_alt_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel omap_48m_clksel[] = {
-       { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
-       { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
-       { .parent = NULL },
-};
-
-static const char *omap_48m_fck_parent_names[] = {
-       "cm_96m_fck", "sys_altclk",
-};
-
-static struct clk omap_48m_fck;
-
-static const struct clk_ops omap_48m_fck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-static struct clk_hw_omap omap_48m_fck_hw = {
-       .hw = {
-               .clk = &omap_48m_fck,
-       },
-       .clksel         = omap_48m_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_SOURCE_48M_MASK,
-};
-
-DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4);
-
-static struct clk core_12m_fck;
-
-static const char *core_12m_fck_parent_names[] = {
-       "omap_12m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_48m_fck;
-
-static const char *core_48m_fck_parent_names[] = {
-       "omap_48m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static const char *omap_96m_fck_parent_names[] = {
-       "cm_96m_fck", "sys_ck",
-};
-
-DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-              OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL);
-
-static struct clk core_96m_fck;
-
-static const char *core_96m_fck_parent_names[] = {
-       "omap_96m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_l3_ick;
-
-static const char *core_l3_ick_parent_names[] = {
-       "l3_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm");
-DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1);
-
-static struct clk corex2_fck;
-
-static const char *corex2_fck_parent_names[] = {
-       "dpll3_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
-DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
-
-static const char *cpefuse_fck_parent_names[] = {
-       "sys_ck",
-};
-
-static struct clk cpefuse_fck;
-
-static struct clk_hw_omap cpefuse_fck_hw = {
-       .hw = {
-               .clk = &cpefuse_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_CPEFUSE_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk csi2_96m_fck;
-
-static const char *csi2_96m_fck_parent_names[] = {
-       "core_96m_fck",
-};
-
-static struct clk_hw_omap csi2_96m_fck_hw = {
-       .hw = {
-               .clk = &csi2_96m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_CSI2_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk d2d_26m_fck;
-
-static struct clk_hw_omap d2d_26m_fck_hw = {
-       .hw = {
-               .clk = &d2d_26m_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_D2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk des1_ick;
-
-static struct clk_hw_omap des1_ick_hw = {
-       .hw = {
-               .clk = &des1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_DES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk des2_ick;
-
-static struct clk_hw_omap des2_ick_hw = {
-       .hw = {
-               .clk = &des2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_DES2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-                  OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll2_fck;
-
-static struct dpll_data dpll2_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-       .mult_mask      = OMAP3430_IVA2_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_IVA2_DPLL_DIV_MASK,
-       .clk_bypass     = &dpll2_fck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
-       .enable_mask    = OMAP3430_EN_IVA2_DPLL_MASK,
-       .modes          = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
-                          (1 << DPLL_LOW_POWER_BYPASS)),
-       .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-       .autoidle_mask  = OMAP3430_AUTO_IVA2_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
-       .idlest_mask    = OMAP3430_ST_IVA2_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll2_ck;
-
-static struct clk_hw_omap dpll2_ck_hw = {
-       .hw = {
-               .clk = &dpll2_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll2_dd,
-       .clkdm_name     = "dpll2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-                  OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL),
-                  OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll3_m3x2_ck;
-
-static const char *dpll3_m3x2_ck_parent_names[] = {
-       "dpll3_m3_ck",
-};
-
-static struct clk_hw_omap dpll3_m3x2_ck_hw = {
-       .hw = {
-               .clk = &dpll3_m3x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_EMU_CORE_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll3_m3x2_ck_3630 = {
-       .name           = "dpll3_m3x2_ck",
-       .hw             = &dpll3_m3x2_ck_hw.hw,
-       .parent_names   = dpll3_m3x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll3_m3x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH,
-                  0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m4x2_ck;
-
-static const char *dpll4_m4x2_ck_parent_names[] = {
-       "dpll4_m4_ck",
-};
-
-static struct clk_hw_omap dpll4_m4x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m4x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_DSS1_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names,
-               dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m4x2_ck_3630 = {
-       .name           = "dpll4_m4x2_ck",
-       .hw             = &dpll4_m4x2_ck_hw.hw,
-       .parent_names   = dpll4_m4x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m4x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m6x2_ck;
-
-static const char *dpll4_m6x2_ck_parent_names[] = {
-       "dpll4_m6_ck",
-};
-
-static struct clk_hw_omap dpll4_m6x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m6x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m6x2_ck_3630 = {
-       .name           = "dpll4_m6x2_ck",
-       .hw             = &dpll4_m6x2_ck_hw.hw,
-       .parent_names   = dpll4_m6x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m6x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1);
-
-static struct dpll_data dpll5_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
-       .mult_mask      = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
-       .enable_mask    = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
-       .autoidle_mask  = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
-       .idlest_mask    = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll5_ck;
-
-static struct clk_hw_omap dpll5_ck_hw = {
-       .hw = {
-               .clk = &dpll5_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll5_dd,
-       .clkdm_name     = "dpll5_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
-                  OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dss1_alwon_fck_3430es1;
-
-static const char *dss1_alwon_fck_3430es1_parent_names[] = {
-       "dpll4_m4x2_ck",
-};
-
-static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = {
-       .hw = {
-               .clk = &dss1_alwon_fck_3430es1,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS1_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1,
-               dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-               CLK_SET_RATE_PARENT);
-
-static struct clk dss1_alwon_fck_3430es2;
-
-static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = {
-       .hw = {
-               .clk = &dss1_alwon_fck_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS1_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2,
-               dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-               CLK_SET_RATE_PARENT);
-
-static struct clk dss2_alwon_fck;
-
-static struct clk_hw_omap dss2_alwon_fck_hw = {
-       .hw = {
-               .clk = &dss2_alwon_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS2_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_96m_fck;
-
-static struct clk_hw_omap dss_96m_fck_hw = {
-       .hw = {
-               .clk = &dss_96m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_TV_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es1;
-
-static struct clk_hw_omap dss_ick_3430es1_hw = {
-       .hw = {
-               .clk = &dss_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es2;
-
-static struct clk_hw_omap dss_ick_3430es2_hw = {
-       .hw = {
-               .clk = &dss_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_tv_fck;
-
-static const char *dss_tv_fck_parent_names[] = {
-       "omap_54m_fck",
-};
-
-static struct clk_hw_omap dss_tv_fck_hw = {
-       .hw = {
-               .clk = &dss_tv_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_TV_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);
-
-static struct clk emac_fck;
-
-static const char *emac_fck_parent_names[] = {
-       "rmii_ck",
-};
-
-static struct clk_hw_omap emac_fck_hw = {
-       .hw = {
-               .clk = &emac_fck,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_CPGMAC_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
-
-static struct clk ipss_ick;
-
-static const char *ipss_ick_parent_names[] = {
-       "core_l3_ick",
-};
-
-static struct clk_hw_omap ipss_ick_hw = {
-       .hw = {
-               .clk = &ipss_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = AM35XX_EN_IPSS_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk emac_ick;
-
-static const char *emac_ick_parent_names[] = {
-       "ipss_ick",
-};
-
-static struct clk_hw_omap emac_ick_hw = {
-       .hw = {
-               .clk = &emac_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk emu_core_alwon_ck;
-
-static const char *emu_core_alwon_ck_parent_names[] = {
-       "dpll3_m3x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
-DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names,
-                 core_l4_ick_ops);
-
-static struct clk emu_mpu_alwon_ck;
-
-static const char *emu_mpu_alwon_ck_parent_names[] = {
-       "mpu_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL);
-DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops);
-
-static struct clk emu_per_alwon_ck;
-
-static const char *emu_per_alwon_ck_parent_names[] = {
-       "dpll4_m6x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm");
-DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names,
-                 core_l4_ick_ops);
-
-static const char *emu_src_ck_parent_names[] = {
-       "sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck",
-};
-
-static const struct clksel_rate emu_src_sys_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_core_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_per_rates[] = {
-       { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_mpu_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel emu_src_clksel[] = {
-       { .parent = &sys_ck,            .rates = emu_src_sys_rates },
-       { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
-       { .parent = &emu_per_alwon_ck,  .rates = emu_src_per_rates },
-       { .parent = &emu_mpu_alwon_ck,  .rates = emu_src_mpu_rates },
-       { .parent = NULL },
-};
-
-static const struct clk_ops emu_src_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-       .enable         = &omap2_clkops_enable_clkdm,
-       .disable        = &omap2_clkops_disable_clkdm,
-};
-
-static struct clk emu_src_ck;
-
-static struct clk_hw_omap emu_src_ck_hw = {
-       .hw = {
-               .clk = &emu_src_ck,
-       },
-       .clksel         = emu_src_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_MUX_CTRL_MASK,
-       .clkdm_name     = "emu_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk fac_ick;
-
-static struct clk_hw_omap fac_ick_hw = {
-       .hw = {
-               .clk = &fac_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_FAC_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk fshostusb_fck;
-
-static const char *fshostusb_fck_parent_names[] = {
-       "core_48m_fck",
-};
-
-static struct clk_hw_omap fshostusb_fck_hw = {
-       .hw = {
-               .clk = &fshostusb_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ck;
-
-static struct clk_hw_omap gfx_l3_ck_hw = {
-       .hw = {
-               .clk = &gfx_l3_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP_EN_GFX_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0,
-                  OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
-                  OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk gfx_cg1_ck;
-
-static const char *gfx_cg1_ck_parent_names[] = {
-       "gfx_l3_fck",
-};
-
-static struct clk_hw_omap gfx_cg1_ck_hw = {
-       .hw = {
-               .clk = &gfx_cg1_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES1_EN_2D_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_cg2_ck;
-
-static struct clk_hw_omap gfx_cg2_ck_hw = {
-       .hw = {
-               .clk = &gfx_cg2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES1_EN_3D_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ick;
-
-static const char *gfx_l3_ick_parent_names[] = {
-       "gfx_l3_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm");
-DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops);
-
-static struct clk wkup_32k_fck;
-
-static const char *wkup_32k_fck_parent_names[] = {
-       "omap_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_dbck;
-
-static const char *gpio1_dbck_parent_names[] = {
-       "wkup_32k_fck",
-};
-
-static struct clk_hw_omap gpio1_dbck_hw = {
-       .hw = {
-               .clk = &gpio1_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wkup_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_ick;
-
-static const char *gpio1_ick_parent_names[] = {
-       "wkup_l4_ick",
-};
-
-static struct clk_hw_omap gpio1_ick_hw = {
-       .hw = {
-               .clk = &gpio1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_32k_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names,
-                 core_l4_ick_ops);
-
-static struct clk gpio2_dbck;
-
-static const char *gpio2_dbck_parent_names[] = {
-       "per_32k_alwon_fck",
-};
-
-static struct clk_hw_omap gpio2_dbck_hw = {
-       .hw = {
-               .clk = &gpio2_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk per_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm");
-DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk gpio2_ick;
-
-static const char *gpio2_ick_parent_names[] = {
-       "per_l4_ick",
-};
-
-static struct clk_hw_omap gpio2_ick_hw = {
-       .hw = {
-               .clk = &gpio2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_dbck;
-
-static struct clk_hw_omap gpio3_dbck_hw = {
-       .hw = {
-               .clk = &gpio3_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_ick;
-
-static struct clk_hw_omap gpio3_ick_hw = {
-       .hw = {
-               .clk = &gpio3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_dbck;
-
-static struct clk_hw_omap gpio4_dbck_hw = {
-       .hw = {
-               .clk = &gpio4_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_ick;
-
-static struct clk_hw_omap gpio4_ick_hw = {
-       .hw = {
-               .clk = &gpio4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_dbck;
-
-static struct clk_hw_omap gpio5_dbck_hw = {
-       .hw = {
-               .clk = &gpio5_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_ick;
-
-static struct clk_hw_omap gpio5_ick_hw = {
-       .hw = {
-               .clk = &gpio5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_dbck;
-
-static struct clk_hw_omap gpio6_dbck_hw = {
-       .hw = {
-               .clk = &gpio6_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_ick;
-
-static struct clk_hw_omap gpio6_ick_hw = {
-       .hw = {
-               .clk = &gpio6_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpmc_fck;
-
-static struct clk_hw_omap gpmc_fck_hw = {
-       .hw = {
-               .clk = &gpmc_fck,
-       },
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops);
-
-static const struct clksel omap343x_gpt_clksel[] = {
-       { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
-       { .parent = &sys_ck, .rates = gpt_sys_rates },
-       { .parent = NULL },
-};
-
-static const char *gpt10_fck_parent_names[] = {
-       "omap_32k_fck", "sys_ck",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT10_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt10_ick;
-
-static struct clk_hw_omap gpt10_ick_hw = {
-       .hw = {
-               .clk = &gpt10_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_GPT10_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT11_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt11_ick;
-
-static struct clk_hw_omap gpt11_ick_hw = {
-       .hw = {
-               .clk = &gpt11_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_GPT11_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpt12_fck;
-
-static const char *gpt12_fck_parent_names[] = {
-       "secure_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpt12_ick;
-
-static struct clk_hw_omap gpt12_ick_hw = {
-       .hw = {
-               .clk = &gpt12_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT12_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT1_MASK,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt1_ick;
-
-static struct clk_hw_omap gpt1_ick_hw = {
-       .hw = {
-               .clk = &gpt1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT2_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt2_ick;
-
-static struct clk_hw_omap gpt2_ick_hw = {
-       .hw = {
-               .clk = &gpt2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT3_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt3_ick;
-
-static struct clk_hw_omap gpt3_ick_hw = {
-       .hw = {
-               .clk = &gpt3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT4_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt4_ick;
-
-static struct clk_hw_omap gpt4_ick_hw = {
-       .hw = {
-               .clk = &gpt4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT5_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt5_ick;
-
-static struct clk_hw_omap gpt5_ick_hw = {
-       .hw = {
-               .clk = &gpt5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT6_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt6_ick;
-
-static struct clk_hw_omap gpt6_ick_hw = {
-       .hw = {
-               .clk = &gpt6_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT7_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt7_ick;
-
-static struct clk_hw_omap gpt7_ick_hw = {
-       .hw = {
-               .clk = &gpt7_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT7_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT8_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt8_ick;
-
-static struct clk_hw_omap gpt8_ick_hw = {
-       .hw = {
-               .clk = &gpt8_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT8_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT9_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt9_ick;
-
-static struct clk_hw_omap gpt9_ick_hw = {
-       .hw = {
-               .clk = &gpt9_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT9_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hdq_fck;
-
-static const char *hdq_fck_parent_names[] = {
-       "core_12m_fck",
-};
-
-static struct clk_hw_omap hdq_fck_hw = {
-       .hw = {
-               .clk = &hdq_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_HDQ_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops);
-
-static struct clk hdq_ick;
-
-static struct clk_hw_omap hdq_ick_hw = {
-       .hw = {
-               .clk = &hdq_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HDQ_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hecc_ck;
-
-static struct clk_hw_omap hecc_ck_hw = {
-       .hw = {
-               .clk = &hecc_ck,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_HECC_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_fck_am35xx;
-
-static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
-       .hw = {
-               .clk = &hsotgusb_fck_am35xx,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_USBOTG_FCLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es1;
-
-static struct clk_hw_omap hsotgusb_ick_3430es1_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HSOTGUSB_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es2;
-
-static struct clk_hw_omap hsotgusb_ick_3430es2_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_hsotgusb_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HSOTGUSB_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_am35xx;
-
-static struct clk_hw_omap hsotgusb_ick_am35xx_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_am35xx,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_USBOTG_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_fck;
-
-static struct clk_hw_omap i2c1_fck_hw = {
-       .hw = {
-               .clk = &i2c1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_ick;
-
-static struct clk_hw_omap i2c1_ick_hw = {
-       .hw = {
-               .clk = &i2c1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_fck;
-
-static struct clk_hw_omap i2c2_fck_hw = {
-       .hw = {
-               .clk = &i2c2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_ick;
-
-static struct clk_hw_omap i2c2_ick_hw = {
-       .hw = {
-               .clk = &i2c2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_fck;
-
-static struct clk_hw_omap i2c3_fck_hw = {
-       .hw = {
-               .clk = &i2c3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_ick;
-
-static struct clk_hw_omap i2c3_ick_hw = {
-       .hw = {
-               .clk = &i2c3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk icr_ick;
-
-static struct clk_hw_omap icr_ick_hw = {
-       .hw = {
-               .clk = &icr_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_ICR_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk iva2_ck;
-
-static const char *iva2_ck_parent_names[] = {
-       "dpll2_m2_ck",
-};
-
-static struct clk_hw_omap iva2_ck_hw = {
-       .hw = {
-               .clk = &iva2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
-       .clkdm_name     = "iva2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops);
-
-static struct clk mad2d_ick;
-
-static struct clk_hw_omap mad2d_ick_hw = {
-       .hw = {
-               .clk = &mad2d_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-       .enable_bit     = OMAP3430_EN_MAD2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk mailboxes_ick;
-
-static struct clk_hw_omap mailboxes_ick_hw = {
-       .hw = {
-               .clk = &mailboxes_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MAILBOXES_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel mcbsp_15_clksel[] = {
-       { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
-       { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-       { .parent = NULL },
-};
-
-static const char *mcbsp1_fck_parent_names[] = {
-       "core_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-                        OMAP2_MCBSP1_CLKS_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait,
-                        mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp1_ick;
-
-static struct clk_hw_omap mcbsp1_ick_hw = {
-       .hw = {
-               .clk = &mcbsp1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCBSP1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_96m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops);
-
-static const struct clksel mcbsp_234_clksel[] = {
-       { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
-       { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-       { .parent = NULL },
-};
-
-static const char *mcbsp2_fck_parent_names[] = {
-       "per_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-                        OMAP2_MCBSP2_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp2_ick;
-
-static struct clk_hw_omap mcbsp2_ick_hw = {
-       .hw = {
-               .clk = &mcbsp2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP3_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp3_ick;
-
-static struct clk_hw_omap mcbsp3_ick_hw = {
-       .hw = {
-               .clk = &mcbsp3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP4_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp4_ick;
-
-static struct clk_hw_omap mcbsp4_ick_hw = {
-       .hw = {
-               .clk = &mcbsp4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP5_CLKS_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait,
-                        mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp5_ick;
-
-static struct clk_hw_omap mcbsp5_ick_hw = {
-       .hw = {
-               .clk = &mcbsp5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCBSP5_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_fck;
-
-static struct clk_hw_omap mcspi1_fck_hw = {
-       .hw = {
-               .clk = &mcspi1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_ick;
-
-static struct clk_hw_omap mcspi1_ick_hw = {
-       .hw = {
-               .clk = &mcspi1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_fck;
-
-static struct clk_hw_omap mcspi2_fck_hw = {
-       .hw = {
-               .clk = &mcspi2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_ick;
-
-static struct clk_hw_omap mcspi2_ick_hw = {
-       .hw = {
-               .clk = &mcspi2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_fck;
-
-static struct clk_hw_omap mcspi3_fck_hw = {
-       .hw = {
-               .clk = &mcspi3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_ick;
-
-static struct clk_hw_omap mcspi3_ick_hw = {
-       .hw = {
-               .clk = &mcspi3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_fck;
-
-static struct clk_hw_omap mcspi4_fck_hw = {
-       .hw = {
-               .clk = &mcspi4_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_ick;
-
-static struct clk_hw_omap mcspi4_ick_hw = {
-       .hw = {
-               .clk = &mcspi4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_fck;
-
-static struct clk_hw_omap mmchs1_fck_hw = {
-       .hw = {
-               .clk = &mmchs1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_ick;
-
-static struct clk_hw_omap mmchs1_ick_hw = {
-       .hw = {
-               .clk = &mmchs1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_fck;
-
-static struct clk_hw_omap mmchs2_fck_hw = {
-       .hw = {
-               .clk = &mmchs2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_ick;
-
-static struct clk_hw_omap mmchs2_ick_hw = {
-       .hw = {
-               .clk = &mmchs2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_fck;
-
-static struct clk_hw_omap mmchs3_fck_hw = {
-       .hw = {
-               .clk = &mmchs3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES2_EN_MMC3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_ick;
-
-static struct clk_hw_omap mmchs3_ick_hw = {
-       .hw = {
-               .clk = &mmchs3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430ES2_EN_MMC3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk modem_fck;
-
-static struct clk_hw_omap modem_fck_hw = {
-       .hw = {
-               .clk = &modem_fck,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MODEM_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_fck;
-
-static struct clk_hw_omap mspro_fck_hw = {
-       .hw = {
-               .clk = &mspro_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MSPRO_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_ick;
-
-static struct clk_hw_omap mspro_ick_hw = {
-       .hw = {
-               .clk = &mspro_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MSPRO_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk omap_192m_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names,
-                 core_ck_ops);
-
-static struct clk omap_32ksync_ick;
-
-static struct clk_hw_omap omap_32ksync_ick_hw = {
-       .hw = {
-               .clk = &omap_32ksync_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_32KSYNC_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_36XX },
-       { .div = 2, .val = 2, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel omap_96m_alwon_fck_clksel[] = {
-       { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
-       { .parent = NULL }
-};
-
-static struct clk omap_96m_alwon_fck_3630;
-
-static const char *omap_96m_alwon_fck_3630_parent_names[] = {
-       "omap_192m_alwon_fck",
-};
-
-static const struct clk_ops omap_96m_alwon_fck_3630_ops = {
-       .set_rate       = &omap2_clksel_set_rate,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = {
-       .hw = {
-               .clk = &omap_96m_alwon_fck_3630,
-       },
-       .clksel         = omap_96m_alwon_fck_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-       .clksel_mask    = OMAP3630_CLKSEL_96M_MASK,
-};
-
-static struct clk omap_96m_alwon_fck_3630 = {
-       .name   = "omap_96m_alwon_fck",
-       .hw     = &omap_96m_alwon_fck_3630_hw.hw,
-       .parent_names   = omap_96m_alwon_fck_3630_parent_names,
-       .num_parents    = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names),
-       .ops    = &omap_96m_alwon_fck_3630_ops,
-};
-
-static struct clk omapctrl_ick;
-
-static struct clk_hw_omap omapctrl_ick_hw = {
-       .hw = {
-               .clk = &omapctrl_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_OMAPCTRL_SHIFT,
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk per_48m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk security_l3_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL);
-DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops);
-
-static struct clk pka_ick;
-
-static const char *pka_ick_parent_names[] = {
-       "security_l3_ick",
-};
-
-static struct clk_hw_omap pka_ick_hw = {
-       .hw = {
-               .clk = &pka_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_PKA_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0,
-                  OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk rng_ick;
-
-static struct clk_hw_omap rng_ick_hw = {
-       .hw = {
-               .clk = &rng_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_RNG_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sad2d_ick;
-
-static struct clk_hw_omap sad2d_ick_hw = {
-       .hw = {
-               .clk = &sad2d_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SAD2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sdrc_ick;
-
-static struct clk_hw_omap sdrc_ick_hw = {
-       .hw = {
-               .clk = &sdrc_ick,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SDRC_SHIFT,
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate sgx_core_rates[] = {
-       { .div = 2, .val = 5, .flags = RATE_IN_36XX },
-       { .div = 3, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 6, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_96m_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_192m_rates[] = {
-       { .div = 1, .val = 4, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_corex2_rates[] = {
-       { .div = 3, .val = 6, .flags = RATE_IN_36XX },
-       { .div = 5, .val = 7, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel sgx_clksel[] = {
-       { .parent = &core_ck, .rates = sgx_core_rates },
-       { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
-       { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
-       { .parent = &corex2_fck, .rates = sgx_corex2_rates },
-       { .parent = NULL },
-};
-
-static const char *sgx_fck_parent_names[] = {
-       "core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck",
-};
-
-static struct clk sgx_fck;
-
-static const struct clk_ops sgx_fck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel,
-                        OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
-                        OMAP3430ES2_CLKSEL_SGX_MASK,
-                        OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
-                        OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
-                        &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops);
-
-static struct clk sgx_ick;
-
-static struct clk_hw_omap sgx_ick_hw = {
-       .hw = {
-               .clk = &sgx_ick,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
-       .clkdm_name     = "sgx_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sha11_ick;
-
-static struct clk_hw_omap sha11_ick_hw = {
-       .hw = {
-               .clk = &sha11_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_SHA11_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sha12_ick;
-
-static struct clk_hw_omap sha12_ick_hw = {
-       .hw = {
-               .clk = &sha12_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SHA12_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk sr1_fck;
-
-static struct clk_hw_omap sr1_fck_hw = {
-       .hw = {
-               .clk = &sr1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_SR1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr2_fck;
-
-static struct clk_hw_omap sr2_fck_hw = {
-       .hw = {
-               .clk = &sr2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_SR2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_ick_3430es1;
-
-static const char *ssi_ick_3430es1_parent_names[] = {
-       "ssi_l4_ick",
-};
-
-static struct clk_hw_omap ssi_ick_3430es1_hw = {
-       .hw = {
-               .clk = &ssi_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SSI_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static struct clk ssi_ick_3430es2;
-
-static struct clk_hw_omap ssi_ick_3430es2_hw = {
-       .hw = {
-               .clk = &ssi_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_ssi_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SSI_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate ssi_ssr_corex2_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-       { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_clksel[] = {
-       { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
-       { .parent = NULL },
-};
-
-static const char *ssi_ssr_fck_3430es1_parent_names[] = {
-       "corex2_fck",
-};
-
-static const struct clk_ops ssi_ssr_fck_3430es1_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm",
-                        ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_SSI_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_SSI_SHIFT,
-                        NULL, ssi_ssr_fck_3430es1_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm",
-                        ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_SSI_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_SSI_SHIFT,
-                        NULL, ssi_ssr_fck_3430es1_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1",
-                       &ssi_ssr_fck_3430es1, 0x0, 1, 2);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2",
-                       &ssi_ssr_fck_3430es2, 0x0, 1, 2);
-
-static struct clk sys_clkout1;
-
-static const char *sys_clkout1_parent_names[] = {
-       "osc_sys_ck",
-};
-
-static struct clk_hw_omap sys_clkout1_hw = {
-       .hw = {
-               .clk = &sys_clkout1,
-       },
-       .enable_reg     = OMAP3430_PRM_CLKOUT_CTRL,
-       .enable_bit     = OMAP3430_CLKOUT_EN_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0,
-                  OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT,
-                  OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-              OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_TRACECLK_SHIFT,
-                  OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk ts_fck;
-
-static struct clk_hw_omap ts_fck_hw = {
-       .hw = {
-               .clk = &ts_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_TS_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_fck;
-
-static struct clk_hw_omap uart1_fck_hw = {
-       .hw = {
-               .clk = &uart1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_UART1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_ick;
-
-static struct clk_hw_omap uart1_ick_hw = {
-       .hw = {
-               .clk = &uart1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_UART1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart2_fck;
-
-static struct clk_hw_omap uart2_fck_hw = {
-       .hw = {
-               .clk = &uart2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_UART2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart2_ick;
-
-static struct clk_hw_omap uart2_ick_hw = {
-       .hw = {
-               .clk = &uart2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_UART2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart3_fck;
-
-static const char *uart3_fck_parent_names[] = {
-       "per_48m_fck",
-};
-
-static struct clk_hw_omap uart3_fck_hw = {
-       .hw = {
-               .clk = &uart3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_UART3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart3_ick;
-
-static struct clk_hw_omap uart3_ick_hw = {
-       .hw = {
-               .clk = &uart3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_UART3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck;
-
-static struct clk_hw_omap uart4_fck_hw = {
-       .hw = {
-               .clk = &uart4_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3630_EN_UART4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck_am35xx;
-
-static struct clk_hw_omap uart4_fck_am35xx_hw = {
-       .hw = {
-               .clk = &uart4_fck_am35xx,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = AM35XX_EN_UART4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick;
-
-static struct clk_hw_omap uart4_ick_hw = {
-       .hw = {
-               .clk = &uart4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3630_EN_UART4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick_am35xx;
-
-static struct clk_hw_omap uart4_ick_am35xx_hw = {
-       .hw = {
-               .clk = &uart4_ick_am35xx,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = AM35XX_EN_UART4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate div2_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel usb_l4_clksel[] = {
-       { .parent = &l4_ick, .rates = div2_rates },
-       { .parent = NULL },
-};
-
-static const char *usb_l4_ick_parent_names[] = {
-       "l4_ick",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-                        OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-                        &clkhwops_iclk_wait, usb_l4_ick_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-static struct clk usbhost_120m_fck;
-
-static const char *usbhost_120m_fck_parent_names[] = {
-       "dpll5_m2_ck",
-};
-
-static struct clk_hw_omap usbhost_120m_fck_hw = {
-       .hw = {
-               .clk = &usbhost_120m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST2_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names,
-                 aes2_ick_ops);
-
-static struct clk usbhost_48m_fck;
-
-static struct clk_hw_omap usbhost_48m_fck_hw = {
-       .hw = {
-               .clk = &usbhost_48m_fck,
-       },
-       .ops            = &clkhwops_omap3430es2_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST1_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbhost_ick;
-
-static struct clk_hw_omap usbhost_ick_hw = {
-       .hw = {
-               .clk = &usbhost_ick,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_fck;
-
-static struct clk_hw_omap usbtll_fck_hw = {
-       .hw = {
-               .clk = &usbtll_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_ick;
-
-static struct clk_hw_omap usbtll_ick_hw = {
-       .hw = {
-               .clk = &usbtll_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate usim_96m_rates[] = {
-       { .div = 2, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 5, .flags = RATE_IN_3XXX },
-       { .div = 10, .val = 6, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate usim_120m_rates[] = {
-       { .div = 4, .val = 7, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-       { .div = 16, .val = 9, .flags = RATE_IN_3XXX },
-       { .div = 20, .val = 10, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel usim_clksel[] = {
-       { .parent = &omap_96m_fck, .rates = usim_96m_rates },
-       { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
-       { .parent = &sys_ck, .rates = div2_rates },
-       { .parent = NULL },
-};
-
-static const char *usim_fck_parent_names[] = {
-       "omap_96m_fck", "dpll5_m2_ck", "sys_ck",
-};
-
-static struct clk usim_fck;
-
-static const struct clk_ops usim_fck_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                        OMAP3430ES2_CLKSEL_USIMOCP_MASK,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-                        OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait,
-                        usim_fck_parent_names, usim_fck_ops);
-
-static struct clk usim_ick;
-
-static struct clk_hw_omap usim_ick_hw = {
-       .hw = {
-               .clk = &usim_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USIMOCP_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk vpfe_fck;
-
-static const char *vpfe_fck_parent_names[] = {
-       "pclk_ck",
-};
-
-static struct clk_hw_omap vpfe_fck_hw = {
-       .hw = {
-               .clk = &vpfe_fck,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_VPFE_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops);
-
-static struct clk vpfe_ick;
-
-static struct clk_hw_omap vpfe_ick_hw = {
-       .hw = {
-               .clk = &vpfe_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_VPFE_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt1_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk wdt1_ick;
-
-static struct clk_hw_omap wdt1_ick_hw = {
-       .hw = {
-               .clk = &wdt1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_fck;
-
-static struct clk_hw_omap wdt2_fck_hw = {
-       .hw = {
-               .clk = &wdt2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_WDT2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_ick;
-
-static struct clk_hw_omap wdt2_ick_hw = {
-       .hw = {
-               .clk = &wdt2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_fck;
-
-static struct clk_hw_omap wdt3_fck_hw = {
-       .hw = {
-               .clk = &wdt3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_WDT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_ick;
-
-static struct clk_hw_omap wdt3_ick_hw = {
-       .hw = {
-               .clk = &wdt3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-/*
- * clocks specific to omap3430es1
- */
-static struct omap_clk omap3430es1_clks[] = {
-       CLK(NULL,       "gfx_l3_ck",    &gfx_l3_ck),
-       CLK(NULL,       "gfx_l3_fck",   &gfx_l3_fck),
-       CLK(NULL,       "gfx_l3_ick",   &gfx_l3_ick),
-       CLK(NULL,       "gfx_cg1_ck",   &gfx_cg1_ck),
-       CLK(NULL,       "gfx_cg2_ck",   &gfx_cg2_ck),
-       CLK(NULL,       "d2d_26m_fck",  &d2d_26m_fck),
-       CLK(NULL,       "fshostusb_fck", &fshostusb_fck),
-       CLK(NULL,       "ssi_ssr_fck",  &ssi_ssr_fck_3430es1),
-       CLK(NULL,       "ssi_sst_fck",  &ssi_sst_fck_3430es1),
-       CLK("musb-omap2430",    "ick",  &hsotgusb_ick_3430es1),
-       CLK(NULL,       "hsotgusb_ick", &hsotgusb_ick_3430es1),
-       CLK(NULL,       "fac_ick",      &fac_ick),
-       CLK(NULL,       "ssi_ick",      &ssi_ick_3430es1),
-       CLK(NULL,       "usb_l4_ick",   &usb_l4_ick),
-       CLK(NULL,       "dss1_alwon_fck",       &dss1_alwon_fck_3430es1),
-       CLK("omapdss_dss",      "ick",          &dss_ick_3430es1),
-       CLK(NULL,       "dss_ick",              &dss_ick_3430es1),
-};
-
-/*
- * clocks specific to am35xx
- */
-static struct omap_clk am35xx_clks[] = {
-       CLK(NULL,       "ipss_ick",     &ipss_ick),
-       CLK(NULL,       "rmii_ck",      &rmii_ck),
-       CLK(NULL,       "pclk_ck",      &pclk_ck),
-       CLK(NULL,       "emac_ick",     &emac_ick),
-       CLK(NULL,       "emac_fck",     &emac_fck),
-       CLK("davinci_emac.0",   NULL,   &emac_ick),
-       CLK("davinci_mdio.0",   NULL,   &emac_fck),
-       CLK("vpfe-capture",     "master",       &vpfe_ick),
-       CLK("vpfe-capture",     "slave",        &vpfe_fck),
-       CLK(NULL,       "hsotgusb_ick",         &hsotgusb_ick_am35xx),
-       CLK(NULL,       "hsotgusb_fck",         &hsotgusb_fck_am35xx),
-       CLK(NULL,       "hecc_ck",      &hecc_ck),
-       CLK(NULL,       "uart4_ick",    &uart4_ick_am35xx),
-       CLK(NULL,       "uart4_fck",    &uart4_fck_am35xx),
-};
-
-/*
- * clocks specific to omap36xx
- */
-static struct omap_clk omap36xx_clks[] = {
-       CLK(NULL,       "omap_192m_alwon_fck", &omap_192m_alwon_fck),
-       CLK(NULL,       "uart4_fck",    &uart4_fck),
-};
-
-/*
- * clocks common to omap36xx omap34xx
- */
-static struct omap_clk omap34xx_omap36xx_clks[] = {
-       CLK(NULL,       "aes1_ick",     &aes1_ick),
-       CLK("omap_rng", "ick",          &rng_ick),
-       CLK("omap3-rom-rng",    "ick",  &rng_ick),
-       CLK(NULL,       "sha11_ick",    &sha11_ick),
-       CLK(NULL,       "des1_ick",     &des1_ick),
-       CLK(NULL,       "cam_mclk",     &cam_mclk),
-       CLK(NULL,       "cam_ick",      &cam_ick),
-       CLK(NULL,       "csi2_96m_fck", &csi2_96m_fck),
-       CLK(NULL,       "security_l3_ick", &security_l3_ick),
-       CLK(NULL,       "pka_ick",      &pka_ick),
-       CLK(NULL,       "icr_ick",      &icr_ick),
-       CLK("omap-aes", "ick",  &aes2_ick),
-       CLK("omap-sham",        "ick",  &sha12_ick),
-       CLK(NULL,       "des2_ick",     &des2_ick),
-       CLK(NULL,       "mspro_ick",    &mspro_ick),
-       CLK(NULL,       "mailboxes_ick", &mailboxes_ick),
-       CLK(NULL,       "ssi_l4_ick",   &ssi_l4_ick),
-       CLK(NULL,       "sr1_fck",      &sr1_fck),
-       CLK(NULL,       "sr2_fck",      &sr2_fck),
-       CLK(NULL,       "sr_l4_ick",    &sr_l4_ick),
-       CLK(NULL,       "security_l4_ick2", &security_l4_ick2),
-       CLK(NULL,       "wkup_l4_ick",  &wkup_l4_ick),
-       CLK(NULL,       "dpll2_fck",    &dpll2_fck),
-       CLK(NULL,       "iva2_ck",      &iva2_ck),
-       CLK(NULL,       "modem_fck",    &modem_fck),
-       CLK(NULL,       "sad2d_ick",    &sad2d_ick),
-       CLK(NULL,       "mad2d_ick",    &mad2d_ick),
-       CLK(NULL,       "mspro_fck",    &mspro_fck),
-       CLK(NULL,       "dpll2_ck",     &dpll2_ck),
-       CLK(NULL,       "dpll2_m2_ck",  &dpll2_m2_ck),
-};
-
-/*
- * clocks common to omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_omap3430es2plus_clks[] = {
-       CLK(NULL,       "ssi_ssr_fck",  &ssi_ssr_fck_3430es2),
-       CLK(NULL,       "ssi_sst_fck",  &ssi_sst_fck_3430es2),
-       CLK("musb-omap2430",    "ick",  &hsotgusb_ick_3430es2),
-       CLK(NULL,       "hsotgusb_ick", &hsotgusb_ick_3430es2),
-       CLK(NULL,       "ssi_ick",      &ssi_ick_3430es2),
-       CLK(NULL,       "usim_fck",     &usim_fck),
-       CLK(NULL,       "usim_ick",     &usim_ick),
-};
-
-/*
- * clocks common to am35xx omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
-       CLK(NULL,       "virt_16_8m_ck", &virt_16_8m_ck),
-       CLK(NULL,       "dpll5_ck",     &dpll5_ck),
-       CLK(NULL,       "dpll5_m2_ck",  &dpll5_m2_ck),
-       CLK(NULL,       "sgx_fck",      &sgx_fck),
-       CLK(NULL,       "sgx_ick",      &sgx_ick),
-       CLK(NULL,       "cpefuse_fck",  &cpefuse_fck),
-       CLK(NULL,       "ts_fck",       &ts_fck),
-       CLK(NULL,       "usbtll_fck",   &usbtll_fck),
-       CLK(NULL,       "usbtll_ick",   &usbtll_ick),
-       CLK("omap_hsmmc.2",     "ick",  &mmchs3_ick),
-       CLK(NULL,       "mmchs3_ick",   &mmchs3_ick),
-       CLK(NULL,       "mmchs3_fck",   &mmchs3_fck),
-       CLK(NULL,       "dss1_alwon_fck",       &dss1_alwon_fck_3430es2),
-       CLK("omapdss_dss",      "ick",          &dss_ick_3430es2),
-       CLK(NULL,       "dss_ick",              &dss_ick_3430es2),
-       CLK(NULL,       "usbhost_120m_fck", &usbhost_120m_fck),
-       CLK(NULL,       "usbhost_48m_fck", &usbhost_48m_fck),
-       CLK(NULL,       "usbhost_ick",  &usbhost_ick),
-};
-
-/*
- * common clocks
- */
-static struct omap_clk omap3xxx_clks[] = {
-       CLK(NULL,       "apb_pclk",     &dummy_apb_pclk),
-       CLK(NULL,       "omap_32k_fck", &omap_32k_fck),
-       CLK(NULL,       "virt_12m_ck",  &virt_12m_ck),
-       CLK(NULL,       "virt_13m_ck",  &virt_13m_ck),
-       CLK(NULL,       "virt_19200000_ck", &virt_19200000_ck),
-       CLK(NULL,       "virt_26000000_ck", &virt_26000000_ck),
-       CLK(NULL,       "virt_38_4m_ck", &virt_38_4m_ck),
-       CLK(NULL,       "osc_sys_ck",   &osc_sys_ck),
-       CLK("twl",      "fck",          &osc_sys_ck),
-       CLK(NULL,       "sys_ck",       &sys_ck),
-       CLK(NULL,       "omap_96m_alwon_fck", &omap_96m_alwon_fck),
-       CLK("etb",      "emu_core_alwon_ck", &emu_core_alwon_ck),
-       CLK(NULL,       "sys_altclk",   &sys_altclk),
-       CLK(NULL,       "mcbsp_clks",   &mcbsp_clks),
-       CLK(NULL,       "sys_clkout1",  &sys_clkout1),
-       CLK(NULL,       "dpll1_ck",     &dpll1_ck),
-       CLK(NULL,       "dpll1_x2_ck",  &dpll1_x2_ck),
-       CLK(NULL,       "dpll1_x2m2_ck", &dpll1_x2m2_ck),
-       CLK(NULL,       "dpll3_ck",     &dpll3_ck),
-       CLK(NULL,       "core_ck",      &core_ck),
-       CLK(NULL,       "dpll3_x2_ck",  &dpll3_x2_ck),
-       CLK(NULL,       "dpll3_m2_ck",  &dpll3_m2_ck),
-       CLK(NULL,       "dpll3_m2x2_ck", &dpll3_m2x2_ck),
-       CLK(NULL,       "dpll3_m3_ck",  &dpll3_m3_ck),
-       CLK(NULL,       "dpll3_m3x2_ck", &dpll3_m3x2_ck),
-       CLK(NULL,       "dpll4_ck",     &dpll4_ck),
-       CLK(NULL,       "dpll4_x2_ck",  &dpll4_x2_ck),
-       CLK(NULL,       "omap_96m_fck", &omap_96m_fck),
-       CLK(NULL,       "cm_96m_fck",   &cm_96m_fck),
-       CLK(NULL,       "omap_54m_fck", &omap_54m_fck),
-       CLK(NULL,       "omap_48m_fck", &omap_48m_fck),
-       CLK(NULL,       "omap_12m_fck", &omap_12m_fck),
-       CLK(NULL,       "dpll4_m2_ck",  &dpll4_m2_ck),
-       CLK(NULL,       "dpll4_m2x2_ck", &dpll4_m2x2_ck),
-       CLK(NULL,       "dpll4_m3_ck",  &dpll4_m3_ck),
-       CLK(NULL,       "dpll4_m3x2_ck", &dpll4_m3x2_ck),
-       CLK(NULL,       "dpll4_m4_ck",  &dpll4_m4_ck),
-       CLK(NULL,       "dpll4_m4x2_ck", &dpll4_m4x2_ck),
-       CLK(NULL,       "dpll4_m5_ck",  &dpll4_m5_ck),
-       CLK(NULL,       "dpll4_m5x2_ck", &dpll4_m5x2_ck),
-       CLK(NULL,       "dpll4_m6_ck",  &dpll4_m6_ck),
-       CLK(NULL,       "dpll4_m6x2_ck", &dpll4_m6x2_ck),
-       CLK("etb",      "emu_per_alwon_ck", &emu_per_alwon_ck),
-       CLK(NULL,       "clkout2_src_ck", &clkout2_src_ck),
-       CLK(NULL,       "sys_clkout2",  &sys_clkout2),
-       CLK(NULL,       "corex2_fck",   &corex2_fck),
-       CLK(NULL,       "dpll1_fck",    &dpll1_fck),
-       CLK(NULL,       "mpu_ck",       &mpu_ck),
-       CLK(NULL,       "arm_fck",      &arm_fck),
-       CLK("etb",      "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
-       CLK(NULL,       "l3_ick",       &l3_ick),
-       CLK(NULL,       "l4_ick",       &l4_ick),
-       CLK(NULL,       "rm_ick",       &rm_ick),
-       CLK(NULL,       "gpt10_fck",    &gpt10_fck),
-       CLK(NULL,       "gpt11_fck",    &gpt11_fck),
-       CLK(NULL,       "core_96m_fck", &core_96m_fck),
-       CLK(NULL,       "mmchs2_fck",   &mmchs2_fck),
-       CLK(NULL,       "mmchs1_fck",   &mmchs1_fck),
-       CLK(NULL,       "i2c3_fck",     &i2c3_fck),
-       CLK(NULL,       "i2c2_fck",     &i2c2_fck),
-       CLK(NULL,       "i2c1_fck",     &i2c1_fck),
-       CLK(NULL,       "mcbsp5_fck",   &mcbsp5_fck),
-       CLK(NULL,       "mcbsp1_fck",   &mcbsp1_fck),
-       CLK(NULL,       "core_48m_fck", &core_48m_fck),
-       CLK(NULL,       "mcspi4_fck",   &mcspi4_fck),
-       CLK(NULL,       "mcspi3_fck",   &mcspi3_fck),
-       CLK(NULL,       "mcspi2_fck",   &mcspi2_fck),
-       CLK(NULL,       "mcspi1_fck",   &mcspi1_fck),
-       CLK(NULL,       "uart2_fck",    &uart2_fck),
-       CLK(NULL,       "uart1_fck",    &uart1_fck),
-       CLK(NULL,       "core_12m_fck", &core_12m_fck),
-       CLK("omap_hdq.0",       "fck",  &hdq_fck),
-       CLK(NULL,       "hdq_fck",      &hdq_fck),
-       CLK(NULL,       "core_l3_ick",  &core_l3_ick),
-       CLK(NULL,       "sdrc_ick",     &sdrc_ick),
-       CLK(NULL,       "gpmc_fck",     &gpmc_fck),
-       CLK(NULL,       "core_l4_ick",  &core_l4_ick),
-       CLK("omap_hsmmc.1",     "ick",  &mmchs2_ick),
-       CLK("omap_hsmmc.0",     "ick",  &mmchs1_ick),
-       CLK(NULL,       "mmchs2_ick",   &mmchs2_ick),
-       CLK(NULL,       "mmchs1_ick",   &mmchs1_ick),
-       CLK("omap_hdq.0", "ick",        &hdq_ick),
-       CLK(NULL,       "hdq_ick",      &hdq_ick),
-       CLK("omap2_mcspi.4", "ick",     &mcspi4_ick),
-       CLK("omap2_mcspi.3", "ick",     &mcspi3_ick),
-       CLK("omap2_mcspi.2", "ick",     &mcspi2_ick),
-       CLK("omap2_mcspi.1", "ick",     &mcspi1_ick),
-       CLK(NULL,       "mcspi4_ick",   &mcspi4_ick),
-       CLK(NULL,       "mcspi3_ick",   &mcspi3_ick),
-       CLK(NULL,       "mcspi2_ick",   &mcspi2_ick),
-       CLK(NULL,       "mcspi1_ick",   &mcspi1_ick),
-       CLK("omap_i2c.3", "ick",        &i2c3_ick),
-       CLK("omap_i2c.2", "ick",        &i2c2_ick),
-       CLK("omap_i2c.1", "ick",        &i2c1_ick),
-       CLK(NULL,       "i2c3_ick",     &i2c3_ick),
-       CLK(NULL,       "i2c2_ick",     &i2c2_ick),
-       CLK(NULL,       "i2c1_ick",     &i2c1_ick),
-       CLK(NULL,       "uart2_ick",    &uart2_ick),
-       CLK(NULL,       "uart1_ick",    &uart1_ick),
-       CLK(NULL,       "gpt11_ick",    &gpt11_ick),
-       CLK(NULL,       "gpt10_ick",    &gpt10_ick),
-       CLK("omap-mcbsp.5", "ick",      &mcbsp5_ick),
-       CLK("omap-mcbsp.1", "ick",      &mcbsp1_ick),
-       CLK(NULL,       "mcbsp5_ick",   &mcbsp5_ick),
-       CLK(NULL,       "mcbsp1_ick",   &mcbsp1_ick),
-       CLK(NULL,       "omapctrl_ick", &omapctrl_ick),
-       CLK(NULL,       "dss_tv_fck",   &dss_tv_fck),
-       CLK(NULL,       "dss_96m_fck",  &dss_96m_fck),
-       CLK(NULL,       "dss2_alwon_fck",       &dss2_alwon_fck),
-       CLK(NULL,       "init_60m_fclk",        &dummy_ck),
-       CLK(NULL,       "gpt1_fck",     &gpt1_fck),
-       CLK(NULL,       "aes2_ick",     &aes2_ick),
-       CLK(NULL,       "wkup_32k_fck", &wkup_32k_fck),
-       CLK(NULL,       "gpio1_dbck",   &gpio1_dbck),
-       CLK(NULL,       "sha12_ick",    &sha12_ick),
-       CLK(NULL,       "wdt2_fck",             &wdt2_fck),
-       CLK("omap_wdt", "ick",          &wdt2_ick),
-       CLK(NULL,       "wdt2_ick",     &wdt2_ick),
-       CLK(NULL,       "wdt1_ick",     &wdt1_ick),
-       CLK(NULL,       "gpio1_ick",    &gpio1_ick),
-       CLK(NULL,       "omap_32ksync_ick", &omap_32ksync_ick),
-       CLK(NULL,       "gpt12_ick",    &gpt12_ick),
-       CLK(NULL,       "gpt1_ick",     &gpt1_ick),
-       CLK(NULL,       "per_96m_fck",  &per_96m_fck),
-       CLK(NULL,       "per_48m_fck",  &per_48m_fck),
-       CLK(NULL,       "uart3_fck",    &uart3_fck),
-       CLK(NULL,       "gpt2_fck",     &gpt2_fck),
-       CLK(NULL,       "gpt3_fck",     &gpt3_fck),
-       CLK(NULL,       "gpt4_fck",     &gpt4_fck),
-       CLK(NULL,       "gpt5_fck",     &gpt5_fck),
-       CLK(NULL,       "gpt6_fck",     &gpt6_fck),
-       CLK(NULL,       "gpt7_fck",     &gpt7_fck),
-       CLK(NULL,       "gpt8_fck",     &gpt8_fck),
-       CLK(NULL,       "gpt9_fck",     &gpt9_fck),
-       CLK(NULL,       "per_32k_alwon_fck", &per_32k_alwon_fck),
-       CLK(NULL,       "gpio6_dbck",   &gpio6_dbck),
-       CLK(NULL,       "gpio5_dbck",   &gpio5_dbck),
-       CLK(NULL,       "gpio4_dbck",   &gpio4_dbck),
-       CLK(NULL,       "gpio3_dbck",   &gpio3_dbck),
-       CLK(NULL,       "gpio2_dbck",   &gpio2_dbck),
-       CLK(NULL,       "wdt3_fck",     &wdt3_fck),
-       CLK(NULL,       "per_l4_ick",   &per_l4_ick),
-       CLK(NULL,       "gpio6_ick",    &gpio6_ick),
-       CLK(NULL,       "gpio5_ick",    &gpio5_ick),
-       CLK(NULL,       "gpio4_ick",    &gpio4_ick),
-       CLK(NULL,       "gpio3_ick",    &gpio3_ick),
-       CLK(NULL,       "gpio2_ick",    &gpio2_ick),
-       CLK(NULL,       "wdt3_ick",     &wdt3_ick),
-       CLK(NULL,       "uart3_ick",    &uart3_ick),
-       CLK(NULL,       "uart4_ick",    &uart4_ick),
-       CLK(NULL,       "gpt9_ick",     &gpt9_ick),
-       CLK(NULL,       "gpt8_ick",     &gpt8_ick),
-       CLK(NULL,       "gpt7_ick",     &gpt7_ick),
-       CLK(NULL,       "gpt6_ick",     &gpt6_ick),
-       CLK(NULL,       "gpt5_ick",     &gpt5_ick),
-       CLK(NULL,       "gpt4_ick",     &gpt4_ick),
-       CLK(NULL,       "gpt3_ick",     &gpt3_ick),
-       CLK(NULL,       "gpt2_ick",     &gpt2_ick),
-       CLK("omap-mcbsp.2", "ick",      &mcbsp2_ick),
-       CLK("omap-mcbsp.3", "ick",      &mcbsp3_ick),
-       CLK("omap-mcbsp.4", "ick",      &mcbsp4_ick),
-       CLK(NULL,       "mcbsp4_ick",   &mcbsp2_ick),
-       CLK(NULL,       "mcbsp3_ick",   &mcbsp3_ick),
-       CLK(NULL,       "mcbsp2_ick",   &mcbsp4_ick),
-       CLK(NULL,       "mcbsp2_fck",   &mcbsp2_fck),
-       CLK(NULL,       "mcbsp3_fck",   &mcbsp3_fck),
-       CLK(NULL,       "mcbsp4_fck",   &mcbsp4_fck),
-       CLK("etb",      "emu_src_ck",   &emu_src_ck),
-       CLK(NULL,       "emu_src_ck",   &emu_src_ck),
-       CLK(NULL,       "pclk_fck",     &pclk_fck),
-       CLK(NULL,       "pclkx2_fck",   &pclkx2_fck),
-       CLK(NULL,       "atclk_fck",    &atclk_fck),
-       CLK(NULL,       "traceclk_src_fck", &traceclk_src_fck),
-       CLK(NULL,       "traceclk_fck", &traceclk_fck),
-       CLK(NULL,       "secure_32k_fck", &secure_32k_fck),
-       CLK(NULL,       "gpt12_fck",    &gpt12_fck),
-       CLK(NULL,       "wdt1_fck",     &wdt1_fck),
-       CLK(NULL,       "timer_32k_ck", &omap_32k_fck),
-       CLK(NULL,       "timer_sys_ck", &sys_ck),
-       CLK(NULL,       "cpufreq_ck",   &dpll1_ck),
-};
-
-static const char *enable_init_clks[] = {
-       "sdrc_ick",
-       "gpmc_fck",
-       "omapctrl_ick",
-};
-
-int __init omap3xxx_clk_init(void)
-{
-       if (omap3_has_192mhz_clk())
-               omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
-
-       if (cpu_is_omap3630()) {
-               dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
-               dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
-               dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
-               dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
-               dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
-               dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
-       }
-
-       /*
-        * XXX This type of dynamic rewriting of the clock tree is
-        * deprecated and should be revised soon.
-        */
-       if (cpu_is_omap3630())
-               dpll4_dd = dpll4_dd_3630;
-       else
-               dpll4_dd = dpll4_dd_34xx;
-
-
-       /*
-        * 3505 must be tested before 3517, since 3517 returns true
-        * for both AM3517 chips and AM3517 family chips, which
-        * includes 3505.  Unfortunately there's no obvious family
-        * test for 3517/3505 :-(
-        */
-       if (soc_is_am35xx()) {
-               cpu_mask = RATE_IN_34XX;
-               omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks));
-               omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-               omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-       } else if (cpu_is_omap3630()) {
-               cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
-               omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks));
-               omap_clocks_register(omap36xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-               omap_clocks_register(omap34xx_omap36xx_clks,
-                                    ARRAY_SIZE(omap34xx_omap36xx_clks));
-               omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-               omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-       } else if (cpu_is_omap34xx()) {
-               if (omap_rev() == OMAP3430_REV_ES1_0) {
-                       cpu_mask = RATE_IN_3430ES1;
-                       omap_clocks_register(omap3430es1_clks,
-                                            ARRAY_SIZE(omap3430es1_clks));
-                       omap_clocks_register(omap34xx_omap36xx_clks,
-                                            ARRAY_SIZE(omap34xx_omap36xx_clks));
-                       omap_clocks_register(omap3xxx_clks,
-                                            ARRAY_SIZE(omap3xxx_clks));
-               } else {
-                       /*
-                        * Assume that anything that we haven't matched yet
-                        * has 3430ES2-type clocks.
-                        */
-                       cpu_mask = RATE_IN_3430ES2PLUS;
-                       omap_clocks_register(omap34xx_omap36xx_clks,
-                                            ARRAY_SIZE(omap34xx_omap36xx_clks));
-                       omap_clocks_register(omap36xx_omap3430es2plus_clks,
-                                            ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-                       omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                            ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-                       omap_clocks_register(omap3xxx_clks,
-                                            ARRAY_SIZE(omap3xxx_clks));
-               }
-       } else {
-               WARN(1, "clock: could not identify OMAP3 variant\n");
-       }
-
-               omap2_clk_disable_autoidle_all();
-
-       omap2_clk_enable_init_clocks(enable_init_clks,
-                                    ARRAY_SIZE(enable_init_clks));
-
-       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
-               (clk_get_rate(&osc_sys_ck) / 1000000),
-               (clk_get_rate(&osc_sys_ck) / 100000) % 10,
-               (clk_get_rate(&core_ck) / 1000000),
-               (clk_get_rate(&arm_fck) / 1000000));
-
-       /*
-        * Lock DPLL5 -- here only until other device init code can
-        * handle this
-        */
-       if (omap_rev() >= OMAP3430_REV_ES2_0)
-               omap3_clk_lock_dpll5();
-
-       /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
-       sdrc_ick_p = clk_get(NULL, "sdrc_ick");
-       arm_fck_p = clk_get(NULL, "arm_fck");
-
-       return 0;
-}
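The CLK() tables removed above are clkdev alias tables: each entry binds a (device name, connection id) pair to one of the statically defined struct clk objects. A minimal, purely illustrative sketch of how a driver consumed such an alias; 'pdev' is a hypothetical omap_hsmmc.1 platform device, and only clk_get()/clk_prepare_enable() from the common clk API are assumed:

	/* Looks up the alias registered by CLK("omap_hsmmc.1", "ick", &mmchs2_ick) */
	struct clk *ick = clk_get(&pdev->dev, "ick");
	if (IS_ERR(ick))
		return PTR_ERR(ick);
	clk_prepare_enable(ick);	/* ungate the MMC interface clock */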

index 4ae4ccebced285e0598282028a3ad909fe3ed26c..6124db5c37aebf5d3092b66c355c326bdf6815f9 100644
@@ -23,7 +23,6 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
-#include <linux/clk-private.h>
 #include <asm/cpu.h>
 
 #include <trace/events/power.h>
@@ -632,21 +631,6 @@ const struct clk_hw_omap_ops clkhwops_wait = {
        .find_companion = omap2_clk_dflt_find_companion,
 };
 
-/**
- * omap_clocks_register - register an array of omap_clk
- * @ocs: pointer to an array of omap_clk to register
- */
-void __init omap_clocks_register(struct omap_clk oclks[], int cnt)
-{
-       struct omap_clk *c;
-
-       for (c = oclks; c < oclks + cnt; c++) {
-               clkdev_add(&c->lk);
-               if (!__clk_init(NULL, c->lk.clk))
-                       omap2_init_clk_hw_omap_clocks(c->lk.clk);
-       }
-}
-
 /**
  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
  * @mpurate_ck_name: clk name of the clock to change rate
index 1cf9dd85248abbe6511ac8c59133f9f290327de7..a56742f96000a64eb125010903304fb0d7e37414 100644
@@ -40,23 +40,29 @@ struct omap_clk {
 struct clockdomain;
 
 #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name)     \
-       static struct clk _name = {                             \
+       static struct clk_core _name##_core = {                 \
                .name = #_name,                                 \
                .hw = &_name##_hw.hw,                           \
                .parent_names = _parent_array_name,             \
                .num_parents = ARRAY_SIZE(_parent_array_name),  \
                .ops = &_clkops_name,                           \
+       };                                                      \
+       static struct clk _name = {                             \
+               .core = &_name##_core,                          \
        };
 
 #define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,     \
                                _clkops_name, _flags)           \
-       static struct clk _name = {                             \
+       static struct clk_core _name##_core = {                 \
                .name = #_name,                                 \
                .hw = &_name##_hw.hw,                           \
                .parent_names = _parent_array_name,             \
                .num_parents = ARRAY_SIZE(_parent_array_name),  \
                .ops = &_clkops_name,                           \
                .flags = _flags,                                \
+       };                                                      \
+       static struct clk _name = {                             \
+               .core = &_name##_core,                          \
        };
 
 #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)          \
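For orientation, roughly what the reworked DEFINE_STRUCT_CLK above now expands to for a hypothetical clock foo_ick (the names other than the struct fields are placeholders; the clk_core/clk split is the one visible in the +/- lines of this hunk):

	static struct clk_core foo_ick_core = {
		.name		= "foo_ick",
		.hw		= &foo_ick_hw.hw,
		.parent_names	= foo_ick_parent_names,
		.num_parents	= ARRAY_SIZE(foo_ick_parent_names),
		.ops		= &aes2_ick_ops,
	};
	static struct clk foo_ick = {
		.core		= &foo_ick_core,	/* thin per-user handle wrapping the core clock */
	};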
@@ -238,7 +244,6 @@ struct ti_clk_features {
 extern struct ti_clk_features ti_clk_features;
 
 extern const struct clkops clkops_omap2_dflt_wait;
-extern const struct clkops clkops_dummy;
 extern const struct clkops clkops_omap2_dflt;
 
 extern struct clk_functions omap2_clk_functions;
@@ -247,7 +252,6 @@ extern const struct clksel_rate gpt_32k_rates[];
 extern const struct clksel_rate gpt_sys_rates[];
 extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
-extern struct clk dummy_ck;
 
 extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
 extern const struct clk_hw_omap_ops clkhwops_wait;
@@ -272,7 +276,5 @@ extern void __iomem *clk_memmaps[];
 extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 
-extern void omap_clocks_register(struct omap_clk *oclks, int cnt);
-
 void __init ti_clk_init_features(void);
 #endif
index ef4d21bfb96478da0b9ef681c931303aa9fb1bf4..61b60dfb14ce8a69e7d316aa17d831c7469d0222 100644 (file)
@@ -16,7 +16,6 @@
  * OMAP3xxx clock definition files.
  */
 
-#include <linux/clk-private.h>
 #include "clock.h"
 
 /* clksel_rate data common to 24xx/343x */
@@ -114,13 +113,3 @@ const struct clksel_rate div31_1to31_rates[] = {
        { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
        { .div = 0 },
 };
-
-/* Clocks shared between various OMAP SoCs */
-
-static struct clk_ops dummy_ck_ops = {};
-
-struct clk dummy_ck = {
-       .name = "dummy_clk",
-       .ops = &dummy_ck_ops,
-       .flags = CLK_IS_BASIC,
-};
index c2da2a0fe5ad64df80d6290f45658c697cfd4c96..44e57ec225d4401c1e2a81fcbfd929e499e65d58 100644 (file)
@@ -410,7 +410,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        int r;
        struct dpll_data *dd;
-       struct clk *parent;
+       struct clk_hw *parent;
 
        dd = clk->dpll_data;
        if (!dd)
@@ -427,13 +427,13 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
                }
        }
 
-       parent = __clk_get_parent(hw->clk);
+       parent = __clk_get_hw(__clk_get_parent(hw->clk));
 
        if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
-               WARN_ON(parent != dd->clk_bypass);
+               WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
                r = _omap3_noncore_dpll_bypass(clk);
        } else {
-               WARN_ON(parent != dd->clk_ref);
+               WARN_ON(parent != __clk_get_hw(dd->clk_ref));
                r = _omap3_noncore_dpll_lock(clk);
        }
 
@@ -473,6 +473,8 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw)
  * in failure.
  */
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_clk)
 {
@@ -549,7 +551,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
        if (!dd)
                return -EINVAL;
 
-       if (__clk_get_parent(hw->clk) != dd->clk_ref)
+       if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
+           __clk_get_hw(dd->clk_ref))
                return -EINVAL;
 
        if (dd->last_rounded_rate == 0)
index fc712240e5fd9d5173daca2df199d46e3ae521d9..f231be05b9a638de8e52cfe03d78765433815764 100644 (file)
@@ -202,6 +202,8 @@ out:
  * in failure.
  */
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
index e60780f0537492fb570e646e9bc4897101cab022..c4871c55bd8b641544a86a281121cad4c9219e42 100644 (file)
@@ -461,7 +461,17 @@ void __init omap3_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_soc_init = omap3xxx_clk_init;
+       if (!of_have_populated_dt()) {
+               omap3_prcm_legacy_iomaps_init();
+               if (soc_is_am35xx())
+                       omap_clk_soc_init = am35xx_clk_legacy_init;
+               else if (cpu_is_omap3630())
+                       omap_clk_soc_init = omap36xx_clk_legacy_init;
+               else if (omap_rev() == OMAP3430_REV_ES1_0)
+                       omap_clk_soc_init = omap3430es1_clk_legacy_init;
+               else
+                       omap_clk_soc_init = omap3430_clk_legacy_init;
+       }
 }
 
 void __init omap3430_init_early(void)
@@ -753,15 +763,17 @@ int __init omap_clk_init(void)
 
        ti_clk_init_features();
 
-       ret = of_prcm_init();
-       if (ret)
-               return ret;
+       if (of_have_populated_dt()) {
+               ret = of_prcm_init();
+               if (ret)
+                       return ret;
 
-       of_clk_init(NULL);
+               of_clk_init(NULL);
 
-       ti_dt_clk_init_retry_clks();
+               ti_dt_clk_init_retry_clks();
 
-       ti_dt_clockdomains_setup();
+               ti_dt_clockdomains_setup();
+       }
 
        ret = omap_clk_soc_init();
 
index 2418bdf28ca271599ae108824abe3b6493d0bbf4..cee0fe1ee6ffb0d3e5026a7328458feb34dc2732 100644 (file)
@@ -242,7 +242,7 @@ static int __init omap4_sar_ram_init(void)
 }
 omap_early_initcall(omap4_sar_ram_init);
 
-static struct of_device_id gic_match[] = {
+static const struct of_device_id gic_match[] = {
        { .compatible = "arm,cortex-a9-gic", },
        { .compatible = "arm,cortex-a15-gic", },
        { },
index 92afb723dcfc2364aca9e711a3fb4bf9c5245e12..355b089368715427627dd39f1014ed7024ae8459 100644 (file)
@@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
        if (ret == -EBUSY)
                pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name);
 
-       if (!ret) {
+       if (oh->clkdm) {
                /*
                 * Set the clockdomain to HW_AUTO, assuming that the
                 * previous state was HW_AUTO.
                 */
-               if (oh->clkdm && hwsup)
+               if (hwsup)
                        clkdm_allow_idle(oh->clkdm);
-       } else {
-               if (oh->clkdm)
-                       clkdm_hwmod_disable(oh->clkdm, oh);
+
+               clkdm_hwmod_disable(oh->clkdm, oh);
        }
 
        return ret;
@@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh)
        INIT_LIST_HEAD(&oh->master_ports);
        INIT_LIST_HEAD(&oh->slave_ports);
        spin_lock_init(&oh->_lock);
+       lockdep_set_class(&oh->_lock, &oh->hwmod_key);
 
        oh->_state = _HWMOD_STATE_REGISTERED;
 
index 9d4bec6ee7424c2643600ded08e0a0d9bef093fa..9611c91d9b82154e6d5d7f46c75c1b1ab6ffd588 100644 (file)
@@ -674,6 +674,7 @@ struct omap_hwmod {
        u32                             _sysc_cache;
        void __iomem                    *_mpu_rt_va;
        spinlock_t                      _lock;
+       struct lock_class_key           hwmod_key; /* unique lock class */
        struct list_head                node;
        struct omap_hwmod_ocp_if        *_mpu_port;
        unsigned int                    (*xlate_irq)(unsigned int);
index e8692e7675b865833e31b0b07c130d8512c3b221..16fe7a1b7a3578069746371dc789e51308cc2134 100644 (file)
@@ -1466,53 +1466,16 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = {
  *
  */
 
-static struct omap_hwmod_class dra7xx_pcie_hwmod_class = {
+static struct omap_hwmod_class dra7xx_pciess_hwmod_class = {
        .name   = "pcie",
 };
 
 /* pcie1 */
-static struct omap_hwmod dra7xx_pcie1_hwmod = {
+static struct omap_hwmod dra7xx_pciess1_hwmod = {
        .name           = "pcie1",
-       .class          = &dra7xx_pcie_hwmod_class,
+       .class          = &dra7xx_pciess_hwmod_class,
        .clkdm_name     = "pcie_clkdm",
        .main_clk       = "l4_root_clk_div",
-       .prcm = {
-               .omap4 = {
-                       .clkctrl_offs   = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
-                       .modulemode     = MODULEMODE_SWCTRL,
-               },
-       },
-};
-
-/* pcie2 */
-static struct omap_hwmod dra7xx_pcie2_hwmod = {
-       .name           = "pcie2",
-       .class          = &dra7xx_pcie_hwmod_class,
-       .clkdm_name     = "pcie_clkdm",
-       .main_clk       = "l4_root_clk_div",
-       .prcm = {
-               .omap4 = {
-                       .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
-                       .modulemode   = MODULEMODE_SWCTRL,
-               },
-       },
-};
-
-/*
- * 'PCIE PHY' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = {
-       .name   = "pcie-phy",
-};
-
-/* pcie1 phy */
-static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
-       .name           = "pcie1-phy",
-       .class          = &dra7xx_pcie_phy_hwmod_class,
-       .clkdm_name     = "l3init_clkdm",
-       .main_clk       = "l4_root_clk_div",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET,
@@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
        },
 };
 
-/* pcie2 phy */
-static struct omap_hwmod dra7xx_pcie2_phy_hwmod = {
-       .name           = "pcie2-phy",
-       .class          = &dra7xx_pcie_phy_hwmod_class,
-       .clkdm_name     = "l3init_clkdm",
+/* pcie2 */
+static struct omap_hwmod dra7xx_pciess2_hwmod = {
+       .name           = "pcie2",
+       .class          = &dra7xx_pciess_hwmod_class,
+       .clkdm_name     = "pcie_clkdm",
        .main_clk       = "l4_root_clk_div",
        .prcm = {
                .omap4 = {
@@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = {
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l3_main_1 -> pcie1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = {
+/* l3_main_1 -> pciess1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = {
        .master         = &dra7xx_l3_main_1_hwmod,
-       .slave          = &dra7xx_pcie1_hwmod,
+       .slave          = &dra7xx_pciess1_hwmod,
        .clk            = "l3_iclk_div",
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> pcie1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = {
+/* l4_cfg -> pciess1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = {
        .master         = &dra7xx_l4_cfg_hwmod,
-       .slave          = &dra7xx_pcie1_hwmod,
+       .slave          = &dra7xx_pciess1_hwmod,
        .clk            = "l4_root_clk_div",
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l3_main_1 -> pcie2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = {
+/* l3_main_1 -> pciess2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = {
        .master         = &dra7xx_l3_main_1_hwmod,
-       .slave          = &dra7xx_pcie2_hwmod,
+       .slave          = &dra7xx_pciess2_hwmod,
        .clk            = "l3_iclk_div",
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> pcie2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = {
-       .master         = &dra7xx_l4_cfg_hwmod,
-       .slave          = &dra7xx_pcie2_hwmod,
-       .clk            = "l4_root_clk_div",
-       .user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pcie1 phy */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = {
-       .master         = &dra7xx_l4_cfg_hwmod,
-       .slave          = &dra7xx_pcie1_phy_hwmod,
-       .clk            = "l4_root_clk_div",
-       .user           = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pcie2 phy */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = {
+/* l4_cfg -> pciess2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = {
        .master         = &dra7xx_l4_cfg_hwmod,
-       .slave          = &dra7xx_pcie2_phy_hwmod,
+       .slave          = &dra7xx_pciess2_hwmod,
        .clk            = "l4_root_clk_div",
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
        &dra7xx_l4_cfg__mpu,
        &dra7xx_l4_cfg__ocp2scp1,
        &dra7xx_l4_cfg__ocp2scp3,
-       &dra7xx_l3_main_1__pcie1,
-       &dra7xx_l4_cfg__pcie1,
-       &dra7xx_l3_main_1__pcie2,
-       &dra7xx_l4_cfg__pcie2,
-       &dra7xx_l4_cfg__pcie1_phy,
-       &dra7xx_l4_cfg__pcie2_phy,
+       &dra7xx_l3_main_1__pciess1,
+       &dra7xx_l4_cfg__pciess1,
+       &dra7xx_l3_main_1__pciess2,
+       &dra7xx_l4_cfg__pciess2,
        &dra7xx_l3_main_1__qspi,
        &dra7xx_l4_per3__rtcss,
        &dra7xx_l4_cfg__sata,
index 190fa43e74796809e47b73a9a84e8b6aa589aead..e642b079e9f313ac97876e8f74a8b10612a80b83 100644 (file)
@@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void)
 
 static void __init omap3_evm_legacy_init(void)
 {
+       hsmmc2_internal_input_clk();
        legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149);
 }
 
index 77752e49d8d4c666a2f48fb816546ccfbf53b212..b9061a6a2db8998314cf83dc0aa98dab2899ce43 100644 (file)
@@ -20,6 +20,7 @@ extern void __iomem *prm_base;
 extern u16 prm_features;
 extern void omap2_set_globals_prm(void __iomem *prm);
 int of_prcm_init(void);
+void omap3_prcm_legacy_iomaps_init(void);
 # endif
 
 /*
index c5e00c6714b1d99fc5afaabe3f41985e1fce4bad..5713bbdf83bc57ac7314f6e27455e3851772bc09 100644 (file)
@@ -674,7 +674,7 @@ int __init omap3xxx_prm_init(void)
        return prm_register(&omap3xxx_prm_ll_data);
 }
 
-static struct of_device_id omap3_prm_dt_match_table[] = {
+static const struct of_device_id omap3_prm_dt_match_table[] = {
        { .compatible = "ti,omap3-prm" },
        { }
 };
index 408c64efb80700868fa4c8b0138a2763a78bc161..d6d6bc39e05c962d80b3b777b50c1d802764af0b 100644 (file)
@@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
 {
        saved_mask[0] =
                omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
-                                       OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+                                       OMAP4_PRM_IRQENABLE_MPU_OFFSET);
        saved_mask[1] =
                omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
-                                       OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
+                                       OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
 
        omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
                                 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
@@ -712,7 +712,7 @@ int __init omap44xx_prm_init(void)
        return prm_register(&omap44xx_prm_ll_data);
 }
 
-static struct of_device_id omap_prm_dt_match_table[] = {
+static const struct of_device_id omap_prm_dt_match_table[] = {
        { .compatible = "ti,omap4-prm" },
        { .compatible = "ti,omap5-prm" },
        { .compatible = "ti,dra7-prm" },
index 264b5e29404d0eded3c9eca764384e8d93a5e563..bfaa7ba595cc832ec7783e759db4425c5e1e58c0 100644 (file)
@@ -35,6 +35,8 @@
 #include "prm44xx.h"
 #include "common.h"
 #include "clock.h"
+#include "cm.h"
+#include "control.h"
 
 /*
  * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
@@ -641,6 +643,15 @@ int __init of_prcm_init(void)
        return 0;
 }
 
+void __init omap3_prcm_legacy_iomaps_init(void)
+{
+       ti_clk_ll_ops = &omap_clk_ll_ops;
+
+       clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
+       clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
+       clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
+}
+
 static int __init prm_late_init(void)
 {
        if (prm_ll_data->late_init)
index a219dc310d5de545527030fe169b88a494c0578a..e03d8b5c9ad0aa174b46c2e54cf2ade518d4cfdc 100644 (file)
@@ -27,7 +27,6 @@ config ARCH_ATLAS7
        select CPU_V7
        select HAVE_ARM_SCU if SMP
        select HAVE_SMP
-       select SMP_ON_UP if SMP
        help
           Support for CSR SiRFSoC ARM Cortex A7 Platform
 
index 0c819bb88418369bab10f336766c9974c89f48dc..8cadb302a7d2f54a3bbcddaf7296606293d69e4e 100644 (file)
@@ -21,7 +21,7 @@ static void __init sirfsoc_init_late(void)
 }
 
 #ifdef CONFIG_ARCH_ATLAS6
-static const char *atlas6_dt_match[] __initconst = {
+static const char *const atlas6_dt_match[] __initconst = {
        "sirf,atlas6",
        NULL
 };
@@ -36,7 +36,7 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_PRIMA2
-static const char *prima2_dt_match[] __initconst = {
+static const char *const prima2_dt_match[] __initconst = {
        "sirf,prima2",
        NULL
 };
@@ -52,7 +52,7 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_ATLAS7
-static const char *atlas7_dt_match[] __initdata = {
+static const char *const atlas7_dt_match[] __initconst = {
        "sirf,atlas7",
        NULL
 };
index fc2b03c81e5f57f2f6be7146afa7022f3f22d476..e46c91094dde3c66065b4d7e040ef7a057d9d04a 100644 (file)
@@ -40,7 +40,7 @@ static void sirfsoc_secondary_init(unsigned int cpu)
        spin_unlock(&boot_lock);
 }
 
-static struct of_device_id clk_ids[]  = {
+static const struct of_device_id clk_ids[]  = {
        { .compatible = "sirf,atlas7-clkc" },
        {},
 };
index 343c4e3a7c5d1aceb136a196dcae946a7c8f5a69..f6d02e4cbcda4e06954c1c960a54b945e28fe58b 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/platform_data/video-pxafb.h>
 #include <mach/bitfield.h>
 #include <linux/platform_data/mmc-pxamci.h>
+#include <linux/smc91x.h>
 
 #include "generic.h"
 #include "devices.h"
@@ -81,11 +82,16 @@ static struct resource smc91x_resources[] = {
        }
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static void idp_backlight_power(int on)
index ad777b353bd5234797d93031ab6815747b65e363..eaee2c20b18956863557e0aba66e24f09e3f5070 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pwm_backlight.h>
+#include <linux/smc91x.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
        [1] = {
                .start  = LPD270_ETHERNET_IRQ,
                .end    = LPD270_ETHERNET_IRQ,
-               .flags  = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
 };
 
+struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static struct resource lpd270_flash_resources[] = {
index 850e506926dfb8adbc137f652353d8d84cc40197..c309593abdb223e9c9499c0469f080344ea40fd7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_data/video-clcd-versatile.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
+#include <linux/smc91x.h>
 #include <linux/ata_platform.h>
 #include <linux/amba/mmci.h>
 #include <linux/gfp.h>
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
        .phy_interface  = PHY_INTERFACE_MODE_MII,
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device realview_eth_device = {
        .name           = "smsc911x",
        .id             = 0,
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
        realview_eth_device.resource = res;
        if (strcmp(realview_eth_device.name, "smsc911x") == 0)
                realview_eth_device.dev.platform_data = &smsc911x_config;
+       else
+               realview_eth_device.dev.platform_data = &smc91x_platdata;
 
        return platform_device_register(&realview_eth_device);
 }
index 64c88d657f9efc6360600380910248a51fd3c73b..b3869cbbcc6858c5ddb6b8ab9808773cde4dfae6 100644 (file)
@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
        [1] = {
                .start          = IRQ_EB_ETH,
                .end            = IRQ_EB_ETH,
-               .flags          = IORESOURCE_IRQ,
+               .flags          = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
 };
 
index 5078932c1683278c45cb57a491483427132679c1..ae4eb7cc4bcc5a1c5dffa8a26f98e5c58b6cf3f8 100644 (file)
@@ -11,6 +11,7 @@ config ARCH_ROCKCHIP
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select DW_APB_TIMER_OF
+       select REGULATOR if PM
        select ROCKCHIP_TIMER
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
index 7d752ff39f91f4a7d737e460f3d09e089e1383f6..7c889c04604b5ec0d9faaec30d0b612275c4cda4 100644 (file)
@@ -24,7 +24,13 @@ extern unsigned long rkpm_bootdata_ddr_data;
 extern unsigned long rk3288_bootram_sz;
 
 void rockchip_slp_cpu_resume(void);
+#ifdef CONFIG_PM_SLEEP
 void __init rockchip_suspend_init(void);
+#else
+static inline void rockchip_suspend_init(void)
+{
+}
+#endif
 
 /****** following is rk3288 defined **********/
 #define RK3288_PMU_WAKEUP_CFG0         0x00
index 43eb1eaea0c927f8e24e94f27831d92931172ab1..83e656ea95ae13f1ed23003d6872479370e43abc 100644 (file)
@@ -63,7 +63,7 @@ static void __init s5pv210_dt_init_late(void)
        s5pv210_pm_init();
 }
 
-static char const *s5pv210_dt_compat[] __initconst = {
+static char const *const s5pv210_dt_compat[] __initconst = {
        "samsung,s5pc110",
        "samsung,s5pv210",
        NULL
index 169262e3040dd77b25ae268bc39880929805f63c..af868d258e664b5a50a4c678506bf7da7ac6dfd0 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
                        0x02000000, "smc91x-attrib"),
                { .flags = IORESOURCE_IRQ },
        };
+       struct smc91x_platdata smc91x_platdata = {
+               .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
+       };
        struct platform_device_info smc91x_devinfo = {
                .parent = &dev->dev,
                .name = "smc91x",
                .id = 0,
                .res = smc91x_resources,
                .num_res = ARRAY_SIZE(smc91x_resources),
+               .data = &smc91x_platdata,
+               .size_data = sizeof(smc91x_platdata),
        };
        int ret, irq;
 
index 091261878effde2e56d1b4f81a157f7d696de765..1525d7b5f1b74b6d06ac1276a56a45c23781e060 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/setup.h>
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
 #endif
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
 
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev = {
+               .platform_data  = &smc91x_platdata,
+       },
 };
 
 static struct platform_device *devices[] __initdata = {
index aad97be9cbe1b0fabe069136e2e264cc51de8a24..37f7b15c01bc073678b49189c4b9d4a5a9aacd63 100644 (file)
@@ -37,7 +37,7 @@ static void __init emev2_map_io(void)
        iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
 }
 
-static const char *emev2_boards_compat_dt[] __initconst = {
+static const char *const emev2_boards_compat_dt[] __initconst = {
        "renesas,emev2",
        NULL,
 };
index 483cb467bf65a13d1f414a88c0233b5e46e184d1..a0f3b1cd497cc70656637c6dd2215a07942c0b1e 100644 (file)
@@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end;
 
 extern unsigned long socfpga_cpu1start_addr;
 
-#define SOCFPGA_SCU_VIRT_BASE   0xfffec000
+#define SOCFPGA_SCU_VIRT_BASE   0xfee00000
 
 #endif
index 383d61e138af1e9dfeee1ccac39b6adb42f74236..f5e597c207b9e47d26c0a7d021563cc6bdc8bf35 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/cacheflush.h>
 
 #include "core.h"
 
@@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void)
                        (u32 *) &socfpga_cpu1start_addr))
                pr_err("SMP: Need cpu1-start-addr in device tree.\n");
 
+       /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */
+       smp_wmb();
+       sync_cache_w(&socfpga_cpu1start_addr);
+
        sys_manager_base_addr = of_iomap(np, 0);
 
        np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
index 8825bc9e2553057145a9d2839bf4001e71960bd8..3b1ac463a4947f21f3e82de66d8853a902367fa4 100644 (file)
@@ -13,6 +13,7 @@ menuconfig ARCH_STI
        select ARM_ERRATA_775420
        select PL310_ERRATA_753970 if CACHE_L2X0
        select PL310_ERRATA_769419 if CACHE_L2X0
+       select RESET_CONTROLLER
        help
          Include support for STiH41x SOCs like STiH415/416 using the device tree
          for discovery
index b067390cef4ed5c8432c77ab5ed4e7e78682bdcc..b373acade338ad7c64780da2f94bd3817b976f3e 100644 (file)
@@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = {
        "st,stih415",
        "st,stih416",
        "st,stih407",
+       "st,stih410",
        "st,stih418",
        NULL
 };
index ef016af1c9e769176378e2930f24dc2f060adc09..914341bcef25faf08631113ce98b90228d728d9a 100644 (file)
@@ -91,8 +91,6 @@ static void __init tegra_dt_init(void)
        struct soc_device *soc_dev;
        struct device *parent = NULL;
 
-       tegra_clocks_apply_init_table();
-
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                goto out;
index 0d4b5b46f15b551f191bedaa6f0b9394da84dad1..4d71c90f801caf6ac5a6e4d150c7751e6066f2da 100644 (file)
@@ -49,7 +49,7 @@ static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = {
        [DOMAIN_VAPE] = &ux500_pm_domain_vape,
 };
 
-static struct of_device_id ux500_pm_domain_matches[] = {
+static const struct of_device_id ux500_pm_domain_matches[] __initconst = {
        { .compatible = "stericsson,ux500-pm-domains", },
        { },
 };
index 9f9bc61ca64bc6af4ddf2e7bfe7e2d6ccac3fe55..7de3e92a13b0ef8896c56a9101e21ea7a8db6411 100644 (file)
@@ -35,7 +35,7 @@ static void __init versatile_dt_init(void)
                             versatile_auxdata_lookup, NULL);
 }
 
-static const char *versatile_dt_match[] __initconst = {
+static const char *const versatile_dt_match[] __initconst = {
        "arm,versatile-ab",
        "arm,versatile-pb",
        NULL,
index d6b16d9a78380e78ff7d33855f1c6caa557d2e83..3c2509b4b6946bfcfd9b4e7a325b3945ba6c244c 100644 (file)
@@ -73,6 +73,7 @@ config ARCH_VEXPRESS_TC2_PM
        depends on MCPM
        select ARM_CCI
        select ARCH_VEXPRESS_SPC
+       select ARM_CPU_SUSPEND
        help
          Support for CPU and cluster power management on Versatile Express
          with a TC2 (A15x2 A7x3) big.LITTLE core tile.
index c43c714555661337048b72a5a21a6b5357659567..9b4f29e595a423f6d00540a9a169decae0f3b4ea 100644 (file)
@@ -892,13 +892,6 @@ config CACHE_L2X0
 
 if CACHE_L2X0
 
-config CACHE_PL310
-       bool
-       default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
-       help
-         This option enables optimisations for the PL310 cache
-         controller.
-
 config PL310_ERRATA_588369
        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
        help
index c6c7696b8db97e33344fdec64767d9409800cb06..8f15f70622a6aa30a9ff7f20fb6cea3a963fc53b 100644 (file)
@@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np,
        }
 
        ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
-       if (ret)
-               return;
-
-       switch (assoc) {
-       case 16:
-               *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-               *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
-               *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-               break;
-       case 8:
-               *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-               *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-               break;
-       default:
-               pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
-                      assoc);
-               break;
+       if (!ret) {
+               switch (assoc) {
+               case 16:
+                       *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+                       *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
+                       *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+                       break;
+               case 8:
+                       *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+                       *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+                       break;
+               default:
+                       pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+                              assoc);
+                       break;
+               }
        }
 
        prefetch = l2x0_saved_regs.prefetch_ctrl;
index 903dba064a034c7e5d9fff950d3fa334301130d9..c27447653903f1134594fe309dc885285ea43bb3 100644 (file)
@@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn)
         */
        if (sizeof(mask) != sizeof(dma_addr_t) &&
            mask > (dma_addr_t)~0 &&
-           dma_to_pfn(dev, ~0) < max_pfn) {
+           dma_to_pfn(dev, ~0) < max_pfn - 1) {
                if (warn) {
                        dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
                                 mask);
@@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
        int i = 0;
 
        if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, gfp);
+               pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
index a982dc3190dfb3a841bbe196f76f58cdbb554f84..6333d9c178757fe4f365b8e765b2a9ea75e2b80b 100644 (file)
@@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                inf->name, fsr, addr);
+       show_pte(current->mm, addr);
 
        info.si_signo = inf->sig;
        info.si_errno = 0;
index 004e35cdcfffea6fa4add161e7e4da0a17bf6a44..cf30daff8932504f95422c2aa3b5882b154713b5 100644 (file)
@@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages,
                WARN_ON_ONCE(1);
        }
 
-       if (!is_module_address(start) || !is_module_address(end - 1))
+       if (start < MODULES_VADDR || start >= MODULES_END)
+               return -EINVAL;
+
+       if (end < MODULES_VADDR || end >= MODULES_END)
                return -EINVAL;
 
        data.set_mask = set_mask;
index f1ad9c2ab2e917197f3a3a00d7b8b8499138e933..a857794432d6756ac628d55f362512ed375bcabd 100644 (file)
                };
 
                sgenet0: ethernet@1f210000 {
-                       compatible = "apm,xgene-enet";
+                       compatible = "apm,xgene1-sgenet";
                        status = "disabled";
                        reg = <0x0 0x1f210000 0x0 0xd100>,
                              <0x0 0x1f200000 0x0 0Xc300>,
                };
 
                xgenet: ethernet@1f610000 {
-                       compatible = "apm,xgene-enet";
+                       compatible = "apm,xgene1-xgenet";
                        status = "disabled";
                        reg = <0x0 0x1f610000 0x0 0xd100>,
                              <0x0 0x1f600000 0x0 0Xc300>,
index 27f32962e55c60f8b0e28407f12746513473926f..4eac8dcea423e2ff50b0101555fa475507769c91 100644 (file)
@@ -34,6 +34,7 @@
                        reg = <0x0 0x0>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@1 {
                        device_type = "cpu";
@@ -41,6 +42,7 @@
                        reg = <0x0 0x1>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@2 {
                        device_type = "cpu";
@@ -48,6 +50,7 @@
                        reg = <0x0 0x2>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@3 {
                        device_type = "cpu";
                        reg = <0x0 0x3>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache0 {
+                       compatible = "cache";
                };
        };
 
index d429129ecb3d03fe3a7460ecd3ed9d02950cb193..133ee59de2d70672db3ca648952006cdfae7c7cb 100644 (file)
@@ -39,6 +39,7 @@
                        reg = <0x0 0x0>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A57_L2>;
                };
 
                A57_1: cpu@1 {
@@ -46,6 +47,7 @@
                        reg = <0x0 0x1>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A57_L2>;
                };
 
                A53_0: cpu@100 {
@@ -53,6 +55,7 @@
                        reg = <0x0 0x100>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
                };
 
                A53_1: cpu@101 {
@@ -60,6 +63,7 @@
                        reg = <0x0 0x101>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
                };
 
                A53_2: cpu@102 {
@@ -67,6 +71,7 @@
                        reg = <0x0 0x102>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
                };
 
                A53_3: cpu@103 {
                        reg = <0x0 0x103>;
                        device_type = "cpu";
                        enable-method = "psci";
+                       next-level-cache = <&A53_L2>;
+               };
+
+               A57_L2: l2-cache0 {
+                       compatible = "cache";
+               };
+
+               A53_L2: l2-cache1 {
+                       compatible = "cache";
                };
        };
 
index efc59b3baf63fb0eb374383a3eb9ebbd73dcbdb8..20addabbd127c89acf70399c5605dfedccb55203 100644 (file)
@@ -37,6 +37,7 @@
                        reg = <0x0 0x0>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@1 {
                        device_type = "cpu";
@@ -44,6 +45,7 @@
                        reg = <0x0 0x1>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@2 {
                        device_type = "cpu";
@@ -51,6 +53,7 @@
                        reg = <0x0 0x2>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
                };
                cpu@3 {
                        device_type = "cpu";
                        reg = <0x0 0x3>;
                        enable-method = "spin-table";
                        cpu-release-addr = <0x0 0x8000fff8>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache0 {
+                       compatible = "cache";
                };
        };
 
index 5720608c50b1b7f969f881b8695a78742868f379..abb79b3cfcfea158cdcaa8ac1ffcbd32699da9b0 100644 (file)
@@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
 aes-neon-blk-y := aes-glue-neon.o aes-neon.o
 
-AFLAGS_aes-ce.o                := -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-ce.o                := -DINTERLEAVE=4
 AFLAGS_aes-neon.o      := -DINTERLEAVE=4
 
 CFLAGS_aes-glue-ce.o   := -DUSE_V8_CRYPTO_EXTENSIONS
index 5901480bfdcaf1cd65aeb5b8ffd31c337ea73aed..750bac4e637e5323f29bd4859b6e655b3a035d95 100644 (file)
@@ -20,6 +20,9 @@
 #error "Only include this from assembly code"
 #endif
 
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -155,3 +158,5 @@ lr  .req    x30             // link register
 #endif
        orr     \rd, \lbits, \hbits, lsl #32
        .endm
+
+#endif /* __ASM_ASSEMBLER_H */
index 0710654631e789121f7b0d2247b485ebf585685a..c60643f14cda97e7ba6dd9676f10b133af4b0627 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __ASM_CPUIDLE_H
 #define __ASM_CPUIDLE_H
 
+#include <asm/proc-fns.h>
+
 #ifdef CONFIG_CPU_IDLE
 extern int cpu_init_idle(unsigned int cpu);
 extern int cpu_suspend(unsigned long arg);
index e2ff32a93b5cefc2c6fc2866d4d7befed27259fc..d2f49423c5dcbad70f63cbe777d522edbd41da75 100644 (file)
@@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
 __AARCH64_INSN_FUNCS(bics,     0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b,                0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,       0xFC000000, 0x94000000)
-__AARCH64_INSN_FUNCS(cbz,      0xFE000000, 0x34000000)
-__AARCH64_INSN_FUNCS(cbnz,     0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(cbz,      0x7F000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,     0x7F000000, 0x35000000)
+__AARCH64_INSN_FUNCS(tbz,      0x7F000000, 0x36000000)
+__AARCH64_INSN_FUNCS(tbnz,     0x7F000000, 0x37000000)
 __AARCH64_INSN_FUNCS(bcond,    0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc,      0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,      0xFFE0001F, 0xD4000002)
index 94674eb7e7bb3cebaf671ef28ec7de05145bb0c9..54bb4ba974417e269656d50adb524654851fbbd2 100644 (file)
  * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
  * not known to exist and will break with this configuration.
  *
+ * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
+ * (see hyp-init.S).
+ *
  * Note that when using 4K pages, we concatenate two first level page tables
  * together.
  *
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input  (T0SZ = 24)
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
 #else
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input  (T0SZ = 24)
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
index 6458b53731421343e7640a922a202a2b4c9682be..bbfb600fa82295a8a81c85603254946422b70992 100644 (file)
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD                (1 << PTRS_PER_S2_PGD_SHIFT)
 #define S2_PGD_ORDER           get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
+#define kvm_pgd_index(addr)    (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define KVM_PREALLOC_LEVEL     (0)
 #endif
 
-/**
- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
- * @kvm:       The KVM struct pointer for the VM.
- * @pgd:       The kernel pseudo pgd
- *
- * When the kernel uses more levels of page tables than the guest, we allocate
- * a fake PGD and pre-populate it to point to the next-level page table, which
- * will be the real initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
- * the kernel will use folded pud.  When KVM_PREALLOC_LEVEL==1, we
- * allocate 2 consecutive PUD pages.
- */
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-       unsigned int i;
-       unsigned long hwpgd;
-
-       if (KVM_PREALLOC_LEVEL == 0)
-               return 0;
-
-       hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
-       if (!hwpgd)
-               return -ENOMEM;
-
-       for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-               if (KVM_PREALLOC_LEVEL == 1)
-                       pgd_populate(NULL, pgd + i,
-                                    (pud_t *)hwpgd + i * PTRS_PER_PUD);
-               else if (KVM_PREALLOC_LEVEL == 2)
-                       pud_populate(NULL, pud_offset(pgd, 0) + i,
-                                    (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-       }
-
-       return 0;
-}
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
        pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
        return pmd_offset(pud, 0);
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-       if (KVM_PREALLOC_LEVEL > 0) {
-               unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
-               free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
-       }
+       if (KVM_PREALLOC_LEVEL > 0)
+               return PTRS_PER_S2_PGD * PAGE_SIZE;
+       return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 static inline bool kvm_page_empty(void *ptr)
index 16449c535e50fdcf358df430d67f38cce3893df3..800ec0e87ed955bbd38e93e448b19b6b4401625b 100644 (file)
@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-                             PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+                             PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
index 9a8fd84f8fb2b67fe4bc2d845e7f6b584cbf0799..941c375616e2072d3b9fbf6442426c3491f00c01 100644 (file)
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm)                          \
+do {                                                   \
+       BUG_ON(pgd == swapper_pg_dir);                  \
+       cpu_do_switch_mm(virt_to_phys(pgd),mm);         \
+} while (0)
 
 #define cpu_get_pgd()                                  \
 ({                                                     \
index f9be30ea1cbd8bc5b00cf2627c2e0be47ab54d98..20e9591a60cff97c5ef3c93cbadab1aa1312fe09 100644 (file)
@@ -45,7 +45,8 @@
 #define STACK_TOP              STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
 
-#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
+extern phys_addr_t arm64_dma_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
 #endif /* __KERNEL__ */
 
 struct debug_info {
index c028fe37456feade18c0c5737cf974899a8b6ee4..53d9c354219f9737c4d6e47ee3d1123c8e9588ce 100644 (file)
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                  unsigned long addr)
 {
+       __flush_tlb_pgtable(tlb->mm, addr);
        pgtable_page_dtor(pte);
        tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long addr)
 {
+       __flush_tlb_pgtable(tlb->mm, addr);
        tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
                                  unsigned long addr)
 {
+       __flush_tlb_pgtable(tlb->mm, addr);
        tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
index 73f0ce570fb31caa23fe7da9b99edd68d4ef6679..c3bb05b98616789a4143bb6d48333cf818653ab2 100644 (file)
 #include <linux/sched.h>
 #include <asm/cputype.h>
 
-extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
-
-extern struct cpu_tlb_fns cpu_tlb;
-
 /*
  *     TLB Management
  *     ==============
@@ -148,6 +143,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
                flush_tlb_all();
 }
 
+/*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+                                      unsigned long uaddr)
+{
+       unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+       dsb(ishst);
+       asm("tlbi       vae1is, %0" : : "r" (addr));
+       dsb(ish);
+}
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
index bef04afd603190135972256e52981b0889b047bf..5ee07eee80c2b0c1f5ba2b1292420eb396103be0 100644 (file)
@@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y            := cputable.o debug-monitors.o entry.o irq.o fpsimd.o   \
                           entry-fpsimd.o process.o ptrace.o setup.o signal.o   \
                           sys.o stacktrace.o time.o traps.o io.o vdso.o        \
-                          hyp-stub.o psci.o cpu_ops.o insn.o return_address.o  \
-                          cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
+                          hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o       \
+                          return_address.o cpuinfo.o cpu_errata.o              \
+                          alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o         \
                                           sys_compat.o entry32.o               \
index b42c7b480e1ee3da6f3e0896480c7888668bfe1d..ab21e0d58278825e5dfc0199cd18b853f07232c1 100644 (file)
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-       cpu_switch_mm(mm->pgd, mm);
+       if (mm == &init_mm)
+               cpu_set_reserved_ttbr0();
+       else
+               cpu_switch_mm(mm->pgd, mm);
+
        flush_tlb_all();
        if (icache_is_aivivt())
                __flush_icache_all();
@@ -354,3 +358,12 @@ void efi_virtmap_unload(void)
        efi_set_pgd(current->active_mm);
        preempt_enable();
 }
+
+/*
+ * UpdateCapsule() depends on the system being shutdown via
+ * ResetSystem().
+ */
+bool efi_poweroff_required(void)
+{
+       return efi_enabled(EFI_RUNTIME_SERVICES);
+}
index cf8556ae09d04ad0c81855870aa052cef746f8e7..c851be795080336938f4826cc0608234b0e34bfa 100644 (file)
@@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable)
 
        branch = aarch64_insn_gen_branch_imm(pc,
                                             (unsigned long)ftrace_graph_caller,
-                                            AARCH64_INSN_BRANCH_LINK);
+                                            AARCH64_INSN_BRANCH_NOLINK);
        nop = aarch64_insn_gen_nop();
 
        if (enable)
index 8ce88e08c030e16b90d2772d377160a12465fce2..07f930540f4a8b96b520cb630226ce700f79e732 100644 (file)
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag)
  * zeroing of .bss would clobber it.
  */
        .pushsection    .data..cacheline_aligned
-ENTRY(__boot_cpu_mode)
        .align  L1_CACHE_SHIFT
+ENTRY(__boot_cpu_mode)
        .long   BOOT_CPU_MODE_EL2
        .long   0
        .popsection
index 27d4864577e5d47cca67f626ea3638b9f3f8b4ec..c8eca88f12e6b2702df24bc758ac99815226827c 100644 (file)
@@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap)
 
        if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
                page = vmalloc_to_page(addr);
-       else
+       else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
                page = virt_to_page(addr);
+       else
+               return addr;
 
        BUG_ON(!page);
        set_fixmap(fixmap, page_to_phys(page));
index fde9923af859c5764110b1accf7a9da567559dd4..c6b1f3b96f4581f329a11805118797afa9556e1d 100644 (file)
@@ -21,6 +21,7 @@
 #include <stdarg.h>
 
 #include <linux/compat.h>
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -150,6 +151,13 @@ void machine_restart(char *cmd)
        local_irq_disable();
        smp_send_stop();
 
+       /*
+        * UpdateCapsule() depends on the system being reset via
+        * ResetSystem().
+        */
+       if (efi_enabled(EFI_RUNTIME_SERVICES))
+               efi_reboot(reboot_mode, NULL);
+
        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
new file mode 100644 (file)
index 0000000..cf83e61
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+       hvc     #0
+       ret
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+       smc     #0
+       ret
+ENDPROC(__invoke_psci_fn_smc)
index 3425f311c49ed99588998d3a9b42e035bf592d84..9b8a70ae64a187d2647a10576a789ebba213788c 100644 (file)
@@ -57,6 +57,9 @@ static struct psci_operations psci_ops;
 static int (*invoke_psci_fn)(u64, u64, u64, u64);
 typedef int (*psci_initcall_t)(const struct device_node *);
 
+asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+
 enum psci_function {
        PSCI_FN_CPU_SUSPEND,
        PSCI_FN_CPU_ON,
@@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state,
                        PSCI_0_2_POWER_STATE_AFFL_SHIFT;
 }
 
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
-                                        u64 arg2)
-{
-       asm volatile(
-                       __asmeq("%0", "x0")
-                       __asmeq("%1", "x1")
-                       __asmeq("%2", "x2")
-                       __asmeq("%3", "x3")
-                       "hvc    #0\n"
-               : "+r" (function_id)
-               : "r" (arg0), "r" (arg1), "r" (arg2));
-
-       return function_id;
-}
-
-static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
-                                        u64 arg2)
-{
-       asm volatile(
-                       __asmeq("%0", "x0")
-                       __asmeq("%1", "x1")
-                       __asmeq("%2", "x2")
-                       __asmeq("%3", "x3")
-                       "smc    #0\n"
-               : "+r" (function_id)
-               : "r" (arg0), "r" (arg1), "r" (arg2));
-
-       return function_id;
-}
-
 static int psci_get_version(void)
 {
        int err;
index c20a300e22137f741a236cb8385864b90fac9df4..d26fcd4cd6e6219cae5213b4b329d53d90a86a9b 100644 (file)
@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
        case __SI_TIMER:
                 err |= __put_user(from->si_tid, &to->si_tid);
                 err |= __put_user(from->si_overrun, &to->si_overrun);
-                err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
-                                  &to->si_ptr);
+                err |= __put_user(from->si_int, &to->si_int);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
        case __SI_MESGQ: /* But this is */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
-               err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+               err |= __put_user(from->si_int, &to->si_int);
                break;
        case __SI_SYS:
                err |= __put_user((compat_uptr_t)(unsigned long)
index fe652ffd34c28090076b8d8358c6e40f7d77034d..efa79e8d4196d01318779cd76f2e926c40765ae9 100644 (file)
@@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime)
 /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
 ENTRY(__kernel_clock_getres)
        .cfi_startproc
-       cbz     w1, 3f
-
        cmp     w0, #CLOCK_REALTIME
        ccmp    w0, #CLOCK_MONOTONIC, #0x4, ne
        b.ne    1f
@@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres)
        b.ne    4f
        ldr     x2, 6f
 2:
+       cbz     w1, 3f
        stp     xzr, x2, [x1]
 
 3:     /* res == NULL. */
index 0a24b9b8c6982ddc675bbbd40b7baaecc247f1f6..ef7d112f5ce0df9ca1c1a793301c2c1b3874e23f 100644 (file)
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
        unsigned long val;
        void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
+               if (flags & __GFP_ZERO)
+                       memset(ptr, 0, size);
        }
 
        return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                struct page *page;
+               void *addr;
 
                size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
                        return NULL;
 
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
-               return page_address(page);
+               addr = page_address(page);
+               if (flags & __GFP_ZERO)
+                       memset(addr, 0, size);
+               return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
        if (!coherent && !(flags & __GFP_WAIT)) {
                struct page *page = NULL;
-               void *addr = __alloc_from_pool(size, &page);
+               void *addr = __alloc_from_pool(size, &page, flags);
 
                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));
@@ -348,8 +354,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
 };
 
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
 static int __init atomic_pool_init(void)
 {
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +415,13 @@ out:
        return -ENOMEM;
 }
 
-static int __init swiotlb_late_init(void)
+static int __init arm64_dma_init(void)
 {
-       size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+       int ret;
 
        dma_ops = &swiotlb_dma_ops;
 
-       return swiotlb_late_init_with_default_size(swiotlb_size);
-}
-
-static int __init arm64_dma_init(void)
-{
-       int ret = 0;
-
-       ret |= swiotlb_late_init();
-       ret |= atomic_pool_init();
+       ret = atomic_pool_init();
 
        return ret;
 }
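
A minimal sketch of the zeroing pattern these hunks add; EX_GFP_ZERO and pool_alloc_sketch are invented names, and malloc stands in for the atomic pool and the CMA area. The point is that memory handed out from a pre-existing pool is not cleared by the page allocator, so the wrapper must honour the zeroing request itself:

#include <stdlib.h>
#include <string.h>

#define EX_GFP_ZERO 0x1u                /* stand-in for __GFP_ZERO */

/* Sketch: clear the buffer here, because nothing upstream will do it. */
static void *pool_alloc_sketch(size_t size, unsigned int flags)
{
        void *ptr = malloc(size);       /* stands in for the pool/CMA source */

        if (ptr && (flags & EX_GFP_ZERO))
                memset(ptr, 0, size);
        return ptr;
}
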
index 71145f952070ebcd6067fcb49c5ad29e5c2ec11f..ae85da6307bb921e286bb7a99a218cab5dbe137e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
+#include <linux/swiotlb.h>
 
 #include <asm/fixmap.h>
 #include <asm/memory.h>
@@ -45,6 +46,7 @@
 #include "mm.h"
 
 phys_addr_t memstart_addr __read_mostly = 0;
+phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               max_dma = PFN_DOWN(max_zone_dma_phys());
+               max_dma = PFN_DOWN(arm64_dma_phys_limit);
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-       phys_addr_t dma_phys_limit = 0;
-
        memblock_enforce_memory_limit(memory_limit);
 
        /*
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
-               dma_phys_limit = max_zone_dma_phys();
-       dma_contiguous_reserve(dma_phys_limit);
+               arm64_dma_phys_limit = max_zone_dma_phys();
+       else
+               arm64_dma_phys_limit = PHYS_MASK + 1;
+       dma_contiguous_reserve(arm64_dma_phys_limit);
 
        memblock_allow_resize();
        memblock_dump_all();
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
+       swiotlb_init(1);
+
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
index bb0ea94c4ba1a563383f92f0f6f212c770b82d23..1d3ec3ddd84b72e654819b7818d03cbaf082815e 100644 (file)
@@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages,
                WARN_ON_ONCE(1);
        }
 
-       if (!is_module_address(start) || !is_module_address(end - 1))
+       if (start < MODULES_VADDR || start >= MODULES_END)
+               return -EINVAL;
+
+       if (end < MODULES_VADDR || end >= MODULES_END)
                return -EINVAL;
 
        data.set_mask = set_mask;
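
The pageattr hunk above replaces a per-address module lookup with two explicit range checks. A sketch of the equivalent test, with placeholder constants standing in for MODULES_VADDR and MODULES_END:

#include <stdbool.h>
#include <stdint.h>

#define EX_MODULES_VADDR 0xffff000000000000ull  /* placeholder value */
#define EX_MODULES_END   0xffff000008000000ull  /* placeholder value */

/* Sketch: both ends of the range must lie inside the module area before
 * any page permissions are changed.
 */
static bool range_in_modules(uint64_t start, uint64_t end)
{
        if (start < EX_MODULES_VADDR || start >= EX_MODULES_END)
                return false;
        if (end < EX_MODULES_VADDR || end >= EX_MODULES_END)
                return false;
        return true;
}
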
diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/arch/blackfin/include/asm/bfin_rotary.h
deleted file mode 100644 (file)
index 8895a75..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * board initialization should put one of these structures into platform_data
- * and place the bfin-rotary onto platform_bus named "bfin-rotary".
- *
- * Copyright 2008-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _BFIN_ROTARY_H
-#define _BFIN_ROTARY_H
-
-/* mode bitmasks */
-#define ROT_QUAD_ENC   CNTMODE_QUADENC /* quadrature/grey code encoder mode */
-#define ROT_BIN_ENC    CNTMODE_BINENC  /* binary encoder mode */
-#define ROT_UD_CNT     CNTMODE_UDCNT   /* rotary counter mode */
-#define ROT_DIR_CNT    CNTMODE_DIRCNT  /* direction counter mode */
-
-#define ROT_DEBE       DEBE            /* Debounce Enable */
-
-#define ROT_CDGINV     CDGINV          /* CDG Pin Polarity Invert */
-#define ROT_CUDINV     CUDINV          /* CUD Pin Polarity Invert */
-#define ROT_CZMINV     CZMINV          /* CZM Pin Polarity Invert */
-
-struct bfin_rotary_platform_data {
-       /* set rotary UP KEY_### or BTN_### in case you prefer
-        * bfin-rotary to send EV_KEY otherwise set 0
-        */
-       unsigned int rotary_up_key;
-       /* set rotary DOWN KEY_### or BTN_### in case you prefer
-        * bfin-rotary to send EV_KEY otherwise set 0
-        */
-       unsigned int rotary_down_key;
-       /* set rotary BUTTON KEY_### or BTN_### */
-       unsigned int rotary_button_key;
-       /* set rotary Relative Axis REL_### in case you prefer
-        * bfin-rotary to send EV_REL otherwise set 0
-        */
-       unsigned int rotary_rel_code;
-       unsigned short debounce;        /* 0..17 */
-       unsigned short mode;
-       unsigned short pm_wakeup;
-};
-
-/* CNT_CONFIG bitmasks */
-#define CNTE           (1 << 0)        /* Counter Enable */
-#define DEBE           (1 << 1)        /* Debounce Enable */
-#define CDGINV         (1 << 4)        /* CDG Pin Polarity Invert */
-#define CUDINV         (1 << 5)        /* CUD Pin Polarity Invert */
-#define CZMINV         (1 << 6)        /* CZM Pin Polarity Invert */
-#define CNTMODE_SHIFT  8
-#define CNTMODE                (0x7 << CNTMODE_SHIFT)  /* Counter Operating Mode */
-#define ZMZC           (1 << 1)        /* CZM Zeroes Counter Enable */
-#define BNDMODE_SHIFT  12
-#define BNDMODE                (0x3 << BNDMODE_SHIFT)  /* Boundary register Mode */
-#define INPDIS         (1 << 15)       /* CUG and CDG Input Disable */
-
-#define CNTMODE_QUADENC        (0 << CNTMODE_SHIFT)    /* quadrature encoder mode */
-#define CNTMODE_BINENC (1 << CNTMODE_SHIFT)    /* binary encoder mode */
-#define CNTMODE_UDCNT  (2 << CNTMODE_SHIFT)    /* up/down counter mode */
-#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT)    /* direction counter mode */
-#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT)    /* direction timer mode */
-
-#define BNDMODE_COMP   (0 << BNDMODE_SHIFT)    /* boundary compare mode */
-#define BNDMODE_ZERO   (1 << BNDMODE_SHIFT)    /* boundary compare and zero mode */
-#define BNDMODE_CAPT   (2 << BNDMODE_SHIFT)    /* boundary capture mode */
-#define BNDMODE_AEXT   (3 << BNDMODE_SHIFT)    /* boundary auto-extend mode */
-
-/* CNT_IMASK bitmasks */
-#define ICIE           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Enable */
-#define UCIE           (1 << 1)        /* Up count Interrupt Enable */
-#define DCIE           (1 << 2)        /* Down count Interrupt Enable */
-#define MINCIE         (1 << 3)        /* Min Count Interrupt Enable */
-#define MAXCIE         (1 << 4)        /* Max Count Interrupt Enable */
-#define COV31IE                (1 << 5)        /* Bit 31 Overflow Interrupt Enable */
-#define COV15IE                (1 << 6)        /* Bit 15 Overflow Interrupt Enable */
-#define CZEROIE                (1 << 7)        /* Count to Zero Interrupt Enable */
-#define CZMIE          (1 << 8)        /* CZM Pin Interrupt Enable */
-#define CZMEIE         (1 << 9)        /* CZM Error Interrupt Enable */
-#define CZMZIE         (1 << 10)       /* CZM Zeroes Counter Interrupt Enable */
-
-/* CNT_STATUS bitmasks */
-#define ICII           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Identifier */
-#define UCII           (1 << 1)        /* Up count Interrupt Identifier */
-#define DCII           (1 << 2)        /* Down count Interrupt Identifier */
-#define MINCII         (1 << 3)        /* Min Count Interrupt Identifier */
-#define MAXCII         (1 << 4)        /* Max Count Interrupt Identifier */
-#define COV31II                (1 << 5)        /* Bit 31 Overflow Interrupt Identifier */
-#define COV15II                (1 << 6)        /* Bit 15 Overflow Interrupt Identifier */
-#define CZEROII                (1 << 7)        /* Count to Zero Interrupt Identifier */
-#define CZMII          (1 << 8)        /* CZM Pin Interrupt Identifier */
-#define CZMEII         (1 << 9)        /* CZM Error Interrupt Identifier */
-#define CZMZII         (1 << 10)       /* CZM Zeroes Counter Interrupt Identifier */
-
-/* CNT_COMMAND bitmasks */
-#define W1LCNT         0xf             /* Load Counter Register */
-#define W1LMIN         0xf0            /* Load Min Register */
-#define W1LMAX         0xf00           /* Load Max Register */
-#define W1ZMONCE       (1 << 12)       /* Enable CZM Clear Counter Once */
-
-#define W1LCNT_ZERO    (1 << 0)        /* write 1 to load CNT_COUNTER with zero */
-#define W1LCNT_MIN     (1 << 2)        /* write 1 to load CNT_COUNTER from CNT_MIN */
-#define W1LCNT_MAX     (1 << 3)        /* write 1 to load CNT_COUNTER from CNT_MAX */
-
-#define W1LMIN_ZERO    (1 << 4)        /* write 1 to load CNT_MIN with zero */
-#define W1LMIN_CNT     (1 << 5)        /* write 1 to load CNT_MIN from CNT_COUNTER */
-#define W1LMIN_MAX     (1 << 7)        /* write 1 to load CNT_MIN from CNT_MAX */
-
-#define W1LMAX_ZERO    (1 << 8)        /* write 1 to load CNT_MAX with zero */
-#define W1LMAX_CNT     (1 << 9)        /* write 1 to load CNT_MAX from CNT_COUNTER */
-#define W1LMAX_MIN     (1 << 10)       /* write 1 to load CNT_MAX from CNT_MIN */
-
-/* CNT_DEBOUNCE bitmasks */
-#define DPRESCALE      0xf             /* Load Counter Register */
-
-#endif
index 9501bd8d9cd193e94f5a7a1896fb3ab8b1cb27fe..68f2a8a806ead46ab1567d0c724c5109378da264 100644 (file)
@@ -666,7 +666,14 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+       P_CNT_CUD,
+       P_CNT_CDG,
+       P_CNT_CZM,
+       0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -676,9 +683,15 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
        .debounce          = 10,        /* 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
        .pm_wakeup         = 1,
+       .pin_list          = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index d64f565dc2a0aaafcbaf53af5f33b1738d7ef2f7..d4219e8e5ab865fb196d4689ed315bdf2d255e85 100644 (file)
@@ -1092,7 +1092,14 @@ static struct platform_device bfin_device_gpiokeys = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+       P_CNT_CUD,
+       P_CNT_CDG,
+       P_CNT_CZM,
+       0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -1102,9 +1109,15 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
        .debounce          = 10,        /* 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
        .pm_wakeup         = 1,
+       .pin_list          = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index 1fe7ff286619f693c113faeac592ce3167d8de72..4204b9842532134e7a657bd0d7f9b5c8f1aeb7d7 100644 (file)
@@ -159,7 +159,7 @@ static struct platform_device bf54x_kpad_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -172,6 +172,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index e2c0b024ce88f2593f4551b711b3821d8ae3879e..7f9fc272ec30576827ad62922afb0ff799ab89c3 100644 (file)
@@ -75,7 +75,7 @@ static struct platform_device bfin_isp1760_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -87,6 +87,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index 78d4483ba40c83fcd458fcda183c5b3d6a828267..ec4db6df5e0dde575278b3d11a6a90a2f5ffb224 100644 (file)
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page;
  */
 #define pgtable_cache_init()   do { } while (0)
 
+/*
+ * c6x is !MMU, so define the simplest implementation
+ */
+#define pgprot_writecombine pgprot_noncached
+
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_C6X_PGTABLE_H */
index 93bcf2abd1a15d4df394ed2a6b2d203dcbea620a..07d7a7ef8bd59c7387328e6add1b6426b0b51a52 100644 (file)
@@ -123,12 +123,14 @@ extern unsigned long empty_zero_page;
 #define PGDIR_MASK             (~(PGDIR_SIZE - 1))
 #define PTRS_PER_PGD           64
 
+#define __PAGETABLE_PUD_FOLDED
 #define PUD_SHIFT              26
 #define PTRS_PER_PUD           1
 #define PUD_SIZE               (1UL << PUD_SHIFT)
 #define PUD_MASK               (~(PUD_SIZE - 1))
 #define PUE_SIZE               256
 
+#define __PAGETABLE_PMD_FOLDED
 #define PMD_SHIFT              26
 #define PMD_SIZE               (1UL << PMD_SHIFT)
 #define PMD_MASK               (~(PMD_SIZE - 1))
index 8fd8ee70266a13cb61646b3b59530ee4181291cf..421e6ba3a173794d5a9cc8ba3b0465f2056a328f 100644 (file)
@@ -13,6 +13,7 @@
  * the M32R is two-level, so we don't really have any
  * PMD directory physically.
  */
+#define __PAGETABLE_PMD_FOLDED
 #define PMD_SHIFT      22
 #define PTRS_PER_PMD   1
 
index 28a145bfbb7151a567dd825026eac10f07845e74..35ed4a9981aefb627ed785ce311198c59000c396 100644 (file)
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE   512
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   1024
 #else
index 881071c0794221475308604464cd7bd441b95945..13272fd5a5baec8e3b1a4de778a6982abf0adae7 100644 (file)
@@ -149,8 +149,8 @@ extern void exit_thread(void);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define        KSTK_EIP(tsk)   ((tsk)->thread.kernel_context->CurrPC)
-#define        KSTK_ESP(tsk)   ((tsk)->thread.kernel_context->AX[0].U0)
+#define        KSTK_EIP(tsk)   (task_pt_regs(tsk)->ctx.CurrPC)
+#define        KSTK_ESP(tsk)   (task_pt_regs(tsk)->ctx.AX[0].U0)
 
 #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
 
index 0536bc021cc6c66667f0cbb66601c86b0ace9c10..ef548510b951b306d7b70cb941d08dc0dec3e4dd 100644 (file)
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception):
  * The LP register should point to the location where the called function
  * should return.  [note that MAKE_SYS_CALL uses label 1] */
        /* See if the system call number is valid */
+       blti    r12, 5f
        addi    r11, r12, -__NR_syscalls;
-       bgei    r11,5f;
+       bgei    r11, 5f;
        /* Figure out which function to use for this system call.  */
        /* Note Microblaze barrel shift is optional, so don't rely on it */
        add     r12, r12, r12;                  /* convert num -> ptr */
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception):
 
        /* The syscall number is invalid, return an error.  */
 5:
-       rtsd    r15, 8;         /* looks like a normal subroutine return */
+       braid   ret_from_trap
        addi    r3, r0, -ENOSYS;
 
 /* Entry point used to return from a syscall/trap */
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap):
        bri     1b
 
        /* Maybe handle a signal */
-5:     
+5:
        andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
        beqi    r11, 4f;                /* Signals to handle, handle them */
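
The two changes above harden syscall dispatch: the new blti rejects negative syscall numbers before the table index is formed, and the invalid-number path now leaves through ret_from_trap. In C terms the guard amounts to the following sketch; EX_NR_SYSCALLS and the handler signature are placeholders:

#include <errno.h>

#define EX_NR_SYSCALLS 400              /* placeholder for __NR_syscalls */

typedef long (*syscall_fn_t)(long, long, long);

/* Sketch: check both bounds before indexing the syscall table, and report
 * failures as -ENOSYS through the normal trap-return path.
 */
static long dispatch_sketch(long nr, syscall_fn_t *table, long a, long b, long c)
{
        if (nr < 0 || nr >= EX_NR_SYSCALLS)
                return -ENOSYS;
        return table[nr](a, b, c);
}
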
 
index 843713c05b79fe69f8bbc057a17a5e200dfc54da..c7a16904cd03c705333f645419ec07888fc6fd87 100644 (file)
@@ -54,6 +54,7 @@ config MIPS
        select CPU_PM if CPU_IDLE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_BINFMT_ELF_STATE
+       select SYSCTL_EXCEPTION_TRACE
 
 menu "Machine selection"
 
@@ -376,8 +377,10 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS32_R3_5
+       select SYS_HAS_CPU_MIPS32_R6
        select SYS_HAS_CPU_MIPS64_R1
        select SYS_HAS_CPU_MIPS64_R2
+       select SYS_HAS_CPU_MIPS64_R6
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_SUPPORTS_32BIT_KERNEL
@@ -1033,6 +1036,9 @@ config MIPS_MACHINE
 config NO_IOPORT_MAP
        def_bool n
 
+config GENERIC_CSUM
+       bool
+
 config GENERIC_ISA_DMA
        bool
        select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1146,6 +1152,9 @@ config SOC_PNX8335
        bool
        select SOC_PNX833X
 
+config MIPS_SPRAM
+       bool
+
 config SWAP_IO_SPACE
        bool
 
@@ -1304,6 +1313,22 @@ config CPU_MIPS32_R2
          specific type of processor in your system, choose that one;
          otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+       bool "MIPS32 Release 6 (EXPERIMENTAL)"
+       depends on SYS_HAS_CPU_MIPS32_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       select HAVE_KVM
+       select MIPS_O32_FP64_SUPPORT
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.  New MIPS processors, starting with the Warrior
+         family, are based on a MIPS32r6 processor. If you own an older
+         processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
        bool "MIPS64 Release 1"
        depends on SYS_HAS_CPU_MIPS64_R1
@@ -1339,6 +1364,21 @@ config CPU_MIPS64_R2
          specific type of processor in your system, choose that one;
          otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+       bool "MIPS64 Release 6 (EXPERIMENTAL)"
+       depends on SYS_HAS_CPU_MIPS64_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_64BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS64 architecture.  New MIPS processors, starting with the Warrior
+         family, are based on a MIPS64r6 processor. If you own an older
+         processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
        bool "R3000"
        depends on SYS_HAS_CPU_R3000
@@ -1539,7 +1579,7 @@ endchoice
 config CPU_MIPS32_3_5_FEATURES
        bool "MIPS32 Release 3.5 Features"
        depends on SYS_HAS_CPU_MIPS32_R3_5
-       depends on CPU_MIPS32_R2
+       depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture including features from the 3.5 release such as
@@ -1659,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
        bool
 
+config SYS_HAS_CPU_MIPS32_R6
+       bool
+
 config SYS_HAS_CPU_MIPS64_R1
        bool
 
 config SYS_HAS_CPU_MIPS64_R2
        bool
 
+config SYS_HAS_CPU_MIPS64_R6
+       bool
+
 config SYS_HAS_CPU_R3000
        bool
 
@@ -1764,11 +1810,11 @@ endmenu
 #
 config CPU_MIPS32
        bool
-       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
        bool
-       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1780,6 +1826,12 @@ config CPU_MIPSR1
 config CPU_MIPSR2
        bool
        default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+       select MIPS_SPRAM
+
+config CPU_MIPSR6
+       bool
+       default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+       select MIPS_SPRAM
 
 config EVA
        bool
@@ -2013,6 +2065,19 @@ config MIPS_MT_FPAFF
        default y
        depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+       bool "MIPS R2-to-R6 emulator"
+       depends on CPU_MIPSR6 && !SMP
+       default y
+       help
+         Choose this option if you want to run non-R6 MIPS userland code.
+         Even if you say 'Y' here, the emulator will still be disabled by
+         default. You can enable it using the 'mipsr2emul' kernel option.
+         The only reason this is a build-time option is to save ~14K from the
+         final kernel image.
+
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+       depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
        bool "VPE loader support."
        depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2148,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
          here.
 
 config CPU_MICROMIPS
-       depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+       depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
        bool "microMIPS"
        help
          When this option is enabled the kernel will be built using the
index 88a9f433f6fc3ca7affc6c9a011622d26e10ada9..3a2b775e845893513e2ab187ca95955e0be17848 100644 (file)
@@ -122,17 +122,4 @@ config SPINLOCK_TEST
        help
          Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-       bool "Run FP32 & FPXX code with hybrid FPRs"
-       depends on MIPS_O32_FP64_SUPPORT
-       help
-         The hybrid FPR scheme is normally used only when a program needs to
-         execute a mix of FP32 & FP64A code, since the trapping & emulation
-         that it entails is expensive. When enabled, this option will lead
-         to the kernel running programs which use the FP32 & FPXX FP ABIs
-         using the hybrid FPR scheme, which can be useful for debugging
-         purposes.
-
-         If unsure, say N.
-
 endmenu
index 2563a088d3b867037fa4f46332ca010f6984547d..8f57fc72d62c8334d35e91ca48f1aa6ead7d08d4 100644 (file)
@@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-# For smartmips configurations, there are hundreds of warnings due to ISA overrides
-# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
-# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
-# similar directives in the kernel will spam the build logs with the following warnings:
-# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
-# or
-# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
-# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
-# been fixed properly.
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
-
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
-
-ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa  := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
-cflags-$(toolchain-msa)                += -DTOOLCHAIN_SUPPORTS_MSA
-endif
-
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
@@ -156,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1)    += $(call cc-option,-march=mips32,-mips32 -U_MIPS
                        -Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
                        -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)     += -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)     += $(call cc-option,-march=r5400,-march=r5000) \
                        -Wa,--trap
@@ -182,6 +166,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS)     += -march=mips32 -Wa,-mips32 -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R2; older versions as just R1.  This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS.  The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+cflags-$(CONFIG_CPU_LOONGSON3)  +=                                     \
+       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+       -Wa,-mips64r2 -Wa,--trap
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
@@ -194,6 +188,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
 endif
 endif
 
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags                            := "$(cflags-y)"
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
+cflags-$(CONFIG_CPU_MICROMIPS)         += $(call cc-option,$(mips-cflags),-mmicromips)
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa                          := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa)                        += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # Firmware support
 #
@@ -287,7 +298,11 @@ boot-y                     += vmlinux.ecoff
 boot-y                 += vmlinux.srec
 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
 boot-y                 += uImage
+boot-y                 += uImage.bin
+boot-y                 += uImage.bz2
 boot-y                 += uImage.gz
+boot-y                 += uImage.lzma
+boot-y                 += uImage.lzo
 endif
 
 # compressed boot image targets (arch/mips/boot/compressed/)
@@ -386,7 +401,11 @@ define archhelp
        echo '  vmlinuz.bin          - Raw binary zboot image'
        echo '  vmlinuz.srec         - SREC zboot image'
        echo '  uImage               - U-Boot image'
+       echo '  uImage.bin           - U-Boot image (uncompressed)'
+       echo '  uImage.bz2           - U-Boot image (bz2)'
        echo '  uImage.gz            - U-Boot image (gzip)'
+       echo '  uImage.lzma          - U-Boot image (lzma)'
+       echo '  uImage.lzo           - U-Boot image (lzo)'
        echo '  dtbs                 - Device-tree blobs for enabled boards'
        echo
        echo '  These will be default as appropriate for a configured platform.'
index 48a9dfc55b51aa4a3819bae6c686b0cd5f38a7f0..6a98d2cb402ccb9c69ad505d819ed3f82f6e4932 100644 (file)
@@ -127,12 +127,20 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
                t = 396000000;
        else {
                t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+               if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+                       t &= 0x3f;
                t *= parent_rate;
        }
 
        return t;
 }
 
+void __init alchemy_set_lpj(void)
+{
+       preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+       preset_lpj /= 2 * HZ;
+}
+
 static struct clk_ops alchemy_clkops_cpu = {
        .recalc_rate    = alchemy_clk_cpu_recalc,
 };
@@ -315,17 +323,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
 
 /* lrclk: external synchronous static bus clock ***********************/
 
-static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
 {
-       /* MEM_STCFG0[15:13] = divisor.
+       /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+        * otherwise lrclk=pclk/4.
+        * All other variants: MEM_STCFG0[15:13] = divisor.
         * L/RCLK = periph_clk / (divisor + 1)
         * On Au1000, Au1500, Au1100 it's called LCLK,
         * on later models it's called RCLK, but it's the same thing.
         */
        struct clk *c;
-       unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;
+       unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
 
-       v = (v & 7) + 1;
+       switch (t) {
+       case ALCHEMY_CPU_AU1000:
+       case ALCHEMY_CPU_AU1500:
+               v = 4 + ((v >> 11) & 1);
+               break;
+       default:        /* all other models */
+               v = ((v >> 13) & 7) + 1;
+       }
        c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
                                      pn, 0, 1, v);
        if (!IS_ERR(c))
@@ -546,6 +563,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -678,6 +697,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -897,6 +918,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
 }
 
 static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -1060,7 +1083,7 @@ static int __init alchemy_clk_init(void)
        ERRCK(c)
 
        /* L/RCLK: external static bus clock for synchronous mode */
-       c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
+       c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
        ERRCK(c)
 
        /* Frequency dividers 0-5 */
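
The LRCLK change above reduces to a small divisor helper. This sketch restates the selection from the hunk with the register value passed in as a parameter instead of being read from MEM_STCFG0; the function name is invented:

/* Sketch: Au1000/Au1500 pick pclk/4 or pclk/5 from MEM_STCFG0 bit 11,
 * later parts use bits 15:13 plus one (divisors 1..8).
 */
static unsigned long lrclk_divisor_sketch(unsigned long stcfg0, int is_au1000_or_au1500)
{
        if (is_au1000_or_au1500)
                return 4 + ((stcfg0 >> 11) & 1);
        return ((stcfg0 >> 13) & 7) + 1;
}
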
index 4e72daf12c325063a62da8c9255cee504a4e5fd4..2902138b3e0f56f639896c571e5bc87172f74d2f 100644 (file)
 #include <au1000.h>
 
 extern void __init board_setup(void);
-extern void set_cpuspec(void);
+extern void __init alchemy_set_lpj(void);
 
 void __init plat_mem_setup(void)
 {
+       alchemy_set_lpj();
+
        if (au1xxx_cpu_needs_config_od())
                /* Various early Au1xx0 errata corrected by this */
                set_c0_config(1 << 19); /* Set Config[OD] */
index 0fb5134fb83247a2395162ffc3b93597cc7d7614..fd94fe849af680f3edc24a3dcb184c308fa0613e 100644 (file)
@@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node,
 
 static struct of_device_id of_irq_ids[] __initdata = {
        { .compatible = "mti,cpu-interrupt-controller",
-         .data = mips_cpu_intc_init },
+         .data = mips_cpu_irq_of_init },
        { .compatible = "brcm,bcm3384-intc",
          .data = intc_of_init },
        {},
index 1466c00260936c7e387877c8c9e296d430cd6240..acb1988f354edc58072399a076656c0f2ffd149e 100644 (file)
@@ -23,6 +23,12 @@ strip-flags   := $(addprefix --remove-section=,$(drop-sections))
 
 hostprogs-y := elf2ecoff
 
+suffix-y                       := bin
+suffix-$(CONFIG_KERNEL_BZIP2)  := bz2
+suffix-$(CONFIG_KERNEL_GZIP)   := gz
+suffix-$(CONFIG_KERNEL_LZMA)   := lzma
+suffix-$(CONFIG_KERNEL_LZO)    := lzo
+
 targets := vmlinux.ecoff
 quiet_cmd_ecoff = ECOFF          $@
       cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
@@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE
 UIMAGE_LOADADDR  = $(VMLINUX_LOAD_ADDRESS)
 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
 
+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,bzip2)
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
 targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+       $(call if_changed,uimage,bzip2)
+
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-targets += uImage
-$(obj)/uImage: $(obj)/uImage.gz FORCE
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+       $(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+       $(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
        @echo '  Image $@ is ready'
index 2a4c52e27f416e146e5c268edad9fd867e79c5fe..266c8137e859d418faed5e6fa3a8549b0aeff9df 100644 (file)
@@ -268,7 +268,6 @@ int main(int argc, char *argv[])
        Elf32_Ehdr ex;
        Elf32_Phdr *ph;
        Elf32_Shdr *sh;
-       char *shstrtab;
        int i, pad;
        struct sect text, data, bss;
        struct filehdr efh;
@@ -336,9 +335,6 @@ int main(int argc, char *argv[])
                                     "sh");
        if (must_convert_endian)
                convert_elf_shdrs(sh, ex.e_shnum);
-       /* Read in the section string table. */
-       shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
-                           sh[ex.e_shstrndx].sh_size, "shstrtab");
 
        /* Figure out if we can cram the program header into an ECOFF
           header...  Basically, we can't handle anything but loadable
index b752c4ed0b797938c6ff58e7e1583982cdbdd5ac..1882e6475dd093d546c70cf9f7345f8121589921 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -39,11 +39,20 @@ void __init octeon_setup_delays(void)
 
        if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
                union cvmx_mio_rst_boot rst_boot;
+
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                rdiv = rst_boot.s.c_mul;        /* CPU clock */
                sdiv = rst_boot.s.pnr_mul;      /* I/O clock */
                f = (0x8000000000000000ull / sdiv) * 2;
+       } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+               union cvmx_rst_boot rst_boot;
+
+               rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+               rdiv = rst_boot.s.c_mul;        /* CPU clock */
+               sdiv = rst_boot.s.pnr_mul;      /* I/O clock */
+               f = (0x8000000000000000ull / sdiv) * 2;
        }
+
 }
 
 /*
index 3778655c4a375215fddea8fd0969960d096d0d9c..7d8987818ccf51ed6aa82463fbe3de5dd599557e 100644 (file)
@@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void)
                        continue;
 
                /* These addresses map low for PCI. */
-               if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+               if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
                        continue;
 
                addr_size += e->size;
@@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void)
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
        /* OCTEON II ohci is only 32-bit. */
-       if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+       if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
                swiotlbsize = 64 * (1<<20);
 #endif
        swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
index 5dfef84b95767502a8cf9f5b4578b6f97a3bab76..9eb0feef441721362ee5e699e2788c1ffaf3b391 100644 (file)
@@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
                break;
        }
        /* Most boards except NIC10e use a 12MHz crystal */
-       if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+       if (OCTEON_IS_OCTEON2())
                return USB_CLOCK_TYPE_CRYSTAL_12;
        return USB_CLOCK_TYPE_REF_48;
 }
index 2bc4aa95944e462d84673bb974e2dde119fb6bdf..10f762557b925d419de87351836f25db4004f04f 100644 (file)
@@ -3,12 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2012 Cavium, Inc.
+ * Copyright (C) 2004-2014 Cavium, Inc.
  */
 
+#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/bitops.h>
+#include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
+struct octeon_irq_ciu_domain_data {
+       int num_sum;  /* number of sum registers (2 or 3). */
+};
+
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
-union octeon_ciu_chip_data {
-       void *p;
-       unsigned long l;
-       struct {
-               unsigned long line:6;
-               unsigned long bit:6;
-               unsigned long gpio_line:6;
-       } s;
+struct octeon_ciu_chip_data {
+       union {
+               struct {                /* only used for ciu3 */
+                       u64 ciu3_addr;
+                       unsigned int intsn;
+               };
+               struct {                /* only used for ciu/ciu2 */
+                       u8 line;
+                       u8 bit;
+                       u8 gpio_line;
+               };
+       };
+       int current_cpu;        /* Next CPU expected to take this irq */
 };
 
 struct octeon_core_chip_data {
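
The conversion above from a pointer-packed union to a separately allocated octeon_ciu_chip_data lifts the sizeof(void *) ceiling on per-interrupt state, which is what makes room for a 64-bit CIU3 address and the new current_cpu field. A reduced sketch of the before and after shapes, with invented names:

#include <stdlib.h>

/* Old approach (sketch): state bit-packed so it fits in a void * slot. */
union packed_state_sketch {
        void *p;
        struct {
                unsigned long line:6;
                unsigned long bit:6;
                unsigned long gpio_line:6;
        } s;
};

/* New approach (sketch): a real allocation, free to grow beyond pointer size. */
struct ciu_state_sketch {
        unsigned char line;
        unsigned char bit;
        unsigned char gpio_line;
        int current_cpu;        /* extra state, no longer squeezed into bitfields */
};

static struct ciu_state_sketch *ciu_state_alloc(int line, int bit, int gpio_line)
{
        struct ciu_state_sketch *cd = calloc(1, sizeof(*cd));

        if (!cd)
                return NULL;    /* caller maps this to -ENOMEM */
        cd->line = line;
        cd->bit = bit;
        cd->gpio_line = gpio_line;
        return cd;
}
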
@@ -45,27 +56,40 @@ struct octeon_core_chip_data {
 
 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
 
-static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
-                                      struct irq_chip *chip,
-                                      irq_flow_handler_t handler)
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+                                     struct irq_chip *chip,
+                                     irq_flow_handler_t handler)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       if (!cd)
+               return -ENOMEM;
 
        irq_set_chip_and_handler(irq, chip, handler);
 
-       cd.l = 0;
-       cd.s.line = line;
-       cd.s.bit = bit;
-       cd.s.gpio_line = gpio_line;
+       cd->line = line;
+       cd->bit = bit;
+       cd->gpio_line = gpio_line;
 
-       irq_set_chip_data(irq, cd.p);
+       irq_set_chip_data(irq, cd);
        octeon_irq_ciu_to_irq[line][bit] = irq;
+       return 0;
 }
 
-static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
-                                        int irq, int line, int bit)
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 {
-       irq_domain_associate(domain, irq, line << 6 | bit);
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+       irq_set_chip_data(irq, NULL);
+       kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+                                       int irq, int line, int bit)
+{
+       return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
 static int octeon_coreid_for_cpu(int cpu)
@@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data)
 #ifdef CONFIG_SMP
        int cpu;
        int weight = cpumask_weight(data->affinity);
+       struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
        if (weight > 1) {
-               cpu = smp_processor_id();
+               cpu = cd->current_cpu;
                for (;;) {
                        cpu = cpumask_next(cpu, data->affinity);
                        if (cpu >= nr_cpu_ids) {
@@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data)
        } else {
                cpu = smp_processor_id();
        }
+       cd->current_cpu = cpu;
        return cpu;
 #else
        return smp_processor_id();
@@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
        int coreid = octeon_coreid_for_cpu(cpu);
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
        } else {
                pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 {
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 {
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
        unsigned long flags;
        unsigned long *pen;
        int cpu;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                raw_spin_lock_irqsave(lock, flags);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
        unsigned long flags;
        unsigned long *pen;
        int cpu;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                raw_spin_lock_irqsave(lock, flags);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -397,26 +423,87 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 {
        u64 mask;
        int cpu = next_cpu_for_irq(data);
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
        /*
         * Called under the desc lock, so these should never get out
         * of sync.
         */
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = octeon_coreid_for_cpu(cpu) * 2;
-               set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+               set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-               set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+               set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
 }
 
+/*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+       int cpu;
+       struct octeon_ciu_chip_data *cd;
+       u64 mask;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       for_each_online_cpu(cpu) {
+               int coreid = octeon_coreid_for_cpu(cpu);
+
+               cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+       }
+}
+
 /*
  * Enable the irq on the current CPU for chips that
  * have the EN*_W1{S,C} registers.
@@ -424,18 +511,18 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
-               set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+               set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
-               set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+               set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
 }
@@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
-               clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+               clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
-               clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+               clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
 }
@@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_ack(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
        } else {
@@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
-                       clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+                       clear_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-                       clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+                       clear_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                }
        }
@@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
-                       set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+                       set_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-                       set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+                       set_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                }
        }
@@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
        union cvmx_gpio_bit_cfgx cfg;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        u32 t = irqd_get_trigger_type(data);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        cfg.u64 = 0;
        cfg.s.int_en = 1;
@@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data)
        cfg.s.fil_cnt = 7;
        cfg.s.fil_sel = 3;
 
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
 }
 
 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
@@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
 
 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cd = irq_data_get_irq_chip_data(data);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu_disable_all_v2(data);
 }
 
 static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cd = irq_data_get_irq_chip_data(data);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu_disable_all(data);
 }
 
 static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        u64 mask;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.gpio_line);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->gpio_line);
 
        cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
+static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
 {
        if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
                handle_edge_irq(irq, desc);
@@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        unsigned long *pen;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
                raw_spin_lock_irqsave(lock, flags);
 
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
-                       __set_bit(cd.s.bit, pen);
+                       __set_bit(cd->bit, pen);
                } else {
-                       __clear_bit(cd.s.bit, pen);
+                       __clear_bit(cd->bit, pen);
                }
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                 */
                wmb();
 
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
        if (!enable_one)
                return 0;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << cd.s.bit;
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
-                               set_bit(cd.s.bit, pen);
+                               set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                        } else {
-                               clear_bit(cd.s.bit, pen);
+                               clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                        }
                }
@@ -733,22 +824,62 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
-                               set_bit(cd.s.bit, pen);
+                               set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                        } else {
-                               clear_bit(cd.s.bit, pen);
+                               clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                        }
                }
        }
        return 0;
 }
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+                                           const struct cpumask *dest,
+                                           bool force)
+{
+       int cpu;
+       bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+       u64 mask;
+       struct octeon_ciu_chip_data *cd;
+
+       if (!enable_one)
+               return 0;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
+
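+       /*
+        * Unlike the CIU0/CIU1 affinity paths there is no per-CPU enable
+        * mirror for the SUM2 lines, so only the W1S/W1C CSRs are written.
+        */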
+       for_each_online_cpu(cpu) {
+               int index = octeon_coreid_for_cpu(cpu);
+
+               if (cpumask_test_cpu(cpu, dest) && enable_one) {
+                       enable_one = false;
+                       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+               } else {
+                       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+               }
+       }
+       return 0;
+}
 #endif
 
 /*
  * Newer octeon chips have support for lockless CIU operation.
  */
 static struct irq_chip octeon_irq_chip_ciu_v2 = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable_v2,
+       .irq_disable = octeon_irq_ciu_disable_all_v2,
+       .irq_mask = octeon_irq_ciu_disable_local_v2,
+       .irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
@@ -761,7 +892,47 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 #endif
 };
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_sum2 = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable_sum2,
+       .irq_disable = octeon_irq_ciu_disable_all_sum2,
+       .irq_mask = octeon_irq_ciu_disable_local_sum2,
+       .irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable_sum2,
+       .irq_disable = octeon_irq_ciu_disable_all_sum2,
+       .irq_ack = octeon_irq_ciu_ack_sum2,
+       .irq_mask = octeon_irq_ciu_disable_local_sum2,
+       .irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable,
+       .irq_disable = octeon_irq_ciu_disable_all,
+       .irq_mask = octeon_irq_ciu_disable_local,
+       .irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
@@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
                               unsigned int *out_type)
 {
        unsigned int ciu, bit;
+       struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
        ciu = intspec[0];
        bit = intspec[1];
 
-       if (ciu > 1 || bit > 63)
+       if (ciu >= dd->num_sum || bit > 63)
                return -EINVAL;
 
        *out_hwirq = (ciu << 6) | bit;
@@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
 }
 
 static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
 static struct irq_chip *octeon_irq_gpio_chip;
 
 static bool octeon_irq_virq_in_range(unsigned int virq)
@@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq)
 static int octeon_irq_ciu_map(struct irq_domain *d,
                              unsigned int virq, irq_hw_number_t hw)
 {
+       int rv;
        unsigned int line = hw >> 6;
        unsigned int bit = hw & 63;
+       struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
@@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
        if (line == 0 && bit >= 16 && bit < 32)
                return 0;
 
-       if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+       if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
-       if (octeon_irq_ciu_is_edge(line, bit))
-               octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          octeon_irq_ciu_chip,
-                                          handle_edge_irq);
-       else
-               octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          octeon_irq_ciu_chip,
-                                          handle_level_irq);
-
-       return 0;
+       if (line == 2) {
+               if (octeon_irq_ciu_is_edge(line, bit))
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               &octeon_irq_chip_ciu_sum2_edge,
+                               handle_edge_irq);
+               else
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               &octeon_irq_chip_ciu_sum2,
+                               handle_level_irq);
+       } else {
+               if (octeon_irq_ciu_is_edge(line, bit))
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               octeon_irq_ciu_chip_edge,
+                               handle_edge_irq);
+               else
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               octeon_irq_ciu_chip,
+                               handle_level_irq);
+       }
+       return rv;
 }
 
-static int octeon_irq_gpio_map_common(struct irq_domain *d,
-                                     unsigned int virq, irq_hw_number_t hw,
-                                     int line_limit, struct irq_chip *chip)
+static int octeon_irq_gpio_map(struct irq_domain *d,
+                              unsigned int virq, irq_hw_number_t hw)
 {
        struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
        unsigned int line, bit;
+       int r;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
        line = (hw + gpiod->base_hwirq) >> 6;
        bit = (hw + gpiod->base_hwirq) & 63;
-       if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
+       if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+               octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
-       octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-                                  chip, octeon_irq_handle_gpio);
-       return 0;
-}
-
-static int octeon_irq_gpio_map(struct irq_domain *d,
-                              unsigned int virq, irq_hw_number_t hw)
-{
-       return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
+       r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+               octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+       return r;
 }
 
 static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
        .map = octeon_irq_ciu_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu_xlat,
 };
 
 static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
        .map = octeon_irq_gpio_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_gpio_xlat,
 };
 
@@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void)
        }
 }
 
+static void octeon_irq_ip4_ciu(void)
+{
+       int coreid = cvmx_get_core_num();
+       u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+       u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
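+       /*
+        * Only sources that are both pending and enabled are serviced;
+        * SUM2 sources are recorded on line 2 of octeon_irq_ciu_to_irq.
+        */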
+       ciu_sum &= ciu_en;
+       if (likely(ciu_sum)) {
+               int bit = fls64(ciu_sum) - 1;
+               int irq = octeon_irq_ciu_to_irq[2][bit];
+
+               if (likely(irq))
+                       do_IRQ(irq);
+               else
+                       spurious_interrupt();
+       } else {
+               spurious_interrupt();
+       }
+}
+
 static bool octeon_irq_use_ip4;
 
 static void octeon_irq_local_enable_ip4(void *arg)
@@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void)
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-       clear_c0_status(STATUSF_IP4);
+       if (octeon_irq_use_ip4)
+               set_c0_status(STATUSF_IP4);
+       else
+               clear_c0_status(STATUSF_IP4);
 }
 
 static void octeon_irq_setup_secondary_ciu2(void)
@@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void)
                clear_c0_status(STATUSF_IP4);
 }
 
-static void __init octeon_irq_init_ciu(void)
+static int __init octeon_irq_init_ciu(
+       struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i;
+       unsigned int i, r;
        struct irq_chip *chip;
+       struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
        struct irq_chip *chip_wd;
-       struct device_node *gpio_node;
-       struct device_node *ciu_node;
        struct irq_domain *ciu_domain = NULL;
+       struct octeon_irq_ciu_domain_data *dd;
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       if (!dd)
+               return -ENOMEM;
 
        octeon_irq_init_ciu_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
 
        octeon_irq_ip2 = octeon_irq_ip2_ciu;
        octeon_irq_ip3 = octeon_irq_ip3_ciu;
+       if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+               && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+               octeon_irq_ip4 =  octeon_irq_ip4_ciu;
+               dd->num_sum = 3;
+               octeon_irq_use_ip4 = true;
+       } else {
+               octeon_irq_ip4 = octeon_irq_ip4_mask;
+               dd->num_sum = 2;
+               octeon_irq_use_ip4 = false;
+       }
        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
-           OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+           OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
                chip = &octeon_irq_chip_ciu_v2;
+               chip_edge = &octeon_irq_chip_ciu_v2_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
                chip_wd = &octeon_irq_chip_ciu_wd_v2;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
        } else {
                chip = &octeon_irq_chip_ciu;
+               chip_edge = &octeon_irq_chip_ciu_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox;
                chip_wd = &octeon_irq_chip_ciu_wd;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
        }
        octeon_irq_ciu_chip = chip;
-       octeon_irq_ip4 = octeon_irq_ip4_mask;
+       octeon_irq_ciu_chip_edge = chip_edge;
 
        /* Mips internal */
        octeon_irq_init_core();
 
-       gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-       if (gpio_node) {
-               struct octeon_irq_gpio_domain_data *gpiod;
-
-               gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-               if (gpiod) {
-                       /* gpio domain host_data is the base hwirq number. */
-                       gpiod->base_hwirq = 16;
-                       irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
-                       of_node_put(gpio_node);
-               } else
-                       pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-       } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-       ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
-       if (ciu_node) {
-               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
-               irq_set_default_host(ciu_domain);
-               of_node_put(ciu_node);
-       } else
-               panic("Cannot find device node for cavium,octeon-3860-ciu.");
+       ciu_domain = irq_domain_add_tree(
+               ciu_node, &octeon_irq_domain_ciu_ops, dd);
+       irq_set_default_host(ciu_domain);
 
        /* CIU_0 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+       for (i = 0; i < 16; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+               if (r)
+                       goto err;
+       }
+
+       r = octeon_irq_set_ciu_mapping(
+               OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+       if (r)
+               goto err;
+       r = octeon_irq_set_ciu_mapping(
+               OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+       if (r)
+               goto err;
+
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+               if (r)
+                       goto err;
+       }
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+       if (r)
+               goto err;
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+       if (r)
+               goto err;
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+               if (r)
+                       goto err;
+       }
+
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+       if (r)
+               goto err;
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+       if (r)
+               goto err;
 
        /* CIU_1 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);
+       for (i = 0; i < 16; i++) {
+               r = octeon_irq_set_ciu_mapping(
+                       i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+                       handle_level_irq);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+       if (r)
+               goto err;
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-       clear_c0_status(STATUSF_IP4);
+       if (octeon_irq_use_ip4)
+               set_c0_status(STATUSF_IP4);
+       else
+               clear_c0_status(STATUSF_IP4);
+
+       return 0;
+err:
+       return r;
 }
 
+static int __init octeon_irq_init_gpio(
+       struct device_node *gpio_node, struct device_node *parent)
+{
+       struct octeon_irq_gpio_domain_data *gpiod;
+       u32 interrupt_cells;
+       unsigned int base_hwirq;
+       int r;
+
+       r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+       if (r)
+               return r;
+
+       if (interrupt_cells == 1) {
+               u32 v;
+
+               r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               base_hwirq = v;
+       } else if (interrupt_cells == 2) {
+               u32 v0, v1;
+
+               r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
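+               /*
+                * Two cells give <ciu-line, bit>; combine them the same
+                * way hwirq numbers are encoded: (line << 6) | bit.
+                */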
+               base_hwirq = (v0 << 6) | v1;
+       } else {
+               pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+                       interrupt_cells);
+               return -EINVAL;
+       }
+
+       gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+       if (gpiod) {
+               /* gpio domain host_data is the base hwirq number. */
+               gpiod->base_hwirq = base_hwirq;
+               irq_domain_add_linear(
+                       gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+       } else {
+               pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
 /*
  * Watchdog interrupts are special.  They are associated with a single
  * core, so we hardwire the affinity to that core.
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = data->irq - OCTEON_IRQ_WDOG0;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data)
        u64 en_addr;
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 }
 
@@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+                       octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
+                       octeon_coreid_for_cpu(cpu));
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
+                       octeon_coreid_for_cpu(cpu));
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
        if (!enable_one)
                return 0;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << cd.s.bit;
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
 
        for_each_online_cpu(cpu) {
                u64 en_addr;
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = false;
-                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
+                               octeon_coreid_for_cpu(cpu)) +
+                               (0x1000ull * cd->line);
                } else {
-                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+                               octeon_coreid_for_cpu(cpu)) +
+                               (0x1000ull * cd->line);
                }
                cvmx_write_csr(en_addr, mask);
        }
@@ -1461,15 +1776,28 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
 
 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
-       cd.p = irq_data_get_irq_chip_data(data);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
 
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu2_disable_all(data);
 }
 
 static struct irq_chip octeon_irq_chip_ciu2 = {
+       .name = "CIU2-E",
+       .irq_enable = octeon_irq_ciu2_enable,
+       .irq_disable = octeon_irq_ciu2_disable_all,
+       .irq_mask = octeon_irq_ciu2_disable_local,
+       .irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu2_set_affinity,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_edge = {
        .name = "CIU2-E",
        .irq_enable = octeon_irq_ciu2_enable,
        .irq_disable = octeon_irq_ciu2_disable_all,
@@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 
        if (octeon_irq_ciu2_is_edge(line, bit))
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          &octeon_irq_chip_ciu2,
+                                          &octeon_irq_chip_ciu2_edge,
                                           handle_edge_irq);
        else
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
@@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 
        return 0;
 }
-static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
-                                   unsigned int virq, irq_hw_number_t hw)
-{
-       return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
-}
 
 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
        .map = octeon_irq_ciu2_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu2_xlat,
 };
 
-static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
-       .map = octeon_irq_ciu2_gpio_map,
-       .xlate = octeon_irq_gpio_xlat,
-};
-
 static void octeon_irq_ciu2(void)
 {
        int line;
@@ -1674,16 +1993,16 @@ out:
        return;
 }
 
-static void __init octeon_irq_init_ciu2(void)
+static int __init octeon_irq_init_ciu2(
+       struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i;
-       struct device_node *gpio_node;
-       struct device_node *ciu_node;
+       unsigned int i, r;
        struct irq_domain *ciu_domain = NULL;
 
        octeon_irq_init_ciu2_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
 
+       octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
        octeon_irq_ip2 = octeon_irq_ciu2;
        octeon_irq_ip3 = octeon_irq_ciu2_mbox;
        octeon_irq_ip4 = octeon_irq_ip4_mask;
@@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void)
        /* Mips internal */
        octeon_irq_init_core();
 
-       gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-       if (gpio_node) {
-               struct octeon_irq_gpio_domain_data *gpiod;
-
-               gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-               if (gpiod) {
-                       /* gpio domain host_data is the base hwirq number. */
-                       gpiod->base_hwirq = 7 << 6;
-                       irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
-                       of_node_put(gpio_node);
-               } else
-                       pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-       } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-       ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
-       if (ciu_node) {
-               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
-               irq_set_default_host(ciu_domain);
-               of_node_put(ciu_node);
-       } else
-               panic("Cannot find device node for cavium,octeon-6880-ciu2.");
+       ciu_domain = irq_domain_add_tree(
+               ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+       irq_set_default_host(ciu_domain);
 
        /* CUI2 */
-       for (i = 0; i < 64; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+       for (i = 0; i < 64; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 32; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
-                                          &octeon_irq_chip_ciu2_wd, handle_level_irq);
+       for (i = 0; i < 32; i++) {
+               r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
+                       &octeon_irq_chip_ciu2_wd, handle_level_irq);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+       if (r)
+               goto err;
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+               if (r)
+                       goto err;
+       }
 
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
@@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void)
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        clear_c0_status(STATUSF_IP4);
+       return 0;
+err:
+       return r;
+}
+
+struct octeon_irq_cib_host_data {
+       raw_spinlock_t lock;
+       u64 raw_reg;
+       u64 en_reg;
+       int max_bits;
+};
+
+struct octeon_irq_cib_chip_data {
+       struct octeon_irq_cib_host_data *host_data;
+       int bit;
+};
+
+static void octeon_irq_cib_enable(struct irq_data *data)
+{
+       unsigned long flags;
+       u64 en;
+       struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+       struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+       raw_spin_lock_irqsave(&host_data->lock, flags);
+       en = cvmx_read_csr(host_data->en_reg);
+       en |= 1ull << cd->bit;
+       cvmx_write_csr(host_data->en_reg, en);
+       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static void octeon_irq_cib_disable(struct irq_data *data)
+{
+       unsigned long flags;
+       u64 en;
+       struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+       struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+       raw_spin_lock_irqsave(&host_data->lock, flags);
+       en = cvmx_read_csr(host_data->en_reg);
+       en &= ~(1ull << cd->bit);
+       cvmx_write_csr(host_data->en_reg, en);
+       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
+{
+       irqd_set_trigger_type(data, t);
+       return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip octeon_irq_chip_cib = {
+       .name = "CIB",
+       .irq_enable = octeon_irq_cib_enable,
+       .irq_disable = octeon_irq_cib_disable,
+       .irq_mask = octeon_irq_cib_disable,
+       .irq_unmask = octeon_irq_cib_enable,
+       .irq_set_type = octeon_irq_cib_set_type,
+};
+
+static int octeon_irq_cib_xlat(struct irq_domain *d,
+                                  struct device_node *node,
+                                  const u32 *intspec,
+                                  unsigned int intsize,
+                                  unsigned long *out_hwirq,
+                                  unsigned int *out_type)
+{
+       unsigned int type = 0;
+
+       if (intsize == 2)
+               type = intspec[1];
+
+       switch (type) {
+       case 0: /* unofficial value, but we might as well let it work. */
+       case 4: /* official value for level triggering. */
+               *out_type = IRQ_TYPE_LEVEL_HIGH;
+               break;
+       case 1: /* official value for edge triggering. */
+               *out_type = IRQ_TYPE_EDGE_RISING;
+               break;
+       default: /* Nothing else is acceptable. */
+               return -EINVAL;
+       }
+
+       *out_hwirq = intspec[0];
+
+       return 0;
+}
+
+static int octeon_irq_cib_map(struct irq_domain *d,
+                             unsigned int virq, irq_hw_number_t hw)
+{
+       struct octeon_irq_cib_host_data *host_data = d->host_data;
+       struct octeon_irq_cib_chip_data *cd;
+
+       if (hw >= host_data->max_bits) {
+               pr_err("ERROR: %s mapping %u is to big!\n",
+                      d->of_node->name, (unsigned)hw);
+               return -EINVAL;
+       }
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       cd->host_data = host_data;
+       cd->bit = hw;
+
+       irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
+                                handle_simple_irq);
+       irq_set_chip_data(virq, cd);
+       return 0;
 }
 
+static struct irq_domain_ops octeon_irq_domain_cib_ops = {
+       .map = octeon_irq_cib_map,
+       .unmap = octeon_irq_free_cd,
+       .xlate = octeon_irq_cib_xlat,
+};
+
+/* Chain to real handler. */
+static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
+{
+       u64 en;
+       u64 raw;
+       u64 bits;
+       int i;
+       int irq;
+       struct irq_domain *cib_domain = data;
+       struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
+
+       en = cvmx_read_csr(host_data->en_reg);
+       raw = cvmx_read_csr(host_data->raw_reg);
+
+       bits = en & raw;
+
+       for (i = 0; i < host_data->max_bits; i++) {
+               if ((bits & 1ull << i) == 0)
+                       continue;
+               irq = irq_find_mapping(cib_domain, i);
+               if (!irq) {
+                       unsigned long flags;
+
+                       pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
+                               i, host_data->raw_reg);
+                       raw_spin_lock_irqsave(&host_data->lock, flags);
+                       en = cvmx_read_csr(host_data->en_reg);
+                       en &= ~(1ull << i);
+                       cvmx_write_csr(host_data->en_reg, en);
+                       cvmx_write_csr(host_data->raw_reg, 1ull << i);
+                       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+               } else {
+                       struct irq_desc *desc = irq_to_desc(irq);
+                       struct irq_data *irq_data = irq_desc_get_irq_data(desc);
+                       /* If edge, acknowledge the bit we will be sending. */
+                       if (irqd_get_trigger_type(irq_data) &
+                               IRQ_TYPE_EDGE_BOTH)
+                               cvmx_write_csr(host_data->raw_reg, 1ull << i);
+                       generic_handle_irq_desc(irq, desc);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+                                     struct device_node *parent)
+{
+       const __be32 *addr;
+       u32 val;
+       struct octeon_irq_cib_host_data *host_data;
+       int parent_irq;
+       int r;
+       struct irq_domain *cib_domain;
+
+       parent_irq = irq_of_parse_and_map(ciu_node, 0);
+       if (!parent_irq) {
+               pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
+                       ciu_node->name);
+               return -EINVAL;
+       }
+
+       host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+       raw_spin_lock_init(&host_data->lock);
+
+       addr = of_get_address(ciu_node, 0, NULL, NULL);
+       if (!addr) {
+               pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
+               return -EINVAL;
+       }
+       host_data->raw_reg = (u64)phys_to_virt(
+               of_translate_address(ciu_node, addr));
+
+       addr = of_get_address(ciu_node, 1, NULL, NULL);
+       if (!addr) {
+               pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
+               return -EINVAL;
+       }
+       host_data->en_reg = (u64)phys_to_virt(
+               of_translate_address(ciu_node, addr));
+
+       r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+       if (r) {
+               pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
+                       ciu_node->name);
+               return r;
+       }
+       host_data->max_bits = val;
+
+       cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
+                                          &octeon_irq_domain_cib_ops,
+                                          host_data);
+       if (!cib_domain) {
+               pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
+               return -ENOMEM;
+       }
+
+       cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
+       cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
+
+       r = request_irq(parent_irq, octeon_irq_cib_handler,
+                       IRQF_NO_THREAD, "cib", cib_domain);
+       if (r) {
+               pr_err("request_irq cib failed %d\n", r);
+               return r;
+       }
+       pr_info("CIB interrupt controller probed: %llx %d\n",
+               host_data->raw_reg, host_data->max_bits);
+       return 0;
+}
+
+static struct of_device_id ciu_types[] __initdata = {
+       {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
+       {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
+       {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+       {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
+       {}
+};
+
 void __init arch_init_irq(void)
 {
 #ifdef CONFIG_SMP
@@ -1750,10 +2305,7 @@ void __init arch_init_irq(void)
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 #endif
-       if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-               octeon_irq_init_ciu2();
-       else
-               octeon_irq_init_ciu();
+       of_irq_init(ciu_types);
 }
 
 asmlinkage void plat_irq_dispatch(void)
@@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void)
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;
 
-               if (unlikely(cop0_cause & STATUSF_IP2))
+               if (cop0_cause & STATUSF_IP2)
                        octeon_irq_ip2();
-               else if (unlikely(cop0_cause & STATUSF_IP3))
+               else if (cop0_cause & STATUSF_IP3)
                        octeon_irq_ip3();
-               else if (unlikely(cop0_cause & STATUSF_IP4))
+               else if (cop0_cause & STATUSF_IP4)
                        octeon_irq_ip4();
-               else if (likely(cop0_cause))
+               else if (cop0_cause)
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                else
                        break;
index 94f888d3384e247b542b15faae2da18147088f24..a42110e7edbcabefdea9a011de9e032f88388a87 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rst-defs.h>
 
 extern struct plat_smp_ops octeon_smp_ops;
 
@@ -579,12 +580,10 @@ void octeon_user_io_init(void)
        /* R/W If set, CVMSEG is available for loads/stores in user
         * mode. */
        cvmmemctl.s.cvmsegenau = 0;
-       /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
-        * is max legal value. */
-       cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
 
        write_c0_cvmmemctl(cvmmemctl.u64);
 
+       /* Setup of CVMSEG is done in kernel-entry-init.h */
        if (smp_processor_id() == 0)
                pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
@@ -615,6 +614,7 @@ void __init prom_init(void)
        const char *arg;
        char *p;
        int i;
+       u64 t;
        int argc;
 #ifdef CONFIG_CAVIUM_RESERVE32
        int64_t addr = -1;
@@ -654,15 +654,56 @@ void __init prom_init(void)
        sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
        sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 
-       if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+       if (OCTEON_IS_OCTEON2()) {
                /* I/O clock runs at a different rate than the CPU. */
                union cvmx_mio_rst_boot rst_boot;
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+       } else if (OCTEON_IS_OCTEON3()) {
+               /* I/O clock runs at a different rate than the CPU. */
+               union cvmx_rst_boot rst_boot;
+               rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+               octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
        } else {
                octeon_io_clock_rate = sysinfo->cpu_clock_hz;
        }
 
+       t = read_c0_cvmctl();
+       if ((t & (1ull << 27)) == 0) {
+               /*
+                * Setup the multiplier save/restore code if
+                * CvmCtl[NOMUL] clear.
+                */
+               void *save;
+               void *save_end;
+               void *restore;
+               void *restore_end;
+               int save_len;
+               int restore_len;
+               int save_max = (char *)octeon_mult_save_end -
+                       (char *)octeon_mult_save;
+               int restore_max = (char *)octeon_mult_restore_end -
+                       (char *)octeon_mult_restore;
+               if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
+                       save = octeon_mult_save3;
+                       save_end = octeon_mult_save3_end;
+                       restore = octeon_mult_restore3;
+                       restore_end = octeon_mult_restore3_end;
+               } else {
+                       save = octeon_mult_save2;
+                       save_end = octeon_mult_save2_end;
+                       restore = octeon_mult_restore2;
+                       restore_end = octeon_mult_restore2_end;
+               }
+               save_len = (char *)save_end - (char *)save;
+               restore_len = (char *)restore_end - (char *)restore;
+               if (!WARN_ON(save_len > save_max ||
+                               restore_len > restore_max)) {
+                       memcpy(octeon_mult_save, save, save_len);
+                       memcpy(octeon_mult_restore, restore, restore_len);
+               }
+       }
+
        /*
         * Only enable the LED controller if we're running on a CN38XX, CN58XX,
         * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar);
 
 void prom_free_prom_memory(void)
 {
-       if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
+       if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
                /* Check for presence of Core-14449 fix.  */
                u32 insn;
                u32 *foo;
@@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void)
                        panic("No PREF instruction at Core-14449 probe point.");
 
                if (((insn >> 16) & 0x1f) != 28)
-                       panic("Core-14449 WAR not in place (%04x).\n"
-                             "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn);
+                       panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
+                             "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
+                             insn);
        }
 }
 
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644 (file)
index 0000000..4bce1f8
--- /dev/null
@@ -0,0 +1,193 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index f9f5307434c276a35624f5c137333867e2357794..19f710117d974bf2a0da97764e7bc3ec428b3fc8 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
 #include <asm/sgialib.h>
 #include <asm/bootinfo.h>
 
-VOID
+VOID __noreturn
 ArcHalt(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(halt);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcPowerDown(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(pdown);
-never: goto never;
+
+       unreachable();
 }
 
 /* XXX is this a soft reset basically? XXX */
-VOID
+VOID __noreturn
 ArcRestart(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(restart);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcReboot(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(reboot);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcEnterInteractiveMode(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(imode);
-never: goto never;
+
+       unreachable();
 }
 
 LONG
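
The hunks above drop the old "never: goto never;" spin in favour of marking each wrapper __noreturn and ending it with unreachable(). A hedged userspace sketch of the same pattern, using the GCC builtin directly instead of the kernel macros (firmware_halt() is a hypothetical stand-in for ARC_CALL0(halt)):

    #include <stdlib.h>

    static void firmware_halt(void)
    {
            exit(0);        /* stand-in: the real firmware call never returns */
    }

    __attribute__((noreturn))
    static void arc_halt_like(void)
    {
            firmware_halt();
            /* Control cannot get here; telling the compiler so suppresses
             * "function does return" warnings and lets it drop the epilogue. */
            __builtin_unreachable();
    }

    int main(void)
    {
            arc_halt_like();
    }
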
index 200efeac41813c2a17e237156c6410253f488d8b..526539cbc99f6792b21d35dbfc1d044f0f817a87 100644 (file)
@@ -1,4 +1,5 @@
 # MIPS headers
+generic-$(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
index 6caf8766b80f161ea7a620ecc33cca7768e1d4b0..0cae4595e985bbc3d8043b3bb85aef66c582615b 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/asmmacro-64.h>
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  local_irq_enable reg=t0
        ei
        irq_enable_hazard
        .endm
 
        .macro  fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        sll     \tmp, \status, 5
        bgez    \tmp, 10f
        fpu_save_16odd \thread
        .endm
 
        .macro  fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        sll     \tmp, \status, 5
        bgez    \tmp, 10f                               # 16 register mode?
 
        fpu_restore_16even \thread \tmp
        .endm
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  _EXT    rd, rs, p, s
        ext     \rd, \rs, \p, \s
        .endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
        .macro  _EXT    rd, rs, p, s
        srl     \rd, \rs, \p
        andi    \rd, \rd, (1 << \s) - 1
        .endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 
 /*
  * Temporary until all gas have MT ASE support
        .set    push
        .set    noat
        SET_HARDFLOAT
-       add     $1, \base, \off
+       addu    $1, \base, \off
        .word   LDD_MSA_INSN | (\wd << 6)
        .set    pop
        .endm
        .set    push
        .set    noat
        SET_HARDFLOAT
-       add     $1, \base, \off
+       addu    $1, \base, \off
        .word   STD_MSA_INSN | (\wd << 6)
        .set    pop
        .endm
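
The _EXT fallback above (srl followed by andi) is simply a bit-field extract spelled out for CPUs without the ext instruction. A hedged C statement of what either form computes (ext_bits is a hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract 's' bits starting at bit position 'p' of 'rs', the same
     * operation _EXT performs with either 'ext' (R2/R6) or srl+andi. */
    static uint32_t ext_bits(uint32_t rs, unsigned p, unsigned s)
    {
            return (rs >> p) & ((1u << s) - 1);
    }

    int main(void)
    {
            printf("0x%x\n", ext_bits(0x3f0, 4, 6));   /* prints 0x3f */
            return 0;
    }
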
index 857da84cfc92eb20bd7f29cb5d9b3c1e16b86203..26d436336f2e18c9e8a3e5fd888e2f97bdb0a2dd 100644 (file)
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                           \
                "       sc      %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       ll      %0, %1          # atomic_" #op "\n"   \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       sc      %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
-                 "+" GCC_OFF12_ASM() (v->counter)                            \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       ll      %1, %2  # atomic_" #op "_return \n"   \
                        "       " #asm_op " %0, %1, %3                  \n"   \
                        "       sc      %0, %2                          \n"   \
                        "       .set    mips0                           \n"   \
                        : "=&r" (result), "=&r" (temp),                       \
-                         "+" GCC_OFF12_ASM() (v->counter)                    \
+                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
                        : "Ir" (i));                                          \
                } while (unlikely(!result));                                  \
                                                                              \
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
                : "memory");
        } else if (kernel_uses_llsc) {
                int temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "Ir" (i));
        } else {
                unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)                    \
                "       scd     %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       lld     %0, %1          # atomic64_" #op "\n" \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       scd     %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
-                 "+" GCC_OFF12_ASM() (v->counter)                            \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       lld     %1, %2  # atomic64_" #op "_return\n"  \
                        "       " #asm_op " %0, %1, %3                  \n"   \
                        "       scd     %0, %2                          \n"   \
                        "       .set    mips0                           \n"   \
                        : "=&r" (result), "=&r" (temp),                       \
-                         "=" GCC_OFF12_ASM() (v->counter)                    \
-                       : "Ir" (i), GCC_OFF12_ASM() (v->counter)              \
+                         "=" GCC_OFF_SMALL_ASM() (v->counter)                \
+                       : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)          \
                        : "memory");                                          \
                } while (unlikely(!result));                                  \
                                                                              \
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "=" GCC_OFF12_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+                 "=" GCC_OFF_SMALL_ASM() (v->counter)
+               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
                : "memory");
        } else if (kernel_uses_llsc) {
                long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "Ir" (i));
        } else {
                unsigned long flags;
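
The ll/sc sequences above, now assembled under the configured ISA level instead of a hard-coded arch=r4000, all follow the same retry shape: load-linked, modify, store-conditional, loop until the store succeeds. A hedged portable sketch of that shape, modelling the sc failure path with a GCC compare-and-swap builtin (atomic_add_return_like is a hypothetical name, not the kernel API):

    #include <stdio.h>

    static int atomic_add_return_like(int i, int *counter)
    {
            int old, new;

            do {
                    old = __atomic_load_n(counter, __ATOMIC_RELAXED);
                    new = old + i;
                    /* Retry whenever *counter changed underneath us, which is
                     * exactly what a failed sc forces in the asm version. */
            } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                                                  __ATOMIC_SEQ_CST,
                                                  __ATOMIC_SEQ_CST));
            return new;
    }

    int main(void)
    {
            int v = 40;
            printf("%d\n", atomic_add_return_like(2, &v));   /* prints 42 */
            return 0;
    }
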
index 6663bcca9d0c626886529ae5eb7a92e75b75cd46..9f935f6aa996ddfd573b9ccb4f6261118934a415 100644 (file)
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC  "%0, %1                                 \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
-               : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+               : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+               : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       " __INS "%0, %3, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit), "r" (~0));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       or      %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC "%0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       " __INS "%0, $0, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       and     %0, %2                          \n"
                        "       " __SC "%0, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (~(1UL << bit)));
                } while (unlikely(!temp));
        } else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (1UL << bit));
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # change_bit    \n"
                        "       xor     %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                        "       " __EXT "%2, %0, %3, 1                  \n"
                        "       " __INS "%0, $0, %3, 1                  \n"
                        "       " __SC  "%0, %1                         \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "ir" (bit)
                        : "memory");
                } while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips32                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips64                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       dclz    %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
        if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips32                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (x)
index 3418c51e11512ed2a3957448fbb68d83ebac1858..5c585c5c1c3e3fe3ee6bacdfd8ce0f59be82e27e 100644 (file)
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
@@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
  */
 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                       int len, __wsum sum);
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
 
 /*
  *     Fold a partial checksum without adding pseudo headers
  */
-static inline __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-       __asm__(
-       "       .set    push            # csum_fold\n"
-       "       .set    noat            \n"
-       "       sll     $1, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       sltu    $1, %0, $1      \n"
-       "       srl     %0, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       xori    %0, 0xffff      \n"
-       "       .set    pop"
-       : "=r" (sum)
-       : "0" (sum));
+       u32 sum = (__force u32)csum;
 
-       return (__force __sum16)sum;
+       sum += (sum << 16);
+       csum = (sum < csum);
+       sum >>= 16;
+       sum += csum;
+
+       return (__force __sum16)~sum;
 }
+#define csum_fold csum_fold
 
 /*
  *     This is a version of ip_compute_csum() optimized for IP headers,
@@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 
        return csum_fold(csum);
 }
+#define ip_fast_csum ip_fast_csum
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr,
        __be32 daddr, unsigned short len, unsigned short proto,
@@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
 
        return sum;
 }
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                                  unsigned short len,
-                                                  unsigned short proto,
-                                                  __wsum sum)
-{
-       return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
        return csum_fold(sum);
 }
 
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
 #endif /* _ASM_CHECKSUM_H */
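
The new plain-C csum_fold() above is the standard 16-bit one's-complement fold: add the low half into the high half, propagate the carry, then complement the top 16 bits. A hedged userspace restatement of the same arithmetic without the kernel's __wsum/__sum16 types (fold16 is a hypothetical name):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fold16(uint32_t csum)
    {
            uint32_t sum = csum;

            sum += sum << 16;              /* low half added into high half */
            uint32_t carry = sum < csum;   /* unsigned wrap-around == carry */
            sum >>= 16;
            sum += carry;
            return (uint16_t)~sum;         /* one's complement of the fold  */
    }

    int main(void)
    {
            printf("0x%04x\n", fold16(0x1a2b3c4d));   /* prints 0xa987 */
            return 0;
    }
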
index 28b1edf195016b80a89854b9ae11c28a1a9c04ce..d0a2a68ca600670ead4d5535751267f6889e93f4 100644 (file)
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF12_ASM() (*m), "Jr" (val)
+               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       ll      %0, %3          # xchg_u32      \n"
                        "       .set    mips0                           \n"
                        "       move    %2, %z4                         \n"
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       sc      %2, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+                       : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
                          "=&r" (dummy)
-                       : GCC_OFF12_ASM() (*m), "Jr" (val)
+                       : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                        : "memory");
                } while (unlikely(!dummy));
        } else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF12_ASM() (*m), "Jr" (val)
+               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       lld     %0, %3          # xchg_u64      \n"
                        "       move    %2, %z4                         \n"
                        "       scd     %2, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+                       : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
                          "=&r" (dummy)
-                       : GCC_OFF12_ASM() (*m), "Jr" (val)
+                       : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                        : "memory");
                } while (unlikely(!dummy));
        } else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       beqzl   $1, 1b                          \n"     \
                "2:                                             \n"     \
                "       .set    pop                             \n"     \
-               : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)               \
-               : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)          \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)              \
                : "memory");                                            \
        } else if (kernel_uses_llsc) {                                  \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "       " st "  $1, %1                          \n"     \
                "       beqz    $1, 1b                          \n"     \
                "       .set    pop                             \n"     \
                "2:                                             \n"     \
-               : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)               \
-               : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)          \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)              \
                : "memory");                                            \
        } else {                                                        \
                unsigned long __flags;                                  \
index c73815e0123a756bc0579806c7e8333b26712d08..e081a265f4227475d17fef0d53cf07f7e9ffb6a3 100644 (file)
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constraint */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
 #ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
 #else
 #error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #endif /* _ASM_COMPILER_H */
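
The string-valued MIPS_ISA_LEVEL / MIPS_ISA_ARCH_LEVEL macros introduced above are consumed by the ".set" lines in the earlier atomic and bitops hunks through plain C string-literal concatenation. A hedged sketch of that mechanism (ISA_LEVEL is a stand-in name, not the kernel macro):

    #include <stdio.h>

    #define ISA_LEVEL "mips64r2"

    int main(void)
    {
            /* Adjacent string literals merge at compile time, so this is the
             * single string ".set mips64r2\n", ready to drop into an asm(). */
            const char *directive = ".set " ISA_LEVEL "\n";
            fputs(directive, stdout);
            return 0;
    }
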
index 2897cfafcaf097f01e17a561488eafe032924933..0d8208de9a3fadaff9308544af2c955f7b967a34 100644 (file)
@@ -38,6 +38,9 @@
 #ifndef cpu_has_maar
 #define cpu_has_maar           (cpu_data[0].options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb         (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
 #endif
 #endif
 
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1                (!cpu_has_mips_r6)
+#endif
 #ifndef cpu_has_mips_2
 # define cpu_has_mips_2                (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
 #endif
 #ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 #endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
 #ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 #endif
 #ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 #endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
 
 /*
  * Shortcuts ...
 #define cpu_has_mips_4_5_r     (cpu_has_mips_4 | cpu_has_mips_5_r)
 #define cpu_has_mips_5_r       (cpu_has_mips_5 | cpu_has_mips_r)
 
-#define cpu_has_mips_4_5_r2    (cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+                                cpu_has_mips_r6)
 
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6        (cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
-                        cpu_has_mips64r1 | cpu_has_mips64r2)
+                        cpu_has_mips32r6 | cpu_has_mips64r1 | \
+                        cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6     (cpu_has_mips_r2 | cpu_has_mips_r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*
index a6c9ccb33c5c9a35ceaac1485fb10f02a97da4b2..c3f4f2d2e1088459b2aa10c6292e5de76665d5bc 100644 (file)
@@ -84,6 +84,11 @@ struct cpuinfo_mips {
         * (shifted by _CACHE_SHIFT)
         */
        unsigned int            writecombine;
+       /*
+        * Simple counter to prevent enabling HTW in nested
+        * htw_start/htw_stop calls
+        */
+       unsigned int            htw_seq;
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
index b4e2bd87df5030457b2f397b7e52565b7d2dc4b5..8245875f8b33be3156cfe42b63b0f1d788afebf8 100644 (file)
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
        case CPU_M5150:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+       case CPU_QEMU_GENERIC:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
        case CPU_5KC:
        case CPU_5KE:
index 33866fce4d633a177636c1d386e180bdbd8ef02a..15687234d70a6bcd1dd211543fa8ac494ca44902 100644 (file)
@@ -93,6 +93,7 @@
  * These are the PRID's for when 23:16 == PRID_COMP_MIPS
  */
 
+#define PRID_IMP_QEMU_GENERIC  0x0000
 #define PRID_IMP_4KC           0x8000
 #define PRID_IMP_5KC           0x8100
 #define PRID_IMP_20KC          0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
        CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
        CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
+       CPU_QEMU_GENERIC,
+
        CPU_LAST
 };
 
@@ -329,11 +332,14 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M32R2     0x00000020
 #define MIPS_CPU_ISA_M64R1     0x00000040
 #define MIPS_CPU_ISA_M64R2     0x00000080
+#define MIPS_CPU_ISA_M32R6     0x00000100
+#define MIPS_CPU_ISA_M64R6     0x00000200
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
-       MIPS_CPU_ISA_M32R2)
+       MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+       MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_RIXIEX                0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
 #define MIPS_CPU_MAAR          0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE           0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB                0x1000000000ull /* LLADDR/LLB writes are allowed */
 
 /*
  * CPU ASE encodings
index ae6fedcb0060f22c69091480f55228dd2ef4383d..94105d3f58f4882849643cfcb8668857198f2117 100644 (file)
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
                "       sc      %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
-               : GCC_OFF12_ASM() (*virt_addr));
+               : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+               : GCC_OFF_SMALL_ASM() (*virt_addr));
 
                virt_addr++;
        }
index eb4d95de619c5dca7543ed551267e43799f4ca11..535f196ffe02da7ad769ab0b7053b4dd5253dd82 100644 (file)
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 struct arch_elf_state {
        int fp_abi;
        int interp_fp_abi;
-       int overall_abi;
+       int overall_fp_mode;
 };
 
+#define MIPS_ABI_FP_UNKNOWN    (-1)    /* Unknown FP ABI (kernel internal) */
+
 #define INIT_ARCH_ELF_STATE {                  \
-       .fp_abi = -1,                           \
-       .interp_fp_abi = -1,                    \
-       .overall_abi = -1,                      \
+       .fp_abi = MIPS_ABI_FP_UNKNOWN,          \
+       .interp_fp_abi = MIPS_ABI_FP_UNKNOWN,   \
+       .overall_fp_mode = -1,                  \
 }
 
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
index affebb78f5d6573dbf97f62630ad1e6a35026602..dd083e999b08a14ffdbef46d5f5f4a0731e9f18e 100644 (file)
@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
                goto fr_common;
 
        case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+      || defined(CONFIG_64BIT))
                /* we only have a 32-bit FPU */
                return SIGFPE;
 #endif
index ef9987a61d88c62e79e38e3c406a3bbcc42e1a23..1de190bdfb9c9fef90b8976503242e09b97e7f60 100644 (file)
                "       "__UA_ADDR "\t2b, 4b                    \n"     \
                "       .previous                               \n"     \
                : "=r" (ret), "=&r" (oldval),                           \
-                 "=" GCC_OFF12_ASM() (*uaddr)                          \
-               : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),      \
+                 "=" GCC_OFF_SMALL_ASM() (*uaddr)                              \
+               : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),  \
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else if (cpu_has_llsc) {                                      \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "1:     "user_ll("%1", "%4")" # __futex_atomic_op\n"    \
                "       .set    mips0                           \n"     \
                "       " insn  "                               \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "2:     "user_sc("$1", "%2")"                   \n"     \
                "       beqz    $1, 1b                          \n"     \
                __WEAK_LLSC_MB                                          \
@@ -74,8 +74,8 @@
                "       "__UA_ADDR "\t2b, 4b                    \n"     \
                "       .previous                               \n"     \
                : "=r" (ret), "=&r" (oldval),                           \
-                 "=" GCC_OFF12_ASM() (*uaddr)                          \
-               : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),      \
+                 "=" GCC_OFF_SMALL_ASM() (*uaddr)                              \
+               : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),  \
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else                                                          \
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-               : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+               : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+               : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:     "user_ll("%1", "%3")"                           \n"
                "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z5                                 \n"
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "2:     "user_sc("$1", "%2")"                           \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-               : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+               : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+               : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
        } else
index 4be1a57cdbb055915c862cff1fb55966b8214726..71a986e9b694d672823a87c17260f8fd39882e24 100644 (file)
@@ -25,8 +25,6 @@ struct gio_driver {
 
        int  (*probe)(struct gio_device *, const struct gio_device_id *);
        void (*remove)(struct gio_device *);
-       int  (*suspend)(struct gio_device *, pm_message_t);
-       int  (*resume)(struct gio_device *);
        void (*shutdown)(struct gio_device *);
 
        struct device_driver driver;
index e3ee92d4dbe750c7aa05a5488f7443cdd64fb387..4087b47ad1cbea16050e968a4daf9f0531b4aa6d 100644 (file)
@@ -11,6 +11,7 @@
 #define _ASM_HAZARDS_H
 
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 
 #define ___ssnop                                                       \
        sll     $0, $0, 1
@@ -21,7 +22,7 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@ do {                                                                  \
        unsigned long tmp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "       .set    mips64r2                                \n"     \
+       "       .set "MIPS_ISA_LEVEL"                           \n"     \
        "       dla     %0, 1f                                  \n"     \
        "       jr.hb   %0                                      \n"     \
        "       .set    mips0                                   \n"     \
@@ -132,7 +133,7 @@ do {                                                                        \
 
 #define instruction_hazard()                                           \
 do {                                                                   \
-       if (cpu_has_mips_r2)                                            \
+       if (cpu_has_mips_r2_r6)                                         \
                __instruction_hazard();                                 \
 } while (0)
 
@@ -240,7 +241,7 @@ do {                                                                        \
 
 #define __disable_fpu_hazard
 
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 #define __enable_fpu_hazard                                            \
        ___ehb
index 0fa5fdcd1f01f273da67b1530aa67ff5ee1646c6..d60cc68fa31e4f908685dd9c9f332e8df3d36242 100644 (file)
 
 #include <linux/compiler.h>
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 #include <asm/hazards.h>
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 static inline void arch_local_irq_enable(void)
 {
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#if   defined(CONFIG_CPU_MIPSR2)
+#if   defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        "       ei                                                      \n"
 #else
        "       mfc0    $1,$12                                          \n"
index 46dfc3c1fd49777a41b3158c77b1fc5c49955087..8feaed62a2abab216da39e8dd99786f2340d3f60 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:"    __LL    "%1, %2         # local_add_return      \n"
                "       addu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:"    __LL    "%1, %2         # local_sub_return      \n"
                "       subu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
index 1668ee57acb90b82e679b50b4d687940d0a9f39f..cf92fe7339952b43f0a8585bc9a51d351fca0f50 100644 (file)
@@ -8,11 +8,10 @@
 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 
-
-#define CP0_CYCLE_COUNTER $9, 6
 #define CP0_CVMCTL_REG $9, 7
 #define CP0_CVMMEMCTL_REG $11,7
 #define CP0_PRID_REG $15, 0
+#define CP0_DCACHE_ERR_REG $27, 1
 #define CP0_PRID_OCTEON_PASS1 0x000d0000
 #define CP0_PRID_OCTEON_CN30XX 0x000d0200
 
        # Needed for octeon specific memcpy
        or  v0, v0, 0x5001
        xor v0, v0, 0x1001
-       # Read the processor ID register
-       mfc0 v1, CP0_PRID_REG
-       # Disable instruction prefetching (Octeon Pass1 errata)
-       or  v0, v0, 0x2000
-       # Skip reenable of prefetching for Octeon Pass1
-       beq v1, CP0_PRID_OCTEON_PASS1, skip
-       nop
-       # Reenable instruction prefetching, not on Pass1
-       xor v0, v0, 0x2000
-       # Strip off pass number off of processor id
-       srl v1, 8
-       sll v1, 8
-       # CN30XX needs some extra stuff turned off for better performance
-       bne v1, CP0_PRID_OCTEON_CN30XX, skip
-       nop
-       # CN30XX Use random Icache replacement
-       or  v0, v0, 0x400
-       # CN30XX Disable instruction prefetching
-       or  v0, v0, 0x2000
-skip:
        # First clear off CvmCtl[IPPCI] bit and move the performance
        # counters interrupt to IRQ 6
-       li      v1, ~(7 << 7)
+       dli     v1, ~(7 << 7)
        and     v0, v0, v1
        ori     v0, v0, (6 << 7)
+
+       mfc0    v1, CP0_PRID_REG
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9000          # 63-P1
+       beqz    t1, 4f
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9008          # 63-P2
+       beqz    t1, 4f
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9100          # 68-P1
+       beqz    t1, 4f
+       and     t1, v1, 0xff00
+       xor     t1, t1, 0x9200          # 66-PX
+       bnez    t1, 5f                  # Skip WAR for others.
+       and     t1, v1, 0x00ff
+       slti    t1, t1, 2               # 66-P1.2 and later good.
+       beqz    t1, 5f
+
+4:     # core-16057 work around
+       or      v0, v0, 0x2000          # Set IPREF bit.
+
+5:     # No core-16057 work around
        # Write the cavium control register
        dmtc0   v0, CP0_CVMCTL_REG
        sync
        # Flush dcache after config change
        cache   9, 0($0)
+       # Zero all of CVMSEG to make sure parity is correct
+       dli     v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+       dsll    v0, 7
+       beqz    v0, 2f
+1:     dsubu   v0, 8
+       sd      $0, -32768(v0)
+       bnez    v0, 1b
+2:
+       mfc0    v0, CP0_PRID_REG
+       bbit0   v0, 15, 1f
+       # OCTEON II or better have bit 15 set.  Clear the error bits.
+       and     t1, v0, 0xff00
+       dli     v0, 0x9500
+       bge     t1, v0, 1f  # OCTEON III has no DCACHE_ERR_REG COP0
+       dli     v0, 0x27
+       dmtc0   v0, CP0_DCACHE_ERR_REG
+1:
        # Get my core id
        rdhwr   v0, $0
        # Jump the master to kernel_entry
index eb72b35cf04b5bf4f45dc6668b763b05fdce0bdf..35c80be92207beef97ebc536b31ae93d4f0a133a 100644 (file)
@@ -22,4 +22,7 @@
 #define R10000_LLSC_WAR                        0
 #define MIPS34K_MISSED_ITLB_WAR                0
 
+#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR      \
+       OCTEON_IS_MODEL(OCTEON_CN6XXX)
+
 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
index 2e54b4bff5cf59e744b9cb3a83e44bca747a9136..90dbe43c8d272d2cc5a95d1e4a2fbf20a4f1d4a6 100644 (file)
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (~mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
        "       .set    arch=r4000                      \n"     \
        "1:     ll      %0, %1  #custom_read_reg32      \n"     \
        "       .set    pop                             \n"     \
-       : "=r" (tmp), "=" GCC_OFF12_ASM() (*address)            \
-       : GCC_OFF12_ASM() (*address))
+       : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)                \
+       : GCC_OFF_SMALL_ASM() (*address))
 
 #define custom_write_reg32(address, tmp)                       \
        __asm__ __volatile__(                                   \
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
        "       "__beqz"%0, 1b                          \n"     \
        "       nop                                     \n"     \
        "       .set    pop                             \n"     \
-       : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address)           \
-       : "0" (tmp), GCC_OFF12_ASM() (*address))
+       : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)               \
+       : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
 
 #endif /* __ASM_REGOPS_H__ */
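GCC_OFF12_ASM() is renamed to GCC_OFF_SMALL_ASM() because R6 shrinks the LL/SC offset field, so the constraint can no longer promise a 12-bit offset. A rough sketch of the helper, assuming the asm/compiler.h definition follows the usual compiler-version split (treat the exact constraint letters as an assumption):

/* Sketch only: pick a memory constraint whose offset range is safe for ll/sc. */
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
#define GCC_OFF_SMALL_ASM()     "ZC"    /* compiler knows the target ISA's ll/sc offset range */
#else
#define GCC_OFF_SMALL_ASM()     "R"     /* older GCC: generic small-offset memory operand */
#endif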
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644 (file)
index 0000000..60570f2
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+       u64 movs;
+       u64 hilo;
+       u64 muls;
+       u64 divs;
+       u64 dsps;
+       u64 bops;
+       u64 traps;
+       u64 fpus;
+       u64 loads;
+       u64 stores;
+       u64 llsc;
+       u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+       u64 jrs;
+       u64 bltzl;
+       u64 bgezl;
+       u64 bltzll;
+       u64 bgezll;
+       u64 bltzall;
+       u64 bgezall;
+       u64 bltzal;
+       u64 bgezal;
+       u64 beql;
+       u64 bnel;
+       u64 blezl;
+       u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M)                                               \
+do {                                                                   \
+       u32 nir;                                                        \
+       int err;                                                        \
+                                                                       \
+       preempt_disable();                                              \
+       __this_cpu_inc(mipsr2emustats.M);                               \
+       err = __get_user(nir, (u32 __user *)regs->cp0_epc);             \
+       if (!err) {                                                     \
+               if (nir == BREAK_MATH)                                  \
+                       __this_cpu_inc(mipsr2bdemustats.M);             \
+       }                                                               \
+       preempt_enable();                                               \
+} while (0)
+
+#define MIPS_R2BR_STATS(M)                                     \
+do {                                                           \
+       preempt_disable();                                      \
+       __this_cpu_inc(mipsr2bremustats.M);                     \
+       preempt_enable();                                       \
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M)          do { } while (0)
+#define MIPS_R2BR_STATS(M)        do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+       u32     mask;
+       u32     code;
+       int     (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+                         const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; };
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU       (cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
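The header above only declares the emulator interface; the intended consumer is the reserved-instruction exception path, which tries mipsr2_decoder() before falling back to the usual trap handling. A simplified, hedged sketch of that call pattern (the helper name and fallback are illustrative, not the actual traps.c code):

/* Illustrative only: how the R2-on-R6 emulator hooks are meant to be used. */
static void handle_ri_sketch(struct pt_regs *regs)
{
	u32 inst;

	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    !__get_user(inst, (u32 __user *)regs->cp0_epc) &&
	    mipsr2_decoder(regs, inst) == 0)
		return;				/* legacy R2 instruction emulated */

	/* Not emulated (or emulator disabled): take the normal trap path. */
	do_trap_or_bp(regs, 0, "RI");
}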
index 5b720d8c2745b2e8b891f38c5256db82a1232bc5..fef004434096596ebb54ecf8834dc04e9f3fd4c4 100644 (file)
 #define MIPS_CONF5_NF          (_ULCAST_(1) << 0)
 #define MIPS_CONF5_UFR         (_ULCAST_(1) << 2)
 #define MIPS_CONF5_MRP         (_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB         (_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH         (_ULCAST_(1) << 5)
 #define MIPS_CONF5_FRE         (_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE         (_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do {                                                                      \
 #define write_c0_config6(val)  __write_32bit_c0_register($16, 6, val)
 #define write_c0_config7(val)  __write_32bit_c0_register($16, 7, val)
 
+#define read_c0_lladdr()       __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val)   __write_ulong_c0_register($17, 0, val)
 #define read_c0_maar()         __read_ulong_c0_register($17, 1)
 #define write_c0_maar(val)     __write_ulong_c0_register($17, 1, val)
 #define read_c0_maari()                __read_32bit_c0_register($17, 2)
@@ -1909,6 +1912,7 @@ __BUILD_SET_C0(config5)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
+__BUILD_SET_C0(pagegrain)
 __BUILD_SET_C0(brcm_config_0)
 __BUILD_SET_C0(brcm_bus_pll)
 __BUILD_SET_C0(brcm_reset)
index c436138945a84dca9f1f311e54a801090c396853..1afa1f986df8c42a06e5f34355de8f8f1325c4bf 100644 (file)
@@ -1,9 +1,12 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <linux/atomic.h>
+
 typedef struct {
        unsigned long asid[NR_CPUS];
        void *vdso;
+       atomic_t fp_mode_switching;
 } mm_context_t;
 
 #endif /* __ASM_MMU_H */
index 2f82568a3ee4cf2caa9e55f3e4b1d2e25eb26090..45914b59824c11a14a9ec76e6fe016dccc3eaaaf 100644 (file)
@@ -25,7 +25,6 @@ do {                                                                  \
        if (cpu_has_htw) {                                              \
                write_c0_pwbase(pgd);                                   \
                back_to_back_c0_hazard();                               \
-               htw_reset();                                            \
        }                                                               \
 } while (0)
 
@@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
+       atomic_set(&mm->context.fp_mode_switching, 0);
+
        return 0;
 }
 
@@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        unsigned long flags;
        local_irq_save(flags);
 
+       htw_stop();
        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
                get_new_mmu_context(next, cpu);
@@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
+       htw_start();
 
        local_irq_restore(flags);
 }
@@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 
        local_irq_save(flags);
 
+       htw_stop();
        /* Unconditionally get a new ASID.  */
        get_new_mmu_context(next, cpu);
 
@@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
        /* mark mmu ownership change */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
+       htw_start();
 
        local_irq_restore(flags);
 }
@@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
        unsigned long flags;
 
        local_irq_save(flags);
+       htw_stop();
 
        if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
                get_new_mmu_context(mm, cpu);
@@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
                /* will get a new context next time */
                cpu_context(cpu, mm) = 0;
        }
+       htw_start();
        local_irq_restore(flags);
 }
 
index 800fe578dc99a5312aa78445a8420dc9c466b811..0aaf9a01ea505bad4754b6d56f7eda8f19b9de6f 100644 (file)
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_PROC_FAMILY "MIPS32_R1 "
 #elif defined CONFIG_CPU_MIPS32_R2
 #define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
 #elif defined CONFIG_CPU_MIPS64_R1
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #elif defined CONFIG_CPU_MIPS64_R2
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
 #elif defined CONFIG_CPU_R3000
 #define MODULE_PROC_FAMILY "R3000 "
 #elif defined CONFIG_CPU_TX39XX
index 75739c83f07e74bb26ab5dbc0fd32c34401c838b..8d05d90698238e4deb6bc0b649a7929e3d0e2b76 100644 (file)
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
                " lbu   %[ticket], %[now_serving]\n"
                "4:\n"
                ".set pop\n" :
-               [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+               [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
                [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
                [my_ticket] "=r"(my_ticket)
            );
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644 (file)
index 0000000..0c9c3e7
--- /dev/null
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2014 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RST_DEFS_H__
+#define __CVMX_RST_DEFS_H__
+
+#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
+#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
+#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
+#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
+#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
+#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
+#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
+#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
+#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
+#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
+#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
+#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
+
+union cvmx_rst_boot {
+       uint64_t u64;
+       struct cvmx_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t chipkill:1;
+               uint64_t jtcsrdis:1;
+               uint64_t ejtagdis:1;
+               uint64_t romen:1;
+               uint64_t ckill_ppdis:1;
+               uint64_t jt_tstmode:1;
+               uint64_t vrm_err:1;
+               uint64_t reserved_37_56:20;
+               uint64_t c_mul:7;
+               uint64_t pnr_mul:6;
+               uint64_t reserved_21_23:3;
+               uint64_t lboot_oci:3;
+               uint64_t lboot_ext:6;
+               uint64_t lboot:10;
+               uint64_t rboot:1;
+               uint64_t rboot_pin:1;
+#else
+               uint64_t rboot_pin:1;
+               uint64_t rboot:1;
+               uint64_t lboot:10;
+               uint64_t lboot_ext:6;
+               uint64_t lboot_oci:3;
+               uint64_t reserved_21_23:3;
+               uint64_t pnr_mul:6;
+               uint64_t c_mul:7;
+               uint64_t reserved_37_56:20;
+               uint64_t vrm_err:1;
+               uint64_t jt_tstmode:1;
+               uint64_t ckill_ppdis:1;
+               uint64_t romen:1;
+               uint64_t ejtagdis:1;
+               uint64_t jtcsrdis:1;
+               uint64_t chipkill:1;
+#endif
+       } s;
+       struct cvmx_rst_boot_s cn70xx;
+       struct cvmx_rst_boot_s cn70xxp1;
+       struct cvmx_rst_boot_s cn78xx;
+};
+
+union cvmx_rst_cfg {
+       uint64_t u64;
+       struct cvmx_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t bist_delay:58;
+               uint64_t reserved_3_5:3;
+               uint64_t cntl_clr_bist:1;
+               uint64_t warm_clr_bist:1;
+               uint64_t soft_clr_bist:1;
+#else
+               uint64_t soft_clr_bist:1;
+               uint64_t warm_clr_bist:1;
+               uint64_t cntl_clr_bist:1;
+               uint64_t reserved_3_5:3;
+               uint64_t bist_delay:58;
+#endif
+       } s;
+       struct cvmx_rst_cfg_s cn70xx;
+       struct cvmx_rst_cfg_s cn70xxp1;
+       struct cvmx_rst_cfg_s cn78xx;
+};
+
+union cvmx_rst_ckill {
+       uint64_t u64;
+       struct cvmx_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_47_63:17;
+               uint64_t timer:47;
+#else
+               uint64_t timer:47;
+               uint64_t reserved_47_63:17;
+#endif
+       } s;
+       struct cvmx_rst_ckill_s cn70xx;
+       struct cvmx_rst_ckill_s cn70xxp1;
+       struct cvmx_rst_ckill_s cn78xx;
+};
+
+union cvmx_rst_ctlx {
+       uint64_t u64;
+       struct cvmx_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_10_63:54;
+               uint64_t prst_link:1;
+               uint64_t rst_done:1;
+               uint64_t rst_link:1;
+               uint64_t host_mode:1;
+               uint64_t reserved_4_5:2;
+               uint64_t rst_drv:1;
+               uint64_t rst_rcv:1;
+               uint64_t rst_chip:1;
+               uint64_t rst_val:1;
+#else
+               uint64_t rst_val:1;
+               uint64_t rst_chip:1;
+               uint64_t rst_rcv:1;
+               uint64_t rst_drv:1;
+               uint64_t reserved_4_5:2;
+               uint64_t host_mode:1;
+               uint64_t rst_link:1;
+               uint64_t rst_done:1;
+               uint64_t prst_link:1;
+               uint64_t reserved_10_63:54;
+#endif
+       } s;
+       struct cvmx_rst_ctlx_s cn70xx;
+       struct cvmx_rst_ctlx_s cn70xxp1;
+       struct cvmx_rst_ctlx_s cn78xx;
+};
+
+union cvmx_rst_delay {
+       uint64_t u64;
+       struct cvmx_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_32_63:32;
+               uint64_t warm_rst_dly:16;
+               uint64_t soft_rst_dly:16;
+#else
+               uint64_t soft_rst_dly:16;
+               uint64_t warm_rst_dly:16;
+               uint64_t reserved_32_63:32;
+#endif
+       } s;
+       struct cvmx_rst_delay_s cn70xx;
+       struct cvmx_rst_delay_s cn70xxp1;
+       struct cvmx_rst_delay_s cn78xx;
+};
+
+union cvmx_rst_eco {
+       uint64_t u64;
+       struct cvmx_rst_eco_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_32_63:32;
+               uint64_t eco_rw:32;
+#else
+               uint64_t eco_rw:32;
+               uint64_t reserved_32_63:32;
+#endif
+       } s;
+       struct cvmx_rst_eco_s cn78xx;
+};
+
+union cvmx_rst_int {
+       uint64_t u64;
+       struct cvmx_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_12_63:52;
+               uint64_t perst:4;
+               uint64_t reserved_4_7:4;
+               uint64_t rst_link:4;
+#else
+               uint64_t rst_link:4;
+               uint64_t reserved_4_7:4;
+               uint64_t perst:4;
+               uint64_t reserved_12_63:52;
+#endif
+       } s;
+       struct cvmx_rst_int_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_11_63:53;
+               uint64_t perst:3;
+               uint64_t reserved_3_7:5;
+               uint64_t rst_link:3;
+#else
+               uint64_t rst_link:3;
+               uint64_t reserved_3_7:5;
+               uint64_t perst:3;
+               uint64_t reserved_11_63:53;
+#endif
+       } cn70xx;
+       struct cvmx_rst_int_cn70xx cn70xxp1;
+       struct cvmx_rst_int_s cn78xx;
+};
+
+union cvmx_rst_ocx {
+       uint64_t u64;
+       struct cvmx_rst_ocx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_3_63:61;
+               uint64_t rst_link:3;
+#else
+               uint64_t rst_link:3;
+               uint64_t reserved_3_63:61;
+#endif
+       } s;
+       struct cvmx_rst_ocx_s cn78xx;
+};
+
+union cvmx_rst_power_dbg {
+       uint64_t u64;
+       struct cvmx_rst_power_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_3_63:61;
+               uint64_t str:3;
+#else
+               uint64_t str:3;
+               uint64_t reserved_3_63:61;
+#endif
+       } s;
+       struct cvmx_rst_power_dbg_s cn78xx;
+};
+
+union cvmx_rst_pp_power {
+       uint64_t u64;
+       struct cvmx_rst_pp_power_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_48_63:16;
+               uint64_t gate:48;
+#else
+               uint64_t gate:48;
+               uint64_t reserved_48_63:16;
+#endif
+       } s;
+       struct cvmx_rst_pp_power_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_4_63:60;
+               uint64_t gate:4;
+#else
+               uint64_t gate:4;
+               uint64_t reserved_4_63:60;
+#endif
+       } cn70xx;
+       struct cvmx_rst_pp_power_cn70xx cn70xxp1;
+       struct cvmx_rst_pp_power_s cn78xx;
+};
+
+union cvmx_rst_soft_prstx {
+       uint64_t u64;
+       struct cvmx_rst_soft_prstx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_1_63:63;
+               uint64_t soft_prst:1;
+#else
+               uint64_t soft_prst:1;
+               uint64_t reserved_1_63:63;
+#endif
+       } s;
+       struct cvmx_rst_soft_prstx_s cn70xx;
+       struct cvmx_rst_soft_prstx_s cn70xxp1;
+       struct cvmx_rst_soft_prstx_s cn78xx;
+};
+
+union cvmx_rst_soft_rst {
+       uint64_t u64;
+       struct cvmx_rst_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_1_63:63;
+               uint64_t soft_rst:1;
+#else
+               uint64_t soft_rst:1;
+               uint64_t reserved_1_63:63;
+#endif
+       } s;
+       struct cvmx_rst_soft_rst_s cn70xx;
+       struct cvmx_rst_soft_rst_s cn70xxp1;
+       struct cvmx_rst_soft_rst_s cn78xx;
+};
+
+#endif
index e8a1c2fd52cdd8f3b65ffe68bf3eefa3628fbaa2..92b377e36dac260f9100d9178fe2252bed85e061 100644 (file)
@@ -45,6 +45,7 @@
  */
 
 #define OCTEON_FAMILY_MASK     0x00ffff00
+#define OCTEON_PRID_MASK       0x00ffffff
 
 /* Flag bits in top byte */
 /* Ignores revision in model checks */
 #define OM_MATCH_6XXX_FAMILY_MODELS    0x40000000
 /* Match all cnf7XXX Octeon models. */
 #define OM_MATCH_F7XXX_FAMILY_MODELS   0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS     0x10000000
+#define OM_MATCH_FAMILY_MODELS         (OM_MATCH_5XXX_FAMILY_MODELS |  \
+                                        OM_MATCH_6XXX_FAMILY_MODELS |  \
+                                        OM_MATCH_F7XXX_FAMILY_MODELS | \
+                                        OM_MATCH_7XXX_FAMILY_MODELS)
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CN73XX_PASS1_0  0x000d9700
+#define OCTEON_CN73XX          (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X  (OCTEON_CN73XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN70XX_PASS1_0  0x000d9600
+#define OCTEON_CN70XX_PASS1_1  0x000d9601
+#define OCTEON_CN70XX_PASS1_2  0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0  0x000d9608
+
+#define OCTEON_CN70XX          (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X  (OCTEON_CN70XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X  (OCTEON_CN70XX_PASS2_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX          OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0  0x000d9500
+#define OCTEON_CN78XX_PASS1_1  0x000d9501
+#define OCTEON_CN78XX_PASS2_0  0x000d9508
+
+#define OCTEON_CN78XX          (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X  (OCTEON_CN78XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X  (OCTEON_CN78XX_PASS2_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX          (0x000d9540 | OM_CHECK_SUBMODEL)
 
 /*
  * CNF7XXX models with new revision encoding
  */
 #define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_1  0x000d9401
 
 #define OCTEON_CNF71XX         (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN68XX_PASS1_1  0x000d9101
 #define OCTEON_CN68XX_PASS1_2  0x000d9102
 #define OCTEON_CN68XX_PASS2_0  0x000d9108
+#define OCTEON_CN68XX_PASS2_1   0x000d9109
+#define OCTEON_CN68XX_PASS2_2   0x000d910a
 
 #define OCTEON_CN68XX          (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN68XX_PASS1_X  (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS1_X  (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS2_X  (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN62XX is same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX           OCTEON_CN63XX
+
 #define OCTEON_CN61XX_PASS1_0  0x000d9300
+#define OCTEON_CN61XX_PASS1_1   0x000d9301
 
 #define OCTEON_CN61XX          (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN61XX_PASS1_X  (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN60XX is same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX           OCTEON_CN61XX
+
 /*
  * CN5XXX models with new revision encoding
  */
 #define OCTEON_CN58XX_PASS2_2  0x000d030a
 #define OCTEON_CN58XX_PASS2_3  0x000d030b
 
-#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN58XX_PASS1_X  (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS2_X  (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS1    OCTEON_CN58XX_PASS1_X
 #define OCTEON_CN3XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
 #define OCTEON_CN5XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
 #define OCTEON_CN6XXX          (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
-
-/* These are used to cover entire families of OCTEON processors */
-#define OCTEON_FAM_1           (OCTEON_CN3XXX)
-#define OCTEON_FAM_PLUS                (OCTEON_CN5XXX)
-#define OCTEON_FAM_1_PLUS      (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
-#define OCTEON_FAM_2           (OCTEON_CN6XXX)
+#define OCTEON_CNF7XXX         (OCTEON_CNF71XX_PASS1_0 | \
+                                OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX          (OCTEON_CN78XX_PASS1_0 | \
+                                OM_MATCH_7XXX_FAMILY_MODELS)
 
 /* The revision byte (low byte) has two different encodings.
  * CN3XXX:
  *     <4>:   alternate package
  *     <3:0>: revision
  *
- * CN5XXX:
+ * CN5XXX and older models:
  *
  *     bits
  *     <7>:   reserved (0)
 /* CN5XXX and later use different layout of bits in the revision ID field */
 #define OCTEON_58XX_FAMILY_MASK             OCTEON_38XX_FAMILY_MASK
 #define OCTEON_58XX_FAMILY_REV_MASK  0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK      0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK      0x00ffff40
 #define OCTEON_58XX_MODEL_REV_MASK   (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
-#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
 #define OCTEON_5XXX_MODEL_MASK      0x00ff0fc0
 
-/* forward declarations */
 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
 static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
 
 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
 
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical to or belongs to the OCTEON
+ * model group specified in arg_model.
+ */
 /* NOTE: This for internal use only! */
 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)             \
 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0)  && ( \
@@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
                ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
                        && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
                ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL)  \
-                       && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
+                       && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
                ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
-                       && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
                ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
-                       && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) ||  \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+               ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+               ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
                ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
                        && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
                )))
@@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
 {
        uint32_t cpuid = cvmx_get_proc_id();
 
-       /*
-        * Check for special case of mismarked 3005 samples. We only
-        * need to check if the sub model isn't being ignored
-        */
-       if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
-               if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
-                       cpuid |= 0x10;
-       }
        return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
 }
 
@@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
 #define OCTEON_IS_COMMON_BINARY() 1
 #undef OCTEON_MODEL
 
+#define OCTEON_IS_OCTEON1()    OCTEON_IS_MODEL(OCTEON_CN3XXX)
+#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
+#define OCTEON_IS_OCTEON2()                                            \
+       (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3()    OCTEON_IS_MODEL(OCTEON_CN7XXX)
+
+#define OCTEON_IS_OCTEON1PLUS()        (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
+
 const char *__init octeon_model_get_string(uint32_t chip_id);
 
 /*
  * Return the octeon family, i.e., ProcessorID of the PrID register.
+ *
+ * @return the octeon family on success, ((uint32_t)-1) on error.
  */
 static inline uint32_t cvmx_get_octeon_family(void)
 {
index 6dfefd2d5cdfd7a39c0679da021a7338c298e490..0415965708565c7ddf8d5fe3451f67e907ce9a9c 100644 (file)
@@ -9,6 +9,7 @@
 #define __ASM_OCTEON_OCTEON_H
 
 #include <asm/octeon/cvmx.h>
+#include <asm/bitfield.h>
 
 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
                                                uint64_t alignment,
@@ -53,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long);
 #define OCTOEN_SERIAL_LEN      20
 
 struct octeon_boot_descriptor {
+#ifdef __BIG_ENDIAN_BITFIELD
        /* Start of block referenced by assembly code - do not change! */
        uint32_t desc_version;
        uint32_t desc_size;
@@ -104,77 +106,149 @@ struct octeon_boot_descriptor {
        uint8_t mac_addr_base[6];
        uint8_t mac_addr_count;
        uint64_t cvmx_desc_vaddr;
+#else
+       uint32_t desc_size;
+       uint32_t desc_version;
+       uint64_t stack_top;
+       uint64_t heap_base;
+       uint64_t heap_end;
+       /* Only used by bootloader */
+       uint64_t entry_point;
+       uint64_t desc_vaddr;
+       /* End of this block referenced by assembly code - do not change! */
+       uint32_t stack_size;
+       uint32_t exception_base_addr;
+       uint32_t argc;
+       uint32_t heap_size;
+       /*
+        * Argc count for application.
+        * Warning low bit scrambled in little-endian.
+        */
+       uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define  BOOT_FLAG_INIT_CORE           (1 << 0)
+#define  OCTEON_BL_FLAG_DEBUG          (1 << 1)
+#define  OCTEON_BL_FLAG_NO_MAGIC       (1 << 2)
+       /* If set, use uart1 for console */
+#define  OCTEON_BL_FLAG_CONSOLE_UART1  (1 << 3)
+       /* If set, use PCI console */
+#define  OCTEON_BL_FLAG_CONSOLE_PCI    (1 << 4)
+       /* Call exit on break on serial port */
+#define  OCTEON_BL_FLAG_BREAK          (1 << 5)
+
+       uint32_t core_mask;
+       uint32_t flags;
+       /* physical address of free memory descriptor block. */
+       uint32_t phy_mem_desc_addr;
+       /* DRAM size in megabytes. */
+       uint32_t dram_size;
+       /* CPU clock speed, in hz. */
+       uint32_t eclock_hz;
+       /* used to pass flags from app to debugger. */
+       uint32_t debugger_flags_base_addr;
+       /* SPI4 clock in hz. */
+       uint32_t spi_clock_hz;
+       /* DRAM clock speed, in hz. */
+       uint32_t dclock_hz;
+       uint8_t chip_rev_minor;
+       uint8_t chip_rev_major;
+       uint16_t chip_type;
+       uint8_t board_rev_minor;
+       uint8_t board_rev_major;
+       uint16_t board_type;
+
+       uint64_t unused1[4]; /* Not even filled in by bootloader. */
+
+       uint64_t cvmx_desc_vaddr;
+#endif
 };
 
 union octeon_cvmemctl {
        uint64_t u64;
        struct {
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t tlbbist:1;
+               __BITFIELD_FIELD(uint64_t tlbbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t l1cbist:1;
+               __BITFIELD_FIELD(uint64_t l1cbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t l1dbist:1;
+               __BITFIELD_FIELD(uint64_t l1dbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t dcmbist:1;
+               __BITFIELD_FIELD(uint64_t dcmbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t ptgbist:1;
+               __BITFIELD_FIELD(uint64_t ptgbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t wbfbist:1;
+               __BITFIELD_FIELD(uint64_t wbfbist:1,
                /* Reserved */
-               uint64_t reserved:22;
+               __BITFIELD_FIELD(uint64_t reserved:17,
+               /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
+                * This field selects between the TLB replacement policies:
+                * bitmask LRU or NLU. Bitmask LRU maintains a mask of
+                * recently used TLB entries and avoids them as new entries
+                * are allocated. NLU simply guarantees that the next
+                * allocation is not the last used TLB entry. */
+               __BITFIELD_FIELD(uint64_t tlbnlu:1,
+               /* OCTEON II - Selects the bit in the counter used for
+                * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
+                * cycles. If not already released, the cnMIPS II core will
+                * always release a given PAUSE instruction within
+                * 2^(8+PAUSETIME). If the counter trip happens to line up,
+                * the cnMIPS II core may release the PAUSE instantly. */
+               __BITFIELD_FIELD(uint64_t pausetime:3,
+               /* OCTEON II - This field is an extension of
+                * CvmMemCtl[DIDTTO] */
+               __BITFIELD_FIELD(uint64_t didtto2:1,
                /* R/W If set, marked write-buffer entries time out
                 * the same as other entries; if clear, marked
                 * write-buffer entries use the maximum timeout. */
-               uint64_t dismarkwblongto:1;
+               __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
                /* R/W If set, a merged store does not clear the
                 * write-buffer entry timeout state. */
-               uint64_t dismrgclrwbto:1;
+               __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
                /* R/W Two bits that are the MSBs of the resultant
                 * CVMSEG LM word location for an IOBDMA. The other 8
                 * bits come from the SCRADDR field of the IOBDMA. */
-               uint64_t iobdmascrmsb:2;
+               __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
                /* R/W If set, SYNCWS and SYNCS only order marked
                 * stores; if clear, SYNCWS and SYNCS only order
                 * unmarked stores. SYNCWSMARKED has no effect when
                 * DISSYNCWS is set. */
-               uint64_t syncwsmarked:1;
+               __BITFIELD_FIELD(uint64_t syncwsmarked:1,
                /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
                 * SYNC. */
-               uint64_t dissyncws:1;
+               __BITFIELD_FIELD(uint64_t dissyncws:1,
                /* R/W If set, no stall happens on write buffer
                 * full. */
-               uint64_t diswbfst:1;
+               __BITFIELD_FIELD(uint64_t diswbfst:1,
                /* R/W If set (and SX set), supervisor-level
                 * loads/stores can use XKPHYS addresses with
                 * VA<48>==0 */
-               uint64_t xkmemenas:1;
+               __BITFIELD_FIELD(uint64_t xkmemenas:1,
                /* R/W If set (and UX set), user-level loads/stores
                 * can use XKPHYS addresses with VA<48>==0 */
-               uint64_t xkmemenau:1;
+               __BITFIELD_FIELD(uint64_t xkmemenau:1,
                /* R/W If set (and SX set), supervisor-level
                 * loads/stores can use XKPHYS addresses with
                 * VA<48>==1 */
-               uint64_t xkioenas:1;
+               __BITFIELD_FIELD(uint64_t xkioenas:1,
                /* R/W If set (and UX set), user-level loads/stores
                 * can use XKPHYS addresses with VA<48>==1 */
-               uint64_t xkioenau:1;
+               __BITFIELD_FIELD(uint64_t xkioenau:1,
                /* R/W If set, all stores act as SYNCW (NOMERGE must
                 * be set when this is set) RW, reset to 0. */
-               uint64_t allsyncw:1;
+               __BITFIELD_FIELD(uint64_t allsyncw:1,
                /* R/W If set, no stores merge, and all stores reach
                 * the coherent bus in order. */
-               uint64_t nomerge:1;
+               __BITFIELD_FIELD(uint64_t nomerge:1,
                /* R/W Selects the bit in the counter used for DID
                 * time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
                 * 2^14. Actual time-out is between 1x and 2x this
                 * interval. For example, with DIDTTO=3, expiration
                 * interval is between 16K and 32K. */
-               uint64_t didtto:2;
+               __BITFIELD_FIELD(uint64_t didtto:2,
                /* R/W If set, the (mem) CSR clock never turns off. */
-               uint64_t csrckalwys:1;
+               __BITFIELD_FIELD(uint64_t csrckalwys:1,
                /* R/W If set, mclk never turns off. */
-               uint64_t mclkalwys:1;
+               __BITFIELD_FIELD(uint64_t mclkalwys:1,
                /* R/W Selects the bit in the counter used for write
                 * buffer flush time-outs (WBFLT+11) is the bit
                 * position in an internal counter used to determine
@@ -182,25 +256,26 @@ union octeon_cvmemctl {
                 * 2x this interval. For example, with WBFLT = 0, a
                 * write buffer expires between 2K and 4K cycles after
                 * the write buffer entry is allocated. */
-               uint64_t wbfltime:3;
+               __BITFIELD_FIELD(uint64_t wbfltime:3,
                /* R/W If set, do not put Istream in the L2 cache. */
-               uint64_t istrnol2:1;
+               __BITFIELD_FIELD(uint64_t istrnol2:1,
                /* R/W The write buffer threshold. */
-               uint64_t wbthresh:4;
+               __BITFIELD_FIELD(uint64_t wbthresh:4,
                /* Reserved */
-               uint64_t reserved2:2;
+               __BITFIELD_FIELD(uint64_t reserved2:2,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * kernel/debug mode. */
-               uint64_t cvmsegenak:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenak:1,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * supervisor mode. */
-               uint64_t cvmsegenas:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenas:1,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * user mode. */
-               uint64_t cvmsegenau:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenau:1,
                /* R/W Size of local memory in cache blocks, 54 (6912
                 * bytes) is max legal value. */
-               uint64_t lmemsz:6;
+               __BITFIELD_FIELD(uint64_t lmemsz:6,
+               ;)))))))))))))))))))))))))))))))))
        } s;
 };
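The switch to __BITFIELD_FIELD above is what lets a single field list serve both bitfield endiannesses: the macro emits the field before the remaining members on big-endian builds and after them on little-endian builds, which is also why the union now ends in a long run of closing parentheses. A minimal sketch of the helper, assuming the usual asm/bitfield.h definition rather than quoting it:

/* Sketch only: order a bitfield member relative to the rest of the struct. */
#ifdef __BIG_ENDIAN_BITFIELD
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#else
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#endif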
 
@@ -224,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
        cvmx_read64_uint32(address ^ 4);
 }
 
+/* Octeon multiplier save/restore routines from octeon_switch.S */
+void octeon_mult_save(void);
+void octeon_mult_restore(void);
+void octeon_mult_save_end(void);
+void octeon_mult_restore_end(void);
+void octeon_mult_save3(void);
+void octeon_mult_save3_end(void);
+void octeon_mult_save2(void);
+void octeon_mult_save2_end(void);
+void octeon_mult_restore3(void);
+void octeon_mult_restore3_end(void);
+void octeon_mult_restore2(void);
+void octeon_mult_restore2_end(void);
 
 /**
  * Read a 32bit value from the Octeon NPI register space
index 69529624a0050713b120ecfd3b430a601dfb567b..193b4c6b7541a774f3a0ccfbaa8bb7f7add55b83 100644 (file)
@@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
+#ifdef CONFIG_PCI_DOMAINS
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
        struct pci_controller *hose = bus->sysdata;
        return hose->need_domain_info;
 }
+#endif /* CONFIG_PCI_DOMAINS */
 
 #endif /* __KERNEL__ */
 
index fc807aa5ec8d7593ef8bda4c13e184ae31d79a70..91747c282bb3fb3f1f6560cb5a722089aac9741e 100644 (file)
@@ -35,7 +35,7 @@
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /*
- * The following bits are directly used by the TLB hardware
+ * The following bits are implemented by the TLB hardware
  */
 #define _PAGE_GLOBAL_SHIFT     0
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
 #define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
-#define _PAGE_SILENT_READ      _PAGE_VALID
-#define _PAGE_SILENT_WRITE     _PAGE_DIRTY
-
 #define _PFN_SHIFT             (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
- * The following are implemented by software
+ * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT    0
-#define _PAGE_PRESENT          (1 <<  _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT       1
-#define _PAGE_READ             (1 <<  _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT      2
-#define _PAGE_WRITE            (1 <<  _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT   3
-#define _PAGE_ACCESSED         (1 <<  _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT   4
-#define _PAGE_MODIFIED         (1 <<  _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT       (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ             (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT      (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT   (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED         (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
 /*
- * And these are the hardware TLB bits
+ * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT     8
-#define _PAGE_GLOBAL           (1 <<  _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT      9
-#define _PAGE_VALID            (1 <<  _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ      (1 <<  _PAGE_VALID_SHIFT)       /* synonym  */
-#define _PAGE_DIRTY_SHIFT      10
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_MODIFIED_SHIFT + 4)
+#define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE     (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT  11
+#define _CACHE_UNCACHED_SHIFT  (_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_UNCACHED                (1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK            (1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK            _CACHE_UNCACHED
 
-#else /* 'Normal' r4K case */
+#define _PFN_SHIFT             PAGE_SHIFT
+
+#else
 /*
  * When using the RI/XI bit support, we have 13 bits of flags below
  * the physical address. The RI/XI bits are placed such that a SRL 5
 
 /*
  * The following bits are implemented in software
- *
- * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
  */
-#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT       (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
 /* huge tlb page */
 #define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE             (1 << _PAGE_HUGE_SHIFT)
-#else
-#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
-#endif
-
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
 #else
+#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING                ({BUG(); 1; })  /* Dummy value */
 #endif
 
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
-/* synonym                */
-#define _PAGE_SILENT_READ      (_PAGE_VALID)
-
-/* The MIPS dirty bit     */
 #define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE     (_PAGE_DIRTY)
-
 #define _CACHE_SHIFT           (_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_MASK            (7 << _CACHE_SHIFT)
 
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
-#ifndef _PFN_SHIFT
-#define _PFN_SHIFT                 PAGE_SHIFT
-#endif
+#define _PAGE_SILENT_READ      _PAGE_VALID
+#define _PAGE_SILENT_WRITE     _PAGE_DIRTY
+
 #define _PFN_MASK              (~((1 << (_PFN_SHIFT)) - 1))
 
 #ifndef _PAGE_NO_READ
 #ifndef _PAGE_NO_EXEC
 #define _PAGE_NO_EXEC ({BUG(); 0; })
 #endif
-#ifndef _PAGE_GLOBAL_SHIFT
-#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
-#endif
 
 
 #ifndef __ASSEMBLY__
@@ -266,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 #endif
 
 #define __READABLE     (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
-#define __WRITEABLE    (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
+#define __WRITEABLE    (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED |      \
+                        _PFN_MASK | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
index 583ff42154794da86c5cc9b64ae9e81e4507e057..bef782c4a44bd33b20f24dda9422f401e2324acc 100644 (file)
@@ -99,29 +99,35 @@ extern void paging_init(void);
 
 #define htw_stop()                                                     \
 do {                                                                   \
-       if (cpu_has_htw)                                                \
-               write_c0_pwctl(read_c0_pwctl() &                        \
-                              ~(1 << MIPS_PWCTL_PWEN_SHIFT));          \
+       unsigned long flags;                                            \
+                                                                       \
+       if (cpu_has_htw) {                                              \
+               local_irq_save(flags);                                  \
+               if(!raw_current_cpu_data.htw_seq++) {                   \
+                       write_c0_pwctl(read_c0_pwctl() &                \
+                                      ~(1 << MIPS_PWCTL_PWEN_SHIFT));  \
+                       back_to_back_c0_hazard();                       \
+               }                                                       \
+               local_irq_restore(flags);                               \
+       }                                                               \
 } while(0)
 
 #define htw_start()                                                    \
 do {                                                                   \
-       if (cpu_has_htw)                                                \
-               write_c0_pwctl(read_c0_pwctl() |                        \
-                              (1 << MIPS_PWCTL_PWEN_SHIFT));           \
-} while(0)
-
-
-#define htw_reset()                                                    \
-do {                                                                   \
+       unsigned long flags;                                            \
+                                                                       \
        if (cpu_has_htw) {                                              \
-               htw_stop();                                             \
-               back_to_back_c0_hazard();                               \
-               htw_start();                                            \
-               back_to_back_c0_hazard();                               \
+               local_irq_save(flags);                                  \
+               if (!--raw_current_cpu_data.htw_seq) {                  \
+                       write_c0_pwctl(read_c0_pwctl() |                \
+                                      (1 << MIPS_PWCTL_PWEN_SHIFT));   \
+                       back_to_back_c0_hazard();                       \
+               }                                                       \
+               local_irq_restore(flags);                               \
        }                                                               \
 } while(0)
 
+
 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
        pte_t pteval);
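With htw_reset() gone, htw_stop()/htw_start() pairs may now nest freely: the per-CPU htw_seq counter ensures the hardware walker is only reprogrammed when the outermost pair toggles it. A small illustrative trace of that behaviour, assuming the macros above (comments only, not kernel code):

/* Illustrative nesting, not taken from the sources. */
htw_stop();		/* htw_seq 0 -> 1: PWEn cleared, hazard barrier   */
    htw_stop();		/* htw_seq 1 -> 2: already stopped, no CP0 write  */
    htw_start();	/* htw_seq 2 -> 1: still stopped                  */
htw_start();		/* htw_seq 1 -> 0: PWEn set again, hazard barrier */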
 
@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 {
        pte_t null = __pte(0);
 
+       htw_stop();
        /* Preserve global status for the pair */
        if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
                null.pte_low = null.pte_high = _PAGE_GLOBAL;
 
        set_pte_at(mm, addr, ptep, null);
-       htw_reset();
+       htw_start();
 }
 #else
 
@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+       htw_stop();
 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
        else
 #endif
                set_pte_at(mm, addr, ptep, __pte(0));
-       htw_reset();
+       htw_start();
 }
 #endif
 
@@ -334,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
        return pte;
 }
 
-#ifdef _PAGE_HUGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE; }
 
 static inline pte_t pte_mkhuge(pte_t pte)
@@ -342,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
 }
-#endif /* _PAGE_HUGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 #endif
 static inline int pte_special(pte_t pte)       { return 0; }
 static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
index f1df4cb4a286dc8f76f1186ca9b2b1c439ce1764..b5dcbee01fd7a52641584cbbf8b80848f7c6f4b9 100644 (file)
@@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count;
 #define TASK_SIZE      0x7fff8000UL
 #endif
 
-#ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
-#endif
 
 #define TASK_IS_32BIT_ADDR 1
 
@@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count;
 #define TASK_SIZE32    0x7fff8000UL
 #define TASK_SIZE64    0x10000000000UL
 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
-
-#ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE64
-#endif
-
 
 #define TASK_SIZE_OF(tsk)                                              \
        (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
@@ -211,6 +205,8 @@ struct octeon_cop2_state {
        unsigned long   cop2_gfm_poly;
        /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
        unsigned long   cop2_gfm_result[2];
+       /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
+       unsigned long   cop2_sha3[2];
 };
 #define COP2_INIT                                              \
        .cp2                    = {0,},
@@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p);
 
 #endif
 
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+                                   unsigned int value);
+
+#define GET_FP_MODE(task)              mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value)                mips_set_process_fp_mode(task, value)
+
 #endif /* _ASM_PROCESSOR_H */
index eaa26270a5e574bae3db56202a22434c4c4b930f..8ebc2aa5f3e1331fd0c84f2ba54675a4128a06a2 100644 (file)
@@ -24,13 +24,6 @@ struct boot_param_header;
 extern void __dt_setup_arch(void *bph);
 extern int __dt_register_buses(const char *bus0, const char *bus1);
 
-#define dt_setup_arch(sym)                                             \
-({                                                                     \
-       extern char __dtb_##sym##_begin[];                              \
-                                                                       \
-       __dt_setup_arch(__dtb_##sym##_begin);                           \
-})
-
 #else /* CONFIG_OF */
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
index fc783f843bdc4272ccecfeddab877044a418703e..ffc320389f40a011ac6c66ef9c453fce23e38c34 100644 (file)
@@ -40,8 +40,8 @@ struct pt_regs {
        unsigned long cp0_cause;
        unsigned long cp0_epc;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-       unsigned long long mpl[3];        /* MTM{0,1,2} */
-       unsigned long long mtp[3];        /* MTP{0,1,2} */
+       unsigned long long mpl[6];        /* MTM{0-5} */
+       unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
 } __aligned(8);
 
index e293a8d89a6da590a3e46be5fccc60f1fc2c1983..1b22d2da88a1ec1b76e42dfde3cbff5cb5b422c2 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm/asm.h>
 #include <asm/cacheops.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
-       "       .set    arch=r4000                              \n"     \
+       "       .set "MIPS_ISA_ARCH_LEVEL"                      \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
-       "       .set    arch=r4000              \n"             \
+       "       .set "MIPS_ISA_ARCH_LEVEL"      \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
        cache_op(Page_Invalidate_T, addr);
 }
 
+#ifndef CONFIG_CPU_MIPSR6
 #define cache16_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr)
                : "r" (base),                                           \
                  "i" (op));
 
+#else
+/*
+ * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
+ * This means we now need to increment the base register before we flush
+ * more cache lines.
+ */
+#define cache16_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x010(%0)\n"     \
+       "       cache %1, 0x020(%0); cache %1, 0x030(%0)\n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x050(%0)\n"     \
+       "       cache %1, 0x060(%0); cache %1, 0x070(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x090(%0)\n"     \
+       "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"     \
+       "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"     \
+       "       addiu $1, $0, 0x100                     \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x010($1)\n"     \
+       "       cache %1, 0x020($1); cache %1, 0x030($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x050($1)\n"     \
+       "       cache %1, 0x060($1); cache %1, 0x070($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x090($1)\n"     \
+       "       cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"     \
+       "       cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache32_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x020(%0)\n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x060(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       addiu $1, $1, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       addiu $1, $1, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache64_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x040(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache128_unroll32(base,op)                             \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 /*
  * Perform the cache operation specified by op using a user mode virtual
  * address while in kernel mode.
index 753275accd1892142151169ed8ca722e41742d14..195db5045ae57fa972096417a41ba76b4048a4e5 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _ASM_SGIALIB_H
 #define _ASM_SGIALIB_H
 
+#include <linux/compiler.h>
 #include <asm/sgiarcs.h>
 
 extern struct linux_romvec *romvec;
@@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 
 /* Misc. routines. */
-extern VOID ArcReboot(VOID) __attribute__((noreturn));
-extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn));
+extern VOID ArcHalt(VOID) __noreturn;
+extern VOID ArcPowerDown(VOID) __noreturn;
+extern VOID ArcRestart(VOID) __noreturn;
+extern VOID ArcReboot(VOID) __noreturn;
+extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
 extern VOID ArcFlushAllCaches(VOID);
 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
 
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
deleted file mode 100644 (file)
index dd9a762..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
- * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
- */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-/*
- * Duplicated here because of <asm-generic/siginfo.h> braindamage ...
- */
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
-       if (from->si_code < 0)
-               memcpy(to, from, sizeof(*to));
-       else
-               /* _sigchld is currently the largest know union member */
-               memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_SIGINFO_H */
index c6d06d383ef90df1cf7bb8a4f69aaa641b40e7fd..b4548690ade9916e1d94e844641a505fad43bea4 100644 (file)
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                        "       bltz    %1, 1b                          \n"
                        "        addu   %1, 1                           \n"
                        "2:     sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
        smp_llsc_mb();
 }
 
-/* Note the use of sub, not subu which will make the kernel die with an
-   overflow exception if we ever try to unlock an rwlock that is already
-   unlocked or is being held by a writer.  */
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1:     ll      %1, %2          # arch_read_unlock      \n"
-               "       sub     %1, 1                                   \n"
+               "       addiu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_unlock      \n"
-                       "       sub     %1, 1                           \n"
+                       "       addiu   %1, -1                          \n"
                        "       sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                        "       bnez    %1, 1b                          \n"
                        "        lui    %1, 0x8000                      \n"
                        "2:     sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                __asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        }
 
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                        "       sc      %1, %0                          \n"
                        "       li      %2, 1                           \n"
                        "2:                                             \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
                          "=&r" (ret)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
 
index 0b89006e490788ffcc26a9225762c66f4bd4ea8b..0f90d88e464d3cc005ab3ae0411d6996c5d41e1a 100644 (file)
@@ -1,10 +1,10 @@
 #ifndef _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_MIPS_SPRAM)
 extern __init void spram_config(void);
 #else
 static inline void spram_config(void) { };
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_MIPS_SPRAM */
 
 #endif /* _MIPS_SPRAM_H */
index b188c797565ce48bac812aacd98922ef31c00180..28d6d9364bd1f2c431df08c72f58262e5297ec5c 100644 (file)
@@ -40,7 +40,7 @@
                LONG_S  v1, PT_HI(sp)
                mflhxu  v1
                LONG_S  v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                mfhi    v1
 #endif
 #ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
                LONG_S  $10, PT_R10(sp)
                LONG_S  $11, PT_R11(sp)
                LONG_S  $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_HI(sp)
                mflo    v1
 #endif
@@ -58,7 +58,7 @@
                LONG_S  $14, PT_R14(sp)
                LONG_S  $15, PT_R15(sp)
                LONG_S  $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_LO(sp)
 #endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
                mtlhx   $24
                LONG_L  $24, PT_LO(sp)
                mtlhx   $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                LONG_L  $24, PT_LO(sp)
                mtlo    $24
                LONG_L  $24, PT_HI(sp)
index b928b6f898cd5266efe89465d2dc87089f8f357c..e92d6c4b5ed192305b0b1f1605481f745cfadb10 100644 (file)
@@ -75,9 +75,12 @@ do {                                                                 \
 #endif
 
 #define __clear_software_ll_bit()                                      \
-do {                                                                   \
-       if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)       \
-               ll_bit = 0;                                             \
+do {   if (cpu_has_rw_llb) {                                           \
+               write_c0_lladdr(0);                                     \
+       } else {                                                        \
+               if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
+                       ll_bit = 0;                                     \
+       }                                                               \
 } while (0)
 
 #define switch_to(prev, next, last)                                    \
index 9e1295f874f0c143f3924aac7e606b93125ed9ce..55ed6602204cae5ae1219ee15f5a98b2c9f2813e 100644 (file)
@@ -28,7 +28,7 @@ struct thread_info {
        unsigned long           tp_value;       /* thread pointer */
        __u32                   cpu;            /* current CPU */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
-
+       int                     r2_emul_return; /* 1 => Returning from R2 emulator */
        mm_segment_t            addr_limit;     /*
                                                 * thread address space limit:
                                                 * 0x7fffffff for user-thead
index 89c22433b1c665ccf06c278f4bdf306642ffc828..fc0cf5ac0cf72ce28a38eec08a422719343ba4e4 100644 (file)
 enum major_op {
        spec_op, bcond_op, j_op, jal_op,
        beq_op, bne_op, blez_op, bgtz_op,
-       addi_op, addiu_op, slti_op, sltiu_op,
+       addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
        andi_op, ori_op, xori_op, lui_op,
        cop0_op, cop1_op, cop2_op, cop1x_op,
        beql_op, bnel_op, blezl_op, bgtzl_op,
-       daddi_op, daddiu_op, ldl_op, ldr_op,
+       daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
        spec2_op, jalx_op, mdmx_op, spec3_op,
        lb_op, lh_op, lwl_op, lw_op,
        lbu_op, lhu_op, lwr_op, lwu_op,
        sb_op, sh_op, swl_op, sw_op,
        sdl_op, sdr_op, swr_op, cache_op,
-       ll_op, lwc1_op, lwc2_op, pref_op,
-       lld_op, ldc1_op, ldc2_op, ld_op,
-       sc_op, swc1_op, swc2_op, major_3b_op,
-       scd_op, sdc1_op, sdc2_op, sd_op
+       ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
+       lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+       sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
+       scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
 };
 
 /*
@@ -83,9 +83,12 @@ enum spec3_op {
        swe_op    = 0x1f, bshfl_op  = 0x20,
        swle_op   = 0x21, swre_op   = 0x22,
        prefe_op  = 0x23, dbshfl_op = 0x24,
-       lbue_op   = 0x28, lhue_op   = 0x29,
-       lbe_op    = 0x2c, lhe_op    = 0x2d,
-       lle_op    = 0x2e, lwe_op    = 0x2f,
+       cache6_op = 0x25, sc6_op    = 0x26,
+       scd6_op   = 0x27, lbue_op   = 0x28,
+       lhue_op   = 0x29, lbe_op    = 0x2c,
+       lhe_op    = 0x2d, lle_op    = 0x2e,
+       lwe_op    = 0x2f, pref6_op  = 0x35,
+       ll6_op    = 0x36, lld6_op   = 0x37,
        rdhwr_op  = 0x3b
 };
 
@@ -112,7 +115,8 @@ enum cop_op {
        mfhc_op       = 0x03, mtc_op        = 0x04,
        dmtc_op       = 0x05, ctc_op        = 0x06,
        mthc0_op      = 0x06, mthc_op       = 0x07,
-       bc_op         = 0x08, cop_op        = 0x10,
+       bc_op         = 0x08, bc1eqz_op     = 0x09,
+       bc1nez_op     = 0x0d, cop_op        = 0x10,
        copm_op       = 0x18
 };
 
index d08f83f19db566899298e41ac06bf53f3a757400..2cb7fdead5702a5c8b5f0522e0c9f3e8e4ee96ba 100644 (file)
 
 #define HAVE_ARCH_SIGINFO_T
 
-/*
- * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked
- * by design ...
- */
-#define HAVE_ARCH_COPY_SIGINFO
-struct siginfo;
-
 /*
  * Careful to keep union _sifields from shifting ...
  */
@@ -35,8 +28,9 @@ struct siginfo;
 
 #define __ARCH_SIGSYS
 
-#include <asm-generic/siginfo.h>
+#include <uapi/asm-generic/siginfo.h>
 
+/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
 typedef struct siginfo {
        int si_signo;
        int si_code;
@@ -124,5 +118,6 @@ typedef struct siginfo {
 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
 
+#include <asm-generic/siginfo.h>
 
 #endif /* _UAPI_ASM_SIGINFO_H */
index 92987d1bbe5fe26e957ee8b9a1b5737f6181204c..d3d2ff2d76dc8f2e3643e9255226975e8e4699a4 100644 (file)
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP)     += smp-mt.o
 obj-$(CONFIG_MIPS_CMP)         += smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)         += smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_GIC_IPI)     += smp-gic.o
-obj-$(CONFIG_CPU_MIPSR2)       += spram.o
+obj-$(CONFIG_MIPS_SPRAM)       += spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_EARLY_PRINTK_8250)        += early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR)    += mips-r2-to-r6-emul.o
 
 CFLAGS_cpu-bugs64.o    = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
index 3b2dfdb4865fd9cbe208e41ca38e709db952c0e1..750d67ac41e9b19affe066d5be8d1f56f7363041 100644 (file)
@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
        OFFSET(TI_TP_VALUE, thread_info, tp_value);
        OFFSET(TI_CPU, thread_info, cpu);
        OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+       OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
        OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
        OFFSET(TI_REGS, thread_info, regs);
        DEFINE(_THREAD_SIZE, THREAD_SIZE);
@@ -381,6 +382,7 @@ void output_octeon_cop2_state_defines(void)
        OFFSET(OCTEON_CP2_GFM_RESULT,   octeon_cop2_state, cop2_gfm_result);
        OFFSET(OCTEON_CP2_HSH_DATW,     octeon_cop2_state, cop2_hsh_datw);
        OFFSET(OCTEON_CP2_HSH_IVW,      octeon_cop2_state, cop2_hsh_ivw);
+       OFFSET(OCTEON_CP2_SHA3,         octeon_cop2_state, cop2_sha3);
        OFFSET(THREAD_CP2,      task_struct, thread.cp2);
        OFFSET(THREAD_CVMSEG,   task_struct, thread.cvmseg.cvmseg);
        BLANK();
index 4d7d99d601cc13219e9d8f9631da6002b3d9df9d..c2e0f45ddf6cf48d05f7b97b5a095de0a04fca19 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
  * @returns:   -EFAULT on error and forces SIGBUS, and on success
  *             returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *             evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ *     Compact branches do not throw exceptions because they do
+ *     not have delay slots. The forbidden slot instruction ($PC+4)
+ *     is only executed if the branch was not taken. Otherwise the
+ *     forbidden slot is skipped entirely. This means that the
+ *     only possible reason to be here because of a MIPS R6 compact
+ *     branch instruction is that the forbidden slot has thrown one.
+ *     In that case the branch was not taken, so the EPC can be safely
+ *     set to EPC + 8.
  */
 int __compute_return_epc_for_insn(struct pt_regs *regs,
                                   union mips_instruction insn)
 {
-       unsigned int bit, fcr31, dspcontrol;
+       unsigned int bit, fcr31, dspcontrol, reg;
        long epc = regs->cp0_epc;
        int ret = 0;
 
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->regs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
+                       if (NO_R6EMU && insn.r_format.func == jr_op)
+                               goto sigill_r6;
                        regs->cp0_epc = regs->regs[insn.r_format.rs];
                        break;
                }
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         */
        case bcond_op:
                switch (insn.i_format.rt) {
-               case bltz_op:
                case bltzl_op:
+                       if (NO_R6EMU)
+                               goto sigill_r6;
+               case bltz_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->cp0_epc = epc;
                        break;
 
-               case bgez_op:
                case bgezl_op:
+                       if (NO_R6EMU)
+                               goto sigill_r6;
+               case bgez_op:
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bltzal_op:
                case bltzall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bltzall_op)) {
+                               ret = -SIGILL;
+                               break;
+                       }
                        regs->regs[31] = epc + 8;
+                       /*
+                        * OK we are here either because we hit a NAL
+                        * instruction or because we are emulating an
+                        * old bltzal{,l} one. Let's figure out what the
+                        * case really is.
+                        */
+                       if (!insn.i_format.rs) {
+                               /*
+                                * NAL or BLTZAL with rs == 0
+                                * Doesn't matter if we are R6 or not. The
+                                * result is the same
+                                */
+                               regs->cp0_epc += 4 +
+                                       (insn.i_format.simmediate << 2);
+                               break;
+                       }
+                       /* Now do the real thing for non-R6 BLTZAL{,L} */
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bgezal_op:
                case bgezall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bgezall_op)) {
+                               ret = -SIGILL;
+                               break;
+                       }
                        regs->regs[31] = epc + 8;
+                       /*
+                        * OK we are here either because we hit a BAL
+                        * instruction or because we are emulating an
+                        * old bgezal{,l} one. Let's figure out what the
+                        * case really is.
+                        */
+                       if (!insn.i_format.rs) {
+                               /*
+                                * BAL or BGEZAL with rs == 0
+                                * Doesn't matter if we are R6 or not. The
+                                * result is the same
+                                */
+                               regs->cp0_epc += 4 +
+                                       (insn.i_format.simmediate << 2);
+                               break;
+                       }
+                       /* Now do the real thing for non-R6 BGEZAL{,L} */
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bposge32_op:
                        if (!cpu_has_dsp)
-                               goto sigill;
+                               goto sigill_dsp;
 
                        dspcontrol = rddsp(0x01);
 
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
        /*
         * These are conditional and in i_format.
         */
-       case beq_op:
        case beql_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case beq_op:
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case bne_op:
        case bnel_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case bne_op:
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case blez_op: /* not really i_format */
-       case blezl_op:
+       case blezl_op: /* not really i_format */
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case blez_op:
+               /*
+                * Compact branches for R6 for the
+                * blez and blezl opcodes.
+                * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+                * BLEZ  | rs = rt != 0      == BGEZALC
+                * BLEZ  | rs != 0 | rt != 0 == BGEUC
+                * BLEZL | rs = 0 | rt != 0  == BLEZC
+                * BLEZL | rs = rt != 0      == BGEZC
+                * BLEZL | rs != 0 | rt != 0 == BGEC
+                *
+                * For real BLEZ{,L}, rt is always 0.
+                */
+
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = epc + 4;
+                       regs->cp0_epc += 8;
+                       break;
+               }
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] <= 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case bgtz_op:
        case bgtzl_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case bgtz_op:
+               /*
+                * Compact branches for R6 for the
+                * bgtz and bgtzl opcodes.
+                * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+                * BGTZ  | rs = rt != 0      == BLTZALC
+                * BGTZ  | rs != 0 | rt != 0 == BLTUC
+                * BGTZL | rs = 0 | rt != 0  == BGTZC
+                * BGTZL | rs = rt != 0      == BLTZC
+                * BGTZL | rs != 0 | rt != 0 == BLTC
+                *
+                * *ZALC variant for BGTZ && rt != 0
+                * For real BGTZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                           (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = epc + 4;
+                       regs->cp0_epc += 8;
+                       break;
+               }
+
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] > 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         * And now the FPA/cp1 branch instructions.
         */
        case cop1_op:
-               preempt_disable();
-               if (is_fpu_owner())
-                       fcr31 = read_32bit_cp1_register(CP1_STATUS);
-               else
-                       fcr31 = current->thread.fpu.fcr31;
-               preempt_enable();
-
-               bit = (insn.i_format.rt >> 2);
-               bit += (bit != 0);
-               bit += 23;
-               switch (insn.i_format.rt & 3) {
-               case 0: /* bc1f */
-               case 2: /* bc1fl */
-                       if (~fcr31 & (1 << bit)) {
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                               if (insn.i_format.rt == 2)
-                                       ret = BRANCH_LIKELY_TAKEN;
-                       } else
+               if (cpu_has_mips_r6 &&
+                   ((insn.i_format.rs == bc1eqz_op) ||
+                    (insn.i_format.rs == bc1nez_op))) {
+                       if (!used_math()) { /* First time FPU user */
+                               ret = init_fpu();
+                               if (ret && NO_R6EMU) {
+                                       ret = -ret;
+                                       break;
+                               }
+                               ret = 0;
+                               set_used_math();
+                       }
+                       lose_fpu(1);    /* Save FPU state for the emulator. */
+                       reg = insn.i_format.rt;
+                       bit = 0;
+                       switch (insn.i_format.rs) {
+                       case bc1eqz_op:
+                               /* Test bit 0 */
+                               if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
+                                   & 0x1)
+                                       bit = 1;
+                               break;
+                       case bc1nez_op:
+                               /* Test bit 0 */
+                               if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
+                                     & 0x1))
+                                       bit = 1;
+                               break;
+                       }
+                       own_fpu(1);
+                       if (bit)
+                               epc = epc + 4 +
+                                       (insn.i_format.simmediate << 2);
+                       else
                                epc += 8;
                        regs->cp0_epc = epc;
+
                        break;
+               } else {
 
-               case 1: /* bc1t */
-               case 3: /* bc1tl */
-                       if (fcr31 & (1 << bit)) {
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                               if (insn.i_format.rt == 3)
-                                       ret = BRANCH_LIKELY_TAKEN;
-                       } else
-                               epc += 8;
-                       regs->cp0_epc = epc;
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               fcr31 = read_32bit_cp1_register(CP1_STATUS);
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       bit = (insn.i_format.rt >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       switch (insn.i_format.rt & 3) {
+                       case 0: /* bc1f */
+                       case 2: /* bc1fl */
+                               if (~fcr31 & (1 << bit)) {
+                                       epc = epc + 4 +
+                                               (insn.i_format.simmediate << 2);
+                                       if (insn.i_format.rt == 2)
+                                               ret = BRANCH_LIKELY_TAKEN;
+                               } else
+                                       epc += 8;
+                               regs->cp0_epc = epc;
+                               break;
+
+                       case 1: /* bc1t */
+                       case 3: /* bc1tl */
+                               if (fcr31 & (1 << bit)) {
+                                       epc = epc + 4 +
+                                               (insn.i_format.simmediate << 2);
+                                       if (insn.i_format.rt == 3)
+                                               ret = BRANCH_LIKELY_TAKEN;
+                               } else
+                                       epc += 8;
+                               regs->cp0_epc = epc;
+                               break;
+                       }
                        break;
                }
-               break;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        epc += 8;
                regs->cp0_epc = epc;
                break;
+#else
+       case bc6_op:
+               /* Only valid for MIPS R6 */
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               regs->cp0_epc += 8;
+               break;
+       case balc6_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BALC */
+               regs->regs[31] = epc + 4;
+               epc += 4 + (insn.i_format.simmediate << 2);
+               regs->cp0_epc = epc;
+               break;
+       case beqzcjic_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BEQZC || JIC */
+               regs->cp0_epc += 8;
+               break;
+       case bnezcjialc_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BNEZC || JIALC */
+               if (insn.i_format.rs)
+                       regs->regs[31] = epc + 4;
+               regs->cp0_epc += 8;
+               break;
 #endif
+       case cbcond0_op:
+       case cbcond1_op:
+               /* Only valid for MIPS R6 */
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /*
+                * Compact branches:
+                * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
+                */
+               if (insn.i_format.rt && !insn.i_format.rs)
+                       regs->regs[31] = epc + 4;
+               regs->cp0_epc += 8;
+               break;
        }
 
        return ret;
 
-sigill:
+sigill_dsp:
        printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
        force_sig(SIGBUS, current);
        return -EFAULT;
+sigill_r6:
+       pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+               current->comm);
+       force_sig(SIGILL, current);
+       return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
 
index 6acaad0480af366830c77be996e9dba57fd8ad95..82bd2b278a243602dbf4c7ec15f8366af1ecf059 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
 
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
@@ -40,7 +39,7 @@ int cp0_timer_irq_installed;
 
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
-       const int r2 = cpu_has_mips_r2;
+       const int r2 = cpu_has_mips_r2_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();
 
@@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev)
  */
 static int c0_compare_int_pending(void)
 {
-#ifdef CONFIG_MIPS_GIC
-       if (gic_present)
-               return gic_get_timer_pending();
-#endif
+       /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
        return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
 }
 
index 0384b05ab5a02413cbcb11a163375029f285255f..55b759a0019e61671d78919bc5b143ba5ff4a94d 100644 (file)
@@ -99,11 +99,11 @@ not_nmi:
        xori    t2, t1, 0x7
        beqz    t2, 1f
         li     t3, 32
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
        sllv    t1, t3, t1
 1:     /* At this point t1 == I-cache sets per way */
        _EXT    t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-       addi    t2, t2, 1
+       addiu   t2, t2, 1
        mul     t1, t1, t0
        mul     t1, t1, t2
 
@@ -126,11 +126,11 @@ icache_done:
        xori    t2, t1, 0x7
        beqz    t2, 1f
         li     t3, 32
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
        sllv    t1, t3, t1
 1:     /* At this point t1 == D-cache sets per way */
        _EXT    t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-       addi    t2, t2, 1
+       addiu   t2, t2, 1
        mul     t1, t1, t0
        mul     t1, t1, t2
 
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
        mfc0    t0, CP0_MVPCONF0
        srl     t0, t0, MVPCONF0_PVPE_SHIFT
        andi    t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-       addi    t7, t0, 1
+       addiu   t7, t0, 1
 
        /* If there's only 1, we're done */
        beqz    t0, 2f
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
        mttc0   t0, CP0_TCHALT
 
        /* Next VPE */
-       addi    t5, t5, 1
+       addiu   t5, t5, 1
        slt     t0, t5, t7
        bnez    t0, 1b
         nop
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
        mfc0    t1, CP0_MVPCONF0
        srl     t1, t1, MVPCONF0_PVPE_SHIFT
        andi    t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
 
        /* Calculate a mask for the VPE ID from EBase.CPUNum */
        clz     t1, t1
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
 
        /* Next VPE */
 2:     srl     t6, t6, 1
-       addi    t5, t5, 1
+       addiu   t5, t5, 1
        bnez    t6, 1b
         nop
 
index 2d80b5f1aeae29361843640d1d0c4148cbf489fd..09f4034f239f511867a4d56792de909d0ad0c773 100644 (file)
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
        panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 
-int daddiu_bug = -1;
+int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
 static inline void check_daddiu(void)
 {
@@ -314,11 +314,14 @@ static inline void check_daddiu(void)
 
 void __init check_bugs64_early(void)
 {
-       check_mult_sh();
-       check_daddiu();
+       if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+               check_mult_sh();
+               check_daddiu();
+       }
 }
 
 void __init check_bugs64(void)
 {
-       check_daddi();
+       if (!config_enabled(CONFIG_CPU_MIPSR6))
+               check_daddi();
 }
index 5342674842f5826572b71ce10a76c7a2f635139f..48dfb9de853ddc92ebf4254a95e400b0db5e1789 100644 (file)
@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
                c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
                break;
 
+       /* R6 incompatible with everything else */
+       case MIPS_CPU_ISA_M64R6:
+               c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+       case MIPS_CPU_ISA_M32R6:
+               c->isa_level |= MIPS_CPU_ISA_M32R6;
+               /* Break here so we don't add incompatible ISAs */
+               break;
        case MIPS_CPU_ISA_M32R2:
                c->isa_level |= MIPS_CPU_ISA_M32R2;
        case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M32R2);
                        break;
+               case 2:
+                       set_isa(c, MIPS_CPU_ISA_M32R6);
+                       break;
                default:
                        goto unknown;
                }
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M64R2);
                        break;
+               case 2:
+                       set_isa(c, MIPS_CPU_ISA_M64R6);
+                       break;
                default:
                        goto unknown;
                }
@@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
        if (config3 & MIPS_CONF3_MSA)
                c->ases |= MIPS_ASE_MSA;
        /* Only tested on 32-bit cores */
-       if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
+       if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
+               c->htw_seq = 0;
                c->options |= MIPS_CPU_HTW;
+       }
 
        return config3 & MIPS_CONF_M;
 }
@@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_EVA;
        if (config5 & MIPS_CONF5_MRP)
                c->options |= MIPS_CPU_MAAR;
+       if (config5 & MIPS_CONF5_LLB)
+               c->options |= MIPS_CPU_RW_LLB;
 
        return config5 & MIPS_CONF_M;
 }
@@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 
        if (cpu_has_rixi) {
                /* Enable the RIXI exceptions */
-               write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+               set_c0_pagegrain(PG_IEC);
                back_to_back_c0_hazard();
                /* Verify the IEC bit is set */
                if (read_c0_pagegrain() & PG_IEC)
@@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c)
        }
 
 #ifndef CONFIG_MIPS_CPS
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                c->core = get_ebase_cpunum();
                if (cpu_has_mipsmt)
                        c->core >>= fls(core_nvpes()) - 1;
@@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 {
        c->writecombine = _CACHE_UNCACHED_ACCELERATED;
        switch (c->processor_id & PRID_IMP_MASK) {
+       case PRID_IMP_QEMU_GENERIC:
+               c->writecombine = _CACHE_UNCACHED;
+               c->cputype = CPU_QEMU_GENERIC;
+               __cpu_name[cpu] = "MIPS GENERIC QEMU";
+               break;
        case PRID_IMP_4KC:
                c->cputype = CPU_4KC;
                c->writecombine = _CACHE_UNCACHED;
@@ -1345,8 +1367,7 @@ void cpu_probe(void)
        if (c->options & MIPS_CPU_FPU) {
                c->fpu_id = cpu_get_fpu_id();
 
-               if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+               if (c->isa_level & cpu_has_mips_r) {
                        if (c->fpu_id & MIPS_FPIR_3D)
                                c->ases |= MIPS_ASE_MIPS3D;
                        if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1354,7 +1375,7 @@ void cpu_probe(void)
                }
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
                /* R2 has Performance Counter Interrupt indicator */
                c->options |= MIPS_CPU_PCI;
index a5b5b56485c1618c34af3daea2b67e795a0f8c2b..d2c09f6475c5cb5454b34ed1c498d43fdc31b29e 100644 (file)
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+/* FPU modes */
 enum {
-       FP_ERROR = -1,
-       FP_DOUBLE_64A = -2,
+       FP_FRE,
+       FP_FR0,
+       FP_FR1,
 };
 
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single:    The program being loaded needs an FPU but it will only issue
+ *             single precision instructions meaning that it can execute in
+ *             either FR0 or FR1.
+ * @soft:      The soft(-float) requirement means that the program being
+ *             loaded has no FPU dependency at all (i.e. it contains no
+ *             FPU instructions).
+ * @fr1:       The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault: The program being loaded depends on the default FPU mode.
+ *             That is FR0 for O32 and FR1 for N32/N64.
+ * @fre:       The program being loaded depends on FPU with FRE=1. This mode is
+ *             a bridge which uses FR=1 whilst still being able to maintain
+ *             full compatibility with pre-existing code using the O32 FP32
+ *             ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+       bool single;
+       bool soft;
+       bool fr1;
+       bool frdefault;
+       bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+       [MIPS_ABI_FP_ANY]    = { true,  true,  true,  true,  true  },
+       [MIPS_ABI_FP_DOUBLE] = { false, false, false, true,  true  },
+       [MIPS_ABI_FP_SINGLE] = { true,  false, false, false, false },
+       [MIPS_ABI_FP_SOFT]   = { false, true,  false, false, false },
+       [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+       [MIPS_ABI_FP_XX]     = { false, false, true,  true,  true  },
+       [MIPS_ABI_FP_64]     = { false, false, true,  false, false },
+       [MIPS_ABI_FP_64A]    = { false, false, true,  false, true  }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
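Taken together, the table and the none_req fallback give one requirement row per ABI; the program's and the interpreter's rows are then intersected field by field. A minimal standalone sketch of that lookup-and-AND step (the ABI_* indices and values below are illustrative stand-ins, not the kernel's MIPS_ABI_FP_* constants):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for a few MIPS_ABI_FP_* indices. */
enum { ABI_ANY, ABI_DOUBLE, ABI_XX, ABI_UNKNOWN };

struct req { bool single, soft, fr1, frdefault, fre; };

static const struct req reqs[] = {
	[ABI_ANY]    = { true,  true,  true,  true, true },
	[ABI_DOUBLE] = { false, false, false, true, true },
	[ABI_XX]     = { false, false, true,  true, true },
};

/* No .MIPS.abiflags: everything is acceptable except FR1. */
static const struct req none_req = { true, true, false, true, true };

static struct req intersect(int prog_abi, int interp_abi)
{
	struct req p = (prog_abi == ABI_UNKNOWN) ? none_req : reqs[prog_abi];
	struct req i = (interp_abi == ABI_UNKNOWN) ? none_req : reqs[interp_abi];

	p.single &= i.single;
	p.soft &= i.soft;
	p.fr1 &= i.fr1;
	p.frdefault &= i.frdefault;
	p.fre &= i.fre;
	return p;
}

int main(void)
{
	/* An FP_XX program with an FP_DOUBLE interpreter: FR default or FRE. */
	struct req r = intersect(ABI_XX, ABI_DOUBLE);

	printf("fr1=%d frdefault=%d fre=%d\n", r.fr1, r.frdefault, r.fre);
	return 0;
}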
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                     bool is_interp, struct arch_elf_state *state)
 {
-       struct elf32_hdr *ehdr = _ehdr;
-       struct elf32_phdr *phdr = _phdr;
+       struct elf32_hdr *ehdr32 = _ehdr;
+       struct elf32_phdr *phdr32 = _phdr;
+       struct elf64_phdr *phdr64 = _phdr;
        struct mips_elf_abiflags_v0 abiflags;
        int ret;
 
-       if (config_enabled(CONFIG_64BIT) &&
-           (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
-               return 0;
-       if (phdr->p_type != PT_MIPS_ABIFLAGS)
-               return 0;
-       if (phdr->p_filesz < sizeof(abiflags))
-               return -EINVAL;
+       /* Let's see if this is an O32 ELF */
+       if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+               /* FR = 1 for N32 */
+               if (ehdr32->e_flags & EF_MIPS_ABI2)
+                       state->overall_fp_mode = FP_FR1;
+               else
+                       /* Set a good default FPU mode for O32 */
+                       state->overall_fp_mode = cpu_has_mips_r6 ?
+                               FP_FRE : FP_FR0;
+
+               if (ehdr32->e_flags & EF_MIPS_FP64) {
+                       /*
+                        * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+                        * later if needed
+                        */
+                       if (is_interp)
+                               state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+                       else
+                               state->fp_abi = MIPS_ABI_FP_OLD_64;
+               }
+               if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+                       return 0;
+
+               if (phdr32->p_filesz < sizeof(abiflags))
+                       return -EINVAL;
+
+               ret = kernel_read(elf, phdr32->p_offset,
+                                 (char *)&abiflags,
+                                 sizeof(abiflags));
+       } else {
+               /* FR=1 is really the only option for 64-bit */
+               state->overall_fp_mode = FP_FR1;
+
+               if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+                       return 0;
+               if (phdr64->p_filesz < sizeof(abiflags))
+                       return -EINVAL;
+
+               ret = kernel_read(elf, phdr64->p_offset,
+                                 (char *)&abiflags,
+                                 sizeof(abiflags));
+       }
 
-       ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
-                         sizeof(abiflags));
        if (ret < 0)
                return ret;
        if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
        return 0;
 }
 
-static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(int in_abi)
 {
        /* If the ABI requirement is provided, simply return that */
-       if (in_abi != -1)
+       if (in_abi != MIPS_ABI_FP_UNKNOWN)
                return in_abi;
 
-       /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
-       if (ehdr->e_flags & EF_MIPS_FP64)
-               return MIPS_ABI_FP_64;
-
-       /* Default to MIPS_ABI_FP_DOUBLE */
-       return MIPS_ABI_FP_DOUBLE;
+       /* Unknown ABI */
+       return MIPS_ABI_FP_UNKNOWN;
 }
 
 int arch_check_elf(void *_ehdr, bool has_interpreter,
                   struct arch_elf_state *state)
 {
        struct elf32_hdr *ehdr = _ehdr;
-       unsigned fp_abi, interp_fp_abi, abi0, abi1;
+       struct mode_req prog_req, interp_req;
+       int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
 
-       /* Ignore non-O32 binaries */
-       if (config_enabled(CONFIG_64BIT) &&
-           (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+       if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
                return 0;
 
-       fp_abi = get_fp_abi(ehdr, state->fp_abi);
+       fp_abi = get_fp_abi(state->fp_abi);
 
        if (has_interpreter) {
-               interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+               interp_fp_abi = get_fp_abi(state->interp_fp_abi);
 
                abi0 = min(fp_abi, interp_fp_abi);
                abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
                abi0 = abi1 = fp_abi;
        }
 
-       state->overall_abi = FP_ERROR;
-
-       if (abi0 == abi1) {
-               state->overall_abi = abi0;
-       } else if (abi0 == MIPS_ABI_FP_ANY) {
-               state->overall_abi = abi1;
-       } else if (abi0 == MIPS_ABI_FP_DOUBLE) {
-               switch (abi1) {
-               case MIPS_ABI_FP_XX:
-                       state->overall_abi = MIPS_ABI_FP_DOUBLE;
-                       break;
-
-               case MIPS_ABI_FP_64A:
-                       state->overall_abi = FP_DOUBLE_64A;
-                       break;
-               }
-       } else if (abi0 == MIPS_ABI_FP_SINGLE ||
-                  abi0 == MIPS_ABI_FP_SOFT) {
-               /* Cannot link with other ABIs */
-       } else if (abi0 == MIPS_ABI_FP_OLD_64) {
-               switch (abi1) {
-               case MIPS_ABI_FP_XX:
-               case MIPS_ABI_FP_64:
-               case MIPS_ABI_FP_64A:
-                       state->overall_abi = MIPS_ABI_FP_64;
-                       break;
-               }
-       } else if (abi0 == MIPS_ABI_FP_XX ||
-                  abi0 == MIPS_ABI_FP_64 ||
-                  abi0 == MIPS_ABI_FP_64A) {
-               state->overall_abi = MIPS_ABI_FP_64;
-       }
+       /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+       max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+                  (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+               MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
 
-       switch (state->overall_abi) {
-       case MIPS_ABI_FP_64:
-       case MIPS_ABI_FP_64A:
-       case FP_DOUBLE_64A:
-               if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-                       return -ELIBBAD;
-               break;
+       if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+           (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+               return -ELIBBAD;
+
+       /* It's time to determine the FPU mode requirements */
+       prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+       interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
 
-       case FP_ERROR:
+       /*
+        * Check whether the program's and interp's ABIs have a matching FPU
+        * mode requirement.
+        */
+       prog_req.single = interp_req.single && prog_req.single;
+       prog_req.soft = interp_req.soft && prog_req.soft;
+       prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+       prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+       prog_req.fre = interp_req.fre && prog_req.fre;
+
+       /*
+        * Determine the desired FPU mode
+        *
+        * Decision making:
+        *
+        * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+        *   means that the combination of program and interpreter inherently
+        *   requires the hybrid FP mode.
+        * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or fpxx
+        *   case. In the any-ABI (or no-ABI) case there are no FPU
+        *   instructions, so the mode does not matter and we simply use the
+        *   one preferred by the hardware. In the fpxx case the ABI can
+        *   handle both FR=1 and FR=0, so, again, we choose the mode
+        *   preferred by the hardware. Likewise, if the code only uses
+        *   single-precision FPU instructions (single is true) but the
+        *   default FPU mode is not acceptable (frdefault is false), we fall
+        *   back to the mode preferred by the hardware.
+        * - We want FP_FR1 if that is the only matching mode and the default
+        *   one is not acceptable.
+        * - Return -ELIBBAD if we cannot find a matching FPU mode.
+        */
+       if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+               state->overall_fp_mode = FP_FRE;
+       else if ((prog_req.fr1 && prog_req.frdefault) ||
+                (prog_req.single && !prog_req.frdefault))
+               /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+               state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+                                         cpu_has_mips_r2_r6) ?
+                                         FP_FR1 : FP_FR0;
+       else if (prog_req.fr1)
+               state->overall_fp_mode = FP_FR1;
+       else  if (!prog_req.fre && !prog_req.frdefault &&
+                 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
                return -ELIBBAD;
-       }
 
        return 0;
 }
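The ladder above can be followed with a worked example: an FP_64A-only object yields the requirement set fr1=1, fre=1 with frdefault=0, so the first two tests fail and fr1 alone selects FP_FR1. A minimal standalone sketch of the same selection (local enum names; whether the hardware prefers FR=1 is passed in as an assumption rather than probed via cpu_has_mips_r2_r6):

#include <stdbool.h>
#include <stdio.h>

enum { FP_FRE, FP_FR0, FP_FR1, FP_ERR };   /* local names for this sketch */

static int pick_mode(bool single, bool soft, bool fr1, bool frdefault,
		     bool fre, bool hw_prefers_fr1)
{
	if (fre && !frdefault && !fr1)
		return FP_FRE;                          /* hybrid mode only */
	if ((fr1 && frdefault) || (single && !frdefault))
		return hw_prefers_fr1 ? FP_FR1 : FP_FR0;
	if (fr1)
		return FP_FR1;
	if (!fre && !frdefault && !fr1 && !single && !soft)
		return FP_ERR;          /* the kernel returns -ELIBBAD here */
	return FP_FR0;                  /* otherwise keep the earlier default */
}

int main(void)
{
	/* FP_64A alone: single=0, soft=0, fr1=1, frdefault=0, fre=1 -> FP_FR1 */
	printf("mode=%d (FP_FR1=%d)\n",
	       pick_mode(false, false, true, false, true, true), FP_FR1);
	return 0;
}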
 
-void mips_set_personality_fp(struct arch_elf_state *state)
+static inline void set_thread_fp_mode(int hybrid, int regs32)
 {
-       if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
-               /*
-                * Use hybrid FPRs for all code which can correctly execute
-                * with that mode.
-                */
-               switch (state->overall_abi) {
-               case MIPS_ABI_FP_DOUBLE:
-               case MIPS_ABI_FP_SINGLE:
-               case MIPS_ABI_FP_SOFT:
-               case MIPS_ABI_FP_XX:
-               case MIPS_ABI_FP_ANY:
-                       /* FR=1, FRE=1 */
-                       clear_thread_flag(TIF_32BIT_FPREGS);
-                       set_thread_flag(TIF_HYBRID_FPREGS);
-                       return;
-               }
-       }
-
-       switch (state->overall_abi) {
-       case MIPS_ABI_FP_DOUBLE:
-       case MIPS_ABI_FP_SINGLE:
-       case MIPS_ABI_FP_SOFT:
-               /* FR=0 */
-               set_thread_flag(TIF_32BIT_FPREGS);
+       if (hybrid)
+               set_thread_flag(TIF_HYBRID_FPREGS);
+       else
                clear_thread_flag(TIF_HYBRID_FPREGS);
-               break;
-
-       case FP_DOUBLE_64A:
-               /* FR=1, FRE=1 */
+       if (regs32)
+               set_thread_flag(TIF_32BIT_FPREGS);
+       else
                clear_thread_flag(TIF_32BIT_FPREGS);
-               set_thread_flag(TIF_HYBRID_FPREGS);
-               break;
+}
 
-       case MIPS_ABI_FP_64:
-       case MIPS_ABI_FP_64A:
-               /* FR=1, FRE=0 */
-               clear_thread_flag(TIF_32BIT_FPREGS);
-               clear_thread_flag(TIF_HYBRID_FPREGS);
-               break;
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+       /*
+        * This function is only ever called for O32 ELFs so we should
+        * not be worried about N32/N64 binaries.
+        */
 
-       case MIPS_ABI_FP_XX:
-       case MIPS_ABI_FP_ANY:
-               if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-                       set_thread_flag(TIF_32BIT_FPREGS);
-               else
-                       clear_thread_flag(TIF_32BIT_FPREGS);
+       if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+               return;
 
-               clear_thread_flag(TIF_HYBRID_FPREGS);
+       switch (state->overall_fp_mode) {
+       case FP_FRE:
+               set_thread_fp_mode(1, 0);
+               break;
+       case FP_FR0:
+               set_thread_fp_mode(0, 1);
+               break;
+       case FP_FR1:
+               set_thread_fp_mode(0, 0);
                break;
-
        default:
-       case FP_ERROR:
                BUG();
        }
 }
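For reference, the three modes map onto the two per-thread flags as FP_FRE -> (hybrid, 64-bit registers), FP_FR0 -> (no hybrid, 32-bit register view), FP_FR1 -> (no hybrid, 64-bit registers). A small standalone sketch of that mapping (the flag update is replaced by a printf; this is an illustration, not the kernel helper):

#include <stdio.h>

enum fp_mode { FP_FRE, FP_FR0, FP_FR1 };

/* Stand-in for set_thread_fp_mode(hybrid, regs32): just report the flags. */
static void thread_fp_flags(int hybrid, int regs32)
{
	printf("TIF_HYBRID_FPREGS=%d TIF_32BIT_FPREGS=%d\n", hybrid, regs32);
}

static void personality_fp(enum fp_mode mode)
{
	switch (mode) {
	case FP_FRE:
		thread_fp_flags(1, 0);  /* FR=1, FRE=1 */
		break;
	case FP_FR0:
		thread_fp_flags(0, 1);  /* FR=0 */
		break;
	case FP_FR1:
		thread_fp_flags(0, 0);  /* FR=1, FRE=0 */
		break;
	}
}

int main(void)
{
	personality_fp(FP_FRE);
	return 0;
}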
index 4353d323f0175cc2fcbefac58762b90ad7da0c59..af41ba6db9601d16540b945b0112e130bd9eb845 100644 (file)
@@ -46,6 +46,11 @@ resume_userspace:
        local_irq_disable               # make sure we dont miss an
                                        # interrupt setting need_resched
                                        # between sampling and return
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+       lw      k0, TI_R2_EMUL_RET($28)
+       bnez    k0, restore_all_from_r2_emul
+#endif
+
        LONG_L  a2, TI_FLAGS($28)       # current->work
        andi    t0, a2, _TIF_WORK_MASK  # (ignoring syscall_trace)
        bnez    t0, work_pending
@@ -114,6 +119,19 @@ restore_partial:           # restore partial frame
        RESTORE_SP_AND_RET
        .set    at
 
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+restore_all_from_r2_emul:                      # restore full frame
+       .set    noat
+       sw      zero, TI_R2_EMUL_RET($28)       # reset it
+       RESTORE_TEMP
+       RESTORE_AT
+       RESTORE_STATIC
+       RESTORE_SOME
+       LONG_L  sp, PT_R29(sp)
+       eretnc
+       .set    at
+#endif
+
 work_pending:
        andi    t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
        beqz    t0, work_notifysig
@@ -158,7 +176,8 @@ syscall_exit_work:
        jal     syscall_trace_leave
        b       resume_userspace
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
+    defined(CONFIG_MIPS_MT)
 
 /*
  * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@ LEAF(mips_ihb)
        nop
        END(mips_ihb)
 
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
index a5e26dd9059256ed7c2a2033210bcd1bbc99b6e3..2ebaabe3af1513269e100d8bcffa9e8e9cb1f2c8 100644 (file)
@@ -125,7 +125,7 @@ LEAF(__r4k_wait)
        nop
        nop
 #endif
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
        wait
        /* end of rollback region (the region size must be power of two) */
 1:
index 0b9082b6b6832d104a7994c0a6d44f13f61912cb..368c88b7eb6c985a848601415c9e8baf7aa2a5bb 100644 (file)
@@ -186,6 +186,7 @@ void __init check_wait(void)
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_M5150:
+       case CPU_QEMU_GENERIC:
                cpu_wait = r4k_wait;
                if (read_c0_config7() & MIPS_CONF7_WII)
                        cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644 (file)
index 0000000..64d17e4
--- /dev/null
@@ -0,0 +1,2378 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *      MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU  "daddiu "
+#define INS    "dins "
+#define EXT    "dext "
+#else
+#define ADDIU  "addiu "
+#define INS    "ins "
+#define EXT    "ext "
+#endif /* CONFIG_64BIT */
+
+#define SB     "sb "
+#define LB     "lb "
+#define LL     "ll "
+#define SC     "sc "
+
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS        10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+       mipsr2_emulation = 1;
+
+       pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+       return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 delay-slot instructions
+ * directly, for performance, instead of the traditional (and rather slow)
+ * approach of using a stack trampoline.
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+       switch (MIPSInst_OPCODE(ir)) {
+       case addiu_op:
+               if (MIPSInst_RT(ir))
+                       regs->regs[MIPSInst_RT(ir)] =
+                               (s32)regs->regs[MIPSInst_RS(ir)] +
+                               (s32)MIPSInst_SIMM(ir);
+               return 0;
+       case daddiu_op:
+               if (config_enabled(CONFIG_32BIT))
+                       break;
+
+               if (MIPSInst_RT(ir))
+                       regs->regs[MIPSInst_RT(ir)] =
+                               (s64)regs->regs[MIPSInst_RS(ir)] +
+                               (s64)MIPSInst_SIMM(ir);
+               return 0;
+       case lwc1_op:
+       case swc1_op:
+       case cop1_op:
+       case cop1x_op:
+               /* FPU instructions in delay slot */
+               return -SIGFPE;
+       case spec_op:
+               switch (MIPSInst_FUNC(ir)) {
+               case or_op:
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       regs->regs[MIPSInst_RS(ir)] |
+                                       regs->regs[MIPSInst_RT(ir)];
+                       return 0;
+               case sll_op:
+                       if (MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case srl_op:
+                       if (MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case addu_op:
+                       if (MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+                                             (u32)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               case subu_op:
+                       if (MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+                                             (u32)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               case dsll_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case dsrl_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case daddu_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (u64)regs->regs[MIPSInst_RS(ir)] +
+                                       (u64)regs->regs[MIPSInst_RT(ir)];
+                       return 0;
+               case dsubu_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+                                             (u64)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               }
+               break;
+       default:
+               pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+                        ir, MIPSInst_OPCODE(ir));
+       }
+
+       return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+       u32 csr;
+       u32 cond;
+
+       csr = current->thread.fpu.fcr31;
+       cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+       if (((csr & cond) == 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+       return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+       u32 csr;
+       u32 cond;
+
+       csr = current->thread.fpu.fcr31;
+       cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+       if (((csr & cond) != 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in a delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction, or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+       int err;
+       unsigned long cepc, epc, nepc;
+       u32 nir;
+
+       if (delay_slot(regs))
+               return SIGILL;
+
+       /* EPC after the RI/JR instruction */
+       nepc = regs->cp0_epc;
+       /* Roll back to the reserved R2 JR instruction */
+       regs->cp0_epc -= 4;
+       epc = regs->cp0_epc;
+       err = __compute_return_epc(regs);
+
+       if (err < 0)
+               return SIGEMT;
+
+       /* Computed EPC */
+       cepc = regs->cp0_epc;
+
+       /* Get DS instruction */
+       err = __get_user(nir, (u32 __user *)nepc);
+       if (err)
+               return SIGSEGV;
+
+       MIPS_R2BR_STATS(jrs);
+
+       /* If nir == 0 (NOP), there is nothing else to do */
+       if (nir) {
+               /*
+                * Negative err means FPU instruction in BD-slot,
+                * Zero err means 'BD-slot emulation done'
+                * For anything else we go back to trampoline emulation.
+                */
+               err = mipsr6_emul(regs, nir);
+               if (err > 0) {
+                       regs->cp0_epc = nepc;
+                       err = mips_dsemul(regs, nir, cepc);
+                       if (err == SIGILL)
+                               err = SIGEMT;
+                       MIPS_R2_STATS(dsemul);
+               }
+       }
+
+       return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+       if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+       if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+       if (MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+       regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+       if (MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+       regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+
+       rs = res;
+       regs->lo = (s64)rs;
+       rt = res >> 32;
+       res = (s64)rt;
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
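The multiply emulations all follow the same pattern: form the full 64-bit product, put the low 32 bits (sign-extended) in LO and the high 32 bits (sign-extended) in HI. A short standalone sketch of that split, using arbitrary example operands:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t rs = -3, rt = 100000;            /* arbitrary example operands */
	int64_t res = (int64_t)rs * (int64_t)rt; /* full 64-bit signed product */

	/* LO takes the low 32 bits, HI the high 32 bits; each is then
	 * sign-extended to the register width, as in mult_func(). */
	int64_t lo = (int64_t)(int32_t)res;
	int64_t hi = (int64_t)(int32_t)(res >> 32);

	printf("res=%lld lo=%lld hi=%lld\n",
	       (long long)res, (long long)lo, (long long)hi);
	return 0;
}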
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = res;
+       regs->lo = (s64)rt;
+       regs->hi = (s64)(res >> 32);
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = (s64)(rs / rt);
+       regs->hi = (s64)(rs % rt);
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = (s64)(rs / rt);
+       regs->hi = (s64)(rs % rt);
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = rt * rs;
+
+       regs->lo = res;
+       __asm__ __volatile__(
+               "dmuh %0, %1, %2\t\n"
+               : "=r"(res)
+               : "r"(rt), "r"(rs));
+
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = rt * rs;
+
+       regs->lo = res;
+       __asm__ __volatile__(
+               "dmuhu %0, %1, %2\t\n"
+               : "=r"(res)
+               : "r"(rt), "r"(rs));
+
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+       s64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = rs / rt;
+       regs->hi = rs % rt;
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = rs / rt;
+       regs->hi = rs % rt;
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
+static struct r2_decoder_table spec_op_table[] = {
+       { 0xfc1ff83f, 0x00000008, jr_func },
+       { 0xfc00ffff, 0x00000018, mult_func },
+       { 0xfc00ffff, 0x00000019, multu_func },
+       { 0xfc00ffff, 0x0000001c, dmult_func },
+       { 0xfc00ffff, 0x0000001d, dmultu_func },
+       { 0xffff07ff, 0x00000010, mfhi_func },
+       { 0xfc1fffff, 0x00000011, mthi_func },
+       { 0xffff07ff, 0x00000012, mflo_func },
+       { 0xfc1fffff, 0x00000013, mtlo_func },
+       { 0xfc0307ff, 0x00000001, movf_func },
+       { 0xfc0307ff, 0x00010001, movt_func },
+       { 0xfc0007ff, 0x0000000a, movz_func },
+       { 0xfc0007ff, 0x0000000b, movn_func },
+       { 0xfc00ffff, 0x0000001a, div_func },
+       { 0xfc00ffff, 0x0000001b, divu_func },
+       { 0xfc00ffff, 0x0000001e, ddiv_func },
+       { 0xfc00ffff, 0x0000001f, ddivu_func },
+       {}
+};
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res += ((((s64)rt) << 32) | (u32)rs);
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res += ((((s64)rt) << 32) | (u32)rs);
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+
+       rs = res;
+       regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+       u32 res;
+       u32 rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+       u32 res;
+       u32 rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/* R6 removed instructions for the SPECIAL2 opcode */
+static struct r2_decoder_table spec2_op_table[] = {
+       { 0xfc00ffff, 0x70000000, madd_func },
+       { 0xfc00ffff, 0x70000001, maddu_func },
+       { 0xfc0007ff, 0x70000002, mul_func },
+       { 0xfc00ffff, 0x70000004, msub_func },
+       { 0xfc00ffff, 0x70000005, msubu_func },
+       { 0xfc0007ff, 0x70000020, clz_func },
+       { 0xfc0007ff, 0x70000021, clo_func },
+       { 0xfc0007ff, 0x70000024, dclz_func },
+       { 0xfc0007ff, 0x70000025, dclo_func },
+       { }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+                                     struct r2_decoder_table *table)
+{
+       struct r2_decoder_table *p;
+       int err;
+
+       for (p = table; p->func; p++) {
+               if ((inst & p->mask) == p->code) {
+                       err = (p->func)(regs, inst);
+                       return err;
+               }
+       }
+       return SIGILL;
+}
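Each table entry pairs a mask and a code with a handler: an instruction word matches when (inst & mask) == code, so the mask keeps the bits that identify the instruction (opcode and function fields) while leaving the register fields free to vary. A standalone sketch of the same matching, reusing the MULT and MFHI encodings from spec_op_table above (the struct and helper names are local to this sketch):

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint32_t mask;
	uint32_t code;
	const char *name;
};

/* Two encodings lifted from spec_op_table: MULT and MFHI. */
static const struct entry table[] = {
	{ 0xfc00ffff, 0x00000018, "mult" },
	{ 0xffff07ff, 0x00000010, "mfhi" },
	{ 0, 0, NULL }                       /* sentinel, like the kernel's {} */
};

static const char *match(uint32_t inst)
{
	const struct entry *p;

	for (p = table; p->name; p++)
		if ((inst & p->mask) == p->code)
			return p->name;
	return "no match (SIGILL)";
}

int main(void)
{
	/* MULT $2, $3: opcode/function bits identify it, rs/rt fields vary. */
	uint32_t inst = (2u << 21) | (3u << 16) | 0x18;

	printf("%08x -> %s\n", inst, match(inst));
	return 0;
}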
+
+/**
+ * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst)
+{
+       int err = 0;
+       unsigned long vaddr;
+       u32 nir;
+       unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+       void __user *fault_addr = NULL;
+       int pass = 0;
+
+repeat:
+       r31 = regs->regs[31];
+       epc = regs->cp0_epc;
+       err = compute_return_epc(regs);
+       if (err < 0) {
+               BUG();
+               return SIGEMT;
+       }
+       pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
+                inst, epc, pass);
+
+       switch (MIPSInst_OPCODE(inst)) {
+       case spec_op:
+               err = mipsr2_find_op_func(regs, inst, spec_op_table);
+               if (err < 0) {
+                       /* FPU instruction under JR */
+                       regs->cp0_cause |= CAUSEF_BD;
+                       goto fpu_emul;
+               }
+               break;
+       case spec2_op:
+               err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+               break;
+       case bcond_op:
+               rt = MIPSInst_RT(inst);
+               rs = MIPSInst_RS(inst);
+               switch (rt) {
+               case tgei_op:
+                       if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TGEI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tgeiu_op:
+                       if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+                               do_trap_or_bp(regs, 0, "TGEIU");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tlti_op:
+                       if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TLTI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tltiu_op:
+                       if (regs->regs[rs] < MIPSInst_UIMM(inst))
+                               do_trap_or_bp(regs, 0, "TLTIU");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case teqi_op:
+                       if (regs->regs[rs] == MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TEQI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tnei_op:
+                       if (regs->regs[rs] != MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TNEI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case bltzl_op:
+               case bgezl_op:
+               case bltzall_op:
+               case bgezall_op:
+                       if (delay_slot(regs)) {
+                               err = SIGILL;
+                               break;
+                       }
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = __compute_return_epc(regs);
+                       if (err < 0)
+                               return SIGEMT;
+                       if (err != BRANCH_LIKELY_TAKEN)
+                               break;
+                       cpc = regs->cp0_epc;
+                       nepc = epc + 4;
+                       err = __get_user(nir, (u32 __user *)nepc);
+                       if (err) {
+                               err = SIGSEGV;
+                               break;
+                       }
+                       /*
+                        * This will probably be optimized away when
+                        * CONFIG_DEBUG_FS is not enabled
+                        */
+                       switch (rt) {
+                       case bltzl_op:
+                               MIPS_R2BR_STATS(bltzl);
+                               break;
+                       case bgezl_op:
+                               MIPS_R2BR_STATS(bgezl);
+                               break;
+                       case bltzall_op:
+                               MIPS_R2BR_STATS(bltzall);
+                               break;
+                       case bgezall_op:
+                               MIPS_R2BR_STATS(bgezall);
+                               break;
+                       }
+
+                       switch (MIPSInst_OPCODE(nir)) {
+                       case cop1_op:
+                       case cop1x_op:
+                       case lwc1_op:
+                       case swc1_op:
+                               regs->cp0_cause |= CAUSEF_BD;
+                               goto fpu_emul;
+                       }
+                       if (nir) {
+                               err = mipsr6_emul(regs, nir);
+                               if (err > 0) {
+                                       err = mips_dsemul(regs, nir, cpc);
+                                       if (err == SIGILL)
+                                               err = SIGEMT;
+                                       MIPS_R2_STATS(dsemul);
+                               }
+                       }
+                       break;
+               case bltzal_op:
+               case bgezal_op:
+                       if (delay_slot(regs)) {
+                               err = SIGILL;
+                               break;
+                       }
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = __compute_return_epc(regs);
+                       if (err < 0)
+                               return SIGEMT;
+                       cpc = regs->cp0_epc;
+                       nepc = epc + 4;
+                       err = __get_user(nir, (u32 __user *)nepc);
+                       if (err) {
+                               err = SIGSEGV;
+                               break;
+                       }
+                       /*
+                        * This will probably be optimized away when
+                        * CONFIG_DEBUG_FS is not enabled
+                        */
+                       switch (rt) {
+                       case bltzal_op:
+                               MIPS_R2BR_STATS(bltzal);
+                               break;
+                       case bgezal_op:
+                               MIPS_R2BR_STATS(bgezal);
+                               break;
+                       }
+
+                       switch (MIPSInst_OPCODE(nir)) {
+                       case cop1_op:
+                       case cop1x_op:
+                       case lwc1_op:
+                       case swc1_op:
+                               regs->cp0_cause |= CAUSEF_BD;
+                               goto fpu_emul;
+                       }
+                       if (nir) {
+                               err = mipsr6_emul(regs, nir);
+                               if (err > 0) {
+                                       err = mips_dsemul(regs, nir, cpc);
+                                       if (err == SIGILL)
+                                               err = SIGEMT;
+                                       MIPS_R2_STATS(dsemul);
+                               }
+                       }
+                       break;
+               default:
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = SIGILL;
+                       break;
+               }
+               break;
+
+       case beql_op:
+       case bnel_op:
+       case blezl_op:
+       case bgtzl_op:
+               if (delay_slot(regs)) {
+                       err = SIGILL;
+                       break;
+               }
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+               err = __compute_return_epc(regs);
+               if (err < 0)
+                       return SIGEMT;
+               if (err != BRANCH_LIKELY_TAKEN)
+                       break;
+               cpc = regs->cp0_epc;
+               nepc = epc + 4;
+               err = __get_user(nir, (u32 __user *)nepc);
+               if (err) {
+                       err = SIGSEGV;
+                       break;
+               }
+               /*
+                * This will probably be optimized away when
+                * CONFIG_DEBUG_FS is not enabled
+                */
+               switch (MIPSInst_OPCODE(inst)) {
+               case beql_op:
+                       MIPS_R2BR_STATS(beql);
+                       break;
+               case bnel_op:
+                       MIPS_R2BR_STATS(bnel);
+                       break;
+               case blezl_op:
+                       MIPS_R2BR_STATS(blezl);
+                       break;
+               case bgtzl_op:
+                       MIPS_R2BR_STATS(bgtzl);
+                       break;
+               }
+
+               switch (MIPSInst_OPCODE(nir)) {
+               case cop1_op:
+               case cop1x_op:
+               case lwc1_op:
+               case swc1_op:
+                       regs->cp0_cause |= CAUSEF_BD;
+                       goto fpu_emul;
+               }
+               if (nir) {
+                       err = mipsr6_emul(regs, nir);
+                       if (err > 0) {
+                               err = mips_dsemul(regs, nir, cpc);
+                               if (err == SIGILL)
+                                       err = SIGEMT;
+                               MIPS_R2_STATS(dsemul);
+                       }
+               }
+               break;
+       case lwc1_op:
+       case swc1_op:
+       case cop1_op:
+       case cop1x_op:
+fpu_emul:
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+               if (!used_math()) {     /* First time FPU user.  */
+                       err = init_fpu();
+                       set_used_math();
+               }
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+
+               err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+                                              &fault_addr);
+
+               /*
+                * This is a tricky issue - lose_fpu() uses LL/SC atomics
+                * if the FPU is owned, which effectively cancels a user-level
+                * LL/SC sequence. So it might seem logical not to restore FPU
+                * ownership here. However, sequences of multiple FPU
+                * instructions are far more common than LL-FPU-SC, so we
+                * prefer to loop here until the next scheduler cycle cancels
+                * FPU ownership.
+                */
+               own_fpu(1);     /* Restore FPU state. */
+
+               if (err)
+                       current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+               MIPS_R2_STATS(fpus);
+
+               break;
+
+       case lwl_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:     sll     %0, %0, 0\n"
+                       "10:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       10b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+
+               break;
+
+       case lwr_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       sll     %0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       sll     %0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "10:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       10b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+
+               break;
+
+       case swl_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                               EXT     "%1, %0, 24, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 0, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                               EXT     "%1, %0, 24, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 0, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+
+       case swr_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                               EXT     "%1, %0, 0, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 24, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                               EXT     "%1, %0, 0, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 24, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+
+       case ldl_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                       err = SIGILL;
+                       break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 56, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 48, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 40, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 32, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 56, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 48, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 40, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 32, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 24, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 16, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 8, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+               break;
+
+       case ldr_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                       err = SIGILL;
+                       break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 0, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 8, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 16, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 24, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 32, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 40, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 48, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 0, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 32, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 40, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 48, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li     %3,%4\n"
+                       "       j      9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word  1b,8b\n"
+                       "       .word  2b,8b\n"
+                       "       .word  3b,8b\n"
+                       "       .word  4b,8b\n"
+                       "       .word  5b,8b\n"
+                       "       .word  6b,8b\n"
+                       "       .word  7b,8b\n"
+                       "       .word  0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+               break;
+
+       case sdl_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                       err = SIGILL;
+                       break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "       dextu   %1, %0, 56, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 0, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "       dextu   %1, %0, 56, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 0, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+               break;
+
+       case sdr_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                       err = SIGILL;
+                       break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "       dext    %1, %0, 0, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 56, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "       dext    %1, %0, 0, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 56, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+       case ll_op:
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x3) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB being available, so it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC, so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception, so once we emulate them here, we return
+                        * to userland with ERETNC. That preserves LLAddr/LLB,
+                        * so the subsequent SC instruction will succeed,
+                        * preserving the atomic semantics of the LL/SC block.
+                        * Without that, there is no safe way to emulate an
+                        * LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
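A hypothetical userland sequence of the kind the comment above protects (the function name and operands are invented for illustration; only the ll/sc pattern matters): if the RI trap taken on the R2 ll cleared LLAddr/LLB on return, the emulated sc below could never succeed and the retry loop would spin forever.

    /* Atomic add built on MIPS R2 LL/SC; illustrative sketch only. */
    static int atomic_add_return_llsc(volatile int *p, int v)
    {
            int old, tmp;

            __asm__ __volatile__(
            "1:     ll      %0, %2\n"       /* load-linked: sets LLAddr/LLB */
            "       addu    %1, %0, %3\n"   /* new = old + v                */
            "       sc      %1, %2\n"       /* store-conditional: needs LLB */
            "       beqz    %1, 1b\n"       /* link lost, so retry          */
            : "=&r"(old), "=&r"(tmp), "+m"(*p)
            : "r"(v)
            : "memory");

            return old + v;
    }
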
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "ll     %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "=&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV)
+                       : "memory");
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case sc_op:
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x3) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB being available, so it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC, so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception, so once we emulate them here, we return
+                        * to userland with ERETNC. That preserves LLAddr/LLB,
+                        * so the subsequent SC instruction will succeed,
+                        * preserving the atomic semantics of the LL/SC block.
+                        * Without that, there is no safe way to emulate an
+                        * LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               res = regs->regs[MIPSInst_RT(inst)];
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "sc     %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "+&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case lld_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                       err = SIGILL;
+                       break;
+               }
+
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x7) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB being available, so it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC, so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception, so once we emulate them here, we return
+                        * to userland with ERETNC. That preserves LLAddr/LLB,
+                        * so the subsequent SC instruction will succeed,
+                        * preserving the atomic semantics of the LL/SC block.
+                        * Without that, there is no safe way to emulate an
+                        * LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "lld    %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "=&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV)
+                       : "memory");
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case scd_op:
+               if (config_enabled(CONFIG_32BIT)) {
+                       err = SIGILL;
+                       break;
+               }
+
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x7) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB being available, so it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC, so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception, so once we emulate them here, we return
+                        * to userland with ERETNC. That preserves LLAddr/LLB,
+                        * so the subsequent SC instruction will succeed,
+                        * preserving the atomic semantics of the LL/SC block.
+                        * Without that, there is no safe way to emulate an
+                        * LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               res = regs->regs[MIPSInst_RT(inst)];
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "scd    %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "+&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+       case pref_op:
+               /* skip it */
+               break;
+       default:
+               err = SIGILL;
+       }
+
+       /*
+        * Let's not return to userland just yet. It's costly and
+        * it's likely we have more R2 instructions to emulate.
+        */
+       if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+               regs->cp0_cause &= ~CAUSEF_BD;
+               err = get_user(inst, (u32 __user *)regs->cp0_epc);
+               if (!err)
+                       goto repeat;
+
+               if (err < 0)
+                       err = SIGSEGV;
+       }
+
+       if (err && (err != SIGEMT)) {
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+       }
+
+       /* Likely a MIPS R6 compatible instruction */
+       if (pass && (err == SIGILL))
+               err = 0;
+
+       return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_stats_show(struct seq_file *s, void *unused)
+{
+
+       seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+       seq_printf(s, "movs\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+       seq_printf(s, "hilo\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+       seq_printf(s, "muls\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+       seq_printf(s, "divs\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+       seq_printf(s, "dsps\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+       seq_printf(s, "bops\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+       seq_printf(s, "traps\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+       seq_printf(s, "fpus\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+       seq_printf(s, "loads\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+       seq_printf(s, "stores\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+       seq_printf(s, "llsc\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+       seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+       seq_printf(s, "jr\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+       seq_printf(s, "bltzl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+       seq_printf(s, "bgezl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+       seq_printf(s, "bltzll\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+       seq_printf(s, "bgezll\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+       seq_printf(s, "bltzal\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+       seq_printf(s, "bgezal\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+       seq_printf(s, "beql\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+       seq_printf(s, "bnel\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+       seq_printf(s, "blezl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+       seq_printf(s, "bgtzl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+       return 0;
+}
+
+static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+{
+       mipsr2_stats_show(s, unused);
+
+       __this_cpu_write((mipsr2emustats).movs, 0);
+       __this_cpu_write((mipsr2bdemustats).movs, 0);
+       __this_cpu_write((mipsr2emustats).hilo, 0);
+       __this_cpu_write((mipsr2bdemustats).hilo, 0);
+       __this_cpu_write((mipsr2emustats).muls, 0);
+       __this_cpu_write((mipsr2bdemustats).muls, 0);
+       __this_cpu_write((mipsr2emustats).divs, 0);
+       __this_cpu_write((mipsr2bdemustats).divs, 0);
+       __this_cpu_write((mipsr2emustats).dsps, 0);
+       __this_cpu_write((mipsr2bdemustats).dsps, 0);
+       __this_cpu_write((mipsr2emustats).bops, 0);
+       __this_cpu_write((mipsr2bdemustats).bops, 0);
+       __this_cpu_write((mipsr2emustats).traps, 0);
+       __this_cpu_write((mipsr2bdemustats).traps, 0);
+       __this_cpu_write((mipsr2emustats).fpus, 0);
+       __this_cpu_write((mipsr2bdemustats).fpus, 0);
+       __this_cpu_write((mipsr2emustats).loads, 0);
+       __this_cpu_write((mipsr2bdemustats).loads, 0);
+       __this_cpu_write((mipsr2emustats).stores, 0);
+       __this_cpu_write((mipsr2bdemustats).stores, 0);
+       __this_cpu_write((mipsr2emustats).llsc, 0);
+       __this_cpu_write((mipsr2bdemustats).llsc, 0);
+       __this_cpu_write((mipsr2emustats).dsemul, 0);
+       __this_cpu_write((mipsr2bdemustats).dsemul, 0);
+       __this_cpu_write((mipsr2bremustats).jrs, 0);
+       __this_cpu_write((mipsr2bremustats).bltzl, 0);
+       __this_cpu_write((mipsr2bremustats).bgezl, 0);
+       __this_cpu_write((mipsr2bremustats).bltzll, 0);
+       __this_cpu_write((mipsr2bremustats).bgezll, 0);
+       __this_cpu_write((mipsr2bremustats).bltzal, 0);
+       __this_cpu_write((mipsr2bremustats).bgezal, 0);
+       __this_cpu_write((mipsr2bremustats).beql, 0);
+       __this_cpu_write((mipsr2bremustats).bnel, 0);
+       __this_cpu_write((mipsr2bremustats).blezl, 0);
+       __this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+       return 0;
+}
+
+static int mipsr2_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mipsr2_stats_show, inode->i_private);
+}
+
+static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mipsr2_stats_clear_show, inode->i_private);
+}
+
+static const struct file_operations mipsr2_emul_fops = {
+       .open                   = mipsr2_stats_open,
+       .read                   = seq_read,
+       .llseek                 = seq_lseek,
+       .release                = single_release,
+};
+
+static const struct file_operations mipsr2_clear_fops = {
+       .open                   = mipsr2_stats_clear_open,
+       .read                   = seq_read,
+       .llseek                 = seq_lseek,
+       .release                = single_release,
+};
+
+static int __init mipsr2_init_debugfs(void)
+{
+       extern struct dentry    *mips_debugfs_dir;
+       struct dentry           *mipsr2_emul;
+
+       if (!mips_debugfs_dir)
+               return -ENODEV;
+
+       mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
+                                         mips_debugfs_dir, NULL,
+                                         &mipsr2_emul_fops);
+       if (!mipsr2_emul)
+               return -ENOMEM;
+
+       mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
+                                         mips_debugfs_dir, NULL,
+                                         &mipsr2_clear_fops);
+       if (!mipsr2_emul)
+               return -ENOMEM;
+
+       return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
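Usage sketch for the debugfs interface added above (assumption: debugfs is mounted at /sys/kernel/debug and mips_debugfs_dir names a "mips" directory there; the path is inferred, not stated in this patch). A minimal userland reader of the counter table could look like the following; per mipsr2_stats_clear_show() above, reading r2_emul_stats_clear instead prints the same table and then zeroes the counters.

    #include <stdio.h>

    int main(void)
    {
            /* Path assumes a standard debugfs mount; adjust if different. */
            FILE *f = fopen("/sys/kernel/debug/mips/r2_emul_stats", "r");
            char line[128];

            if (!f) {
                    perror("r2_emul_stats");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
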
index 17eaf0cf760c60eb08fad9d666877dc5606ee61b..291af0b5c4828adaa22ff0fc065f37c6a094fd85 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/fpu.h>
+#include <asm/msa.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
@@ -31,6 +33,14 @@ extern long __strnlen_kernel_asm(const char *s);
 extern long __strnlen_user_nocheck_asm(const char *s);
 extern long __strnlen_user_asm(const char *s);
 
+/*
+ * Core architecture code
+ */
+EXPORT_SYMBOL_GPL(_save_fp);
+#ifdef CONFIG_CPU_HAS_MSA
+EXPORT_SYMBOL_GPL(_save_msa);
+#endif
+
 /*
  * String functions
  */
@@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_kernel);
 EXPORT_SYMBOL(__csum_partial_copy_to_user);
 EXPORT_SYMBOL(__csum_partial_copy_from_user);
+#endif
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
index f6547680c81cd7db68f5c77b19131a6f9fec05d4..423ae83af1fb7043a1daff5d06a079658446de5a 100644 (file)
        /*
         * check if we need to save FPU registers
         */
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-       LONG_L  t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
-
-       and     t0, t0, t1
-       LONG_S  t0, TI_FLAGS(t3)
+       .set push
+       .set noreorder
+       beqz    a3, 1f
+        PTR_L  t3, TASK_THREAD_INFO(a0)
+       .set pop
 
        /*
         * clear saved user stack CU1 bit
        .set pop
 1:
 
-       /* check if we need to save COP2 registers */
-       PTR_L   t2, TASK_THREAD_INFO(a0)
-       LONG_L  t0, ST_OFF(t2)
-       bbit0   t0, 30, 1f
-
-       /* Disable COP2 in the stored process state */
-       li      t1, ST0_CU2
-       xor     t0, t1
-       LONG_S  t0, ST_OFF(t2)
-
-       /* Enable COP2 so we can save it */
-       mfc0    t0, CP0_STATUS
-       or      t0, t1
-       mtc0    t0, CP0_STATUS
-
-       /* Save COP2 */
-       daddu   a0, THREAD_CP2
-       jal octeon_cop2_save
-       dsubu   a0, THREAD_CP2
-
-       /* Disable COP2 now that we are done */
-       mfc0    t0, CP0_STATUS
-       li      t1, ST0_CU2
-       xor     t0, t1
-       mtc0    t0, CP0_STATUS
-
-1:
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       dmfc0   t0, $11,7       /* CvmMemCtl */
        bbit0   t0, 6, 3f       /* Is user access enabled? */
 
        /* Store the CVMSEG state */
        .set reorder
 
        /* Disable access to CVMSEG */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       dmfc0   t0, $11,7       /* CvmMemCtl */
        xori    t0, t0, 0x40    /* Bit 6 is CVMSEG user enable */
-       mtc0    t0, $11,7       /* CvmMemCtl */
+       dmtc0   t0, $11,7       /* CvmMemCtl */
 #endif
 3:
 
  * void octeon_cop2_save(struct octeon_cop2_state *a0)
  */
        .align  7
+       .set push
+       .set noreorder
        LEAF(octeon_cop2_save)
 
        dmfc0   t9, $9,7        /* CvmCtl register. */
        dmfc2   t2, 0x0200
        sd      t0, OCTEON_CP2_CRC_IV(a0)
        sd      t1, OCTEON_CP2_CRC_LENGTH(a0)
-       sd      t2, OCTEON_CP2_CRC_POLY(a0)
        /* Skip next instructions if CvmCtl[NODFA_CP2] set */
        bbit1   t9, 28, 1f
+        sd     t2, OCTEON_CP2_CRC_POLY(a0)
 
        /* Save the LLM state */
        dmfc2   t0, 0x0402
        dmfc2   t1, 0x040A
        sd      t0, OCTEON_CP2_LLM_DAT(a0)
-       sd      t1, OCTEON_CP2_LLM_DAT+8(a0)
 
 1:     bbit1   t9, 26, 3f      /* done if CvmCtl[NOCRYPTO] set */
+        sd     t1, OCTEON_CP2_LLM_DAT+8(a0)
 
        /* Save the COP2 crypto state */
        /* this part is mostly common to both pass 1 and later revisions */
        sd      t2, OCTEON_CP2_AES_KEY+16(a0)
        dmfc2   t2, 0x0101
        sd      t3, OCTEON_CP2_AES_KEY+24(a0)
-       mfc0    t3, $15,0       /* Get the processor ID register */
+       mfc0    v0, $15,0       /* Get the processor ID register */
        sd      t0, OCTEON_CP2_AES_KEYLEN(a0)
-       li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
+       li      v1, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        sd      t1, OCTEON_CP2_AES_RESULT(a0)
-       sd      t2, OCTEON_CP2_AES_RESULT+8(a0)
        /* Skip to the Pass1 version of the remainder of the COP2 state */
-       beq     t3, t0, 2f
+       beq     v0, v1, 2f
+        sd     t2, OCTEON_CP2_AES_RESULT+8(a0)
 
        /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
        dmfc2   t1, 0x0240
        dmfc2   t2, 0x0241
+       ori     v1, v1, 0x9500 /* lowest OCTEON III PrId */
        dmfc2   t3, 0x0242
+       subu    v1, v0, v1 /* prid - lowest OCTEON III PrId */
        dmfc2   t0, 0x0243
        sd      t1, OCTEON_CP2_HSH_DATW(a0)
        dmfc2   t1, 0x0244
        sd      t1, OCTEON_CP2_GFM_MULT+8(a0)
        sd      t2, OCTEON_CP2_GFM_POLY(a0)
        sd      t3, OCTEON_CP2_GFM_RESULT(a0)
-       sd      t0, OCTEON_CP2_GFM_RESULT+8(a0)
+       bltz    v1, 4f
+        sd     t0, OCTEON_CP2_GFM_RESULT+8(a0)
+       /* OCTEON III things */
+       dmfc2   t0, 0x024F
+       dmfc2   t1, 0x0050
+       sd      t0, OCTEON_CP2_SHA3(a0)
+       sd      t1, OCTEON_CP2_SHA3+8(a0)
+4:
        jr      ra
+        nop
 
 2:     /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
        dmfc2   t3, 0x0040
 
 3:     /* pass 1 or CvmCtl[NOCRYPTO] set */
        jr      ra
+        nop
        END(octeon_cop2_save)
+       .set pop
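
For reference, the OCTEON III detection used by the save and restore paths above comes down to a processor-ID comparison. A minimal C sketch of that test follows, assuming the PrId values noted in the comments (0x000d0000 for pass 1, 0x000d9500 as the lowest OCTEON III PrId); the helper name is illustrative only.

#include <stdint.h>
#include <stdbool.h>

#define OCTEON_III_MIN_PRID    0x000d9500u      /* 0x000d0000 | 0x9500, per the comments above */

/* Mirrors the "subu v1, v0, v1; bltz v1, 4f" sequence: a negative
 * difference means the core predates OCTEON III, so the SHA-3 COP2
 * state is skipped. */
static bool prid_is_octeon_iii(uint32_t prid)
{
        return (int32_t)(prid - OCTEON_III_MIN_PRID) >= 0;
}
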
 
 /*
  * void octeon_cop2_restore(struct octeon_cop2_state *a0)
        ld      t2, OCTEON_CP2_AES_RESULT+8(a0)
        mfc0    t3, $15,0       /* Get the processor ID register */
        dmtc2   t0, 0x0110
-       li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
+       li      v0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        dmtc2   t1, 0x0100
-       bne     t0, t3, 3f      /* Skip the next stuff for non-pass1 */
+       bne     v0, t3, 3f      /* Skip the next stuff for non-pass1 */
         dmtc2  t2, 0x0101
 
        /* this code is specific for pass 1 */
 
 3:     /* this is post-pass1 code */
        ld      t2, OCTEON_CP2_HSH_DATW(a0)
+       ori     v0, v0, 0x9500 /* lowest OCTEON III PrId */
        ld      t0, OCTEON_CP2_HSH_DATW+8(a0)
        ld      t1, OCTEON_CP2_HSH_DATW+16(a0)
        dmtc2   t2, 0x0240
        dmtc2   t2, 0x0259
        ld      t2, OCTEON_CP2_GFM_RESULT+8(a0)
        dmtc2   t0, 0x025E
+       subu    v0, t3, v0 /* prid - lowest OCTEON III PrId */
        dmtc2   t1, 0x025A
-       dmtc2   t2, 0x025B
-
+       bltz    v0, done_restore
+        dmtc2  t2, 0x025B
+       /* OCTEON III specific state */
+       ld      t0, OCTEON_CP2_SHA3(a0)
+       ld      t1, OCTEON_CP2_SHA3+8(a0)
+       dmtc2   t0, 0x0051
+       dmtc2   t1, 0x0050
 done_restore:
        jr      ra
         nop
@@ -450,18 +440,23 @@ done_restore:
  * void octeon_mult_save()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- *      safely modify k0 and k1.
+ * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
+ *       safely modify v1, k0, k1, $10-$15, and $24.  It will
+ *       be overwritten with a processor-specific version of the code.
  */
-       .align  7
+       .p2align 7
        .set push
        .set noreorder
        LEAF(octeon_mult_save)
-       dmfc0   k0, $9,7        /* CvmCtl register. */
-       bbit1   k0, 27, 1f      /* Skip CvmCtl[NOMUL] */
+       jr      ra
         nop
+       .space 30 * 4, 0
+octeon_mult_save_end:
+       EXPORT(octeon_mult_save_end)
+       END(octeon_mult_save)
 
-       /* Save the multiplier state */
+       LEAF(octeon_mult_save2)
+       /* Save the multiplier state, OCTEON II and earlier */
        v3mulu  k0, $0, $0
        v3mulu  k1, $0, $0
        sd      k0, PT_MTP(sp)        /* PT_MTP    has P0 */
@@ -476,44 +471,107 @@ done_restore:
        sd      k0, PT_MPL+8(sp)      /* PT_MPL+8  has MPL1 */
        jr      ra
         sd     k1, PT_MPL+16(sp)     /* PT_MPL+16 has MPL2 */
-
-1:     /* Resume here if CvmCtl[NOMUL] */
+octeon_mult_save2_end:
+       EXPORT(octeon_mult_save2_end)
+       END(octeon_mult_save2)
+
+       LEAF(octeon_mult_save3)
+       /* Save the multiplier state, OCTEON III */
+       v3mulu  $10, $0, $0             /* read P0 */
+       v3mulu  $11, $0, $0             /* read P1 */
+       v3mulu  $12, $0, $0             /* read P2 */
+       sd      $10, PT_MTP+(0*8)(sp)   /* store P0 */
+       v3mulu  $10, $0, $0             /* read P3 */
+       sd      $11, PT_MTP+(1*8)(sp)   /*  store P1 */
+       v3mulu  $11, $0, $0             /* read P4 */
+       sd      $12, PT_MTP+(2*8)(sp)   /* store P2 */
+       ori     $13, $0, 1
+       v3mulu  $12, $0, $0             /* read P5 */
+       sd      $10, PT_MTP+(3*8)(sp)   /* store P3 */
+       v3mulu  $13, $13, $0            /* P4-P0 = MPL5-MPL1, $13 = MPL0 */
+       sd      $11, PT_MTP+(4*8)(sp)   /* store P4 */
+       v3mulu  $10, $0, $0             /* read MPL1 */
+       sd      $12, PT_MTP+(5*8)(sp)   /* store P5 */
+       v3mulu  $11, $0, $0             /* read MPL2 */
+       sd      $13, PT_MPL+(0*8)(sp)   /* store MPL0 */
+       v3mulu  $12, $0, $0             /* read MPL3 */
+       sd      $10, PT_MPL+(1*8)(sp)   /* store MPL1 */
+       v3mulu  $10, $0, $0             /* read MPL4 */
+       sd      $11, PT_MPL+(2*8)(sp)   /* store MPL2 */
+       v3mulu  $11, $0, $0             /* read MPL5 */
+       sd      $12, PT_MPL+(3*8)(sp)   /* store MPL3 */
+       sd      $10, PT_MPL+(4*8)(sp)   /* store MPL4 */
        jr      ra
-       END(octeon_mult_save)
+        sd     $11, PT_MPL+(5*8)(sp)   /* store MPL5 */
+octeon_mult_save3_end:
+       EXPORT(octeon_mult_save3_end)
+       END(octeon_mult_save3)
        .set pop
 
 /*
  * void octeon_mult_restore()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ * NOTE: This is called in RESTORE_TEMP in stackframe.h.
  */
-       .align  7
+       .p2align 7
        .set push
        .set noreorder
        LEAF(octeon_mult_restore)
-       dmfc0   k1, $9,7                /* CvmCtl register. */
-       ld      v0, PT_MPL(sp)          /* MPL0 */
-       ld      v1, PT_MPL+8(sp)        /* MPL1 */
-       ld      k0, PT_MPL+16(sp)       /* MPL2 */
-       bbit1   k1, 27, 1f              /* Skip CvmCtl[NOMUL] */
-       /* Normally falls through, so no time wasted here */
-       nop
+       jr      ra
+        nop
+       .space 30 * 4, 0
+octeon_mult_restore_end:
+       EXPORT(octeon_mult_restore_end)
+       END(octeon_mult_restore)
 
+       LEAF(octeon_mult_restore2)
+       ld      v0, PT_MPL(sp)          /* MPL0 */
+       ld      v1, PT_MPL+8(sp)        /* MPL1 */
+       ld      k0, PT_MPL+16(sp)       /* MPL2 */
        /* Restore the multiplier state */
-       ld      k1, PT_MTP+16(sp)       /* P2 */
-       MTM0    v0                      /* MPL0 */
+       ld      k1, PT_MTP+16(sp)       /* P2 */
+       mtm0    v0                      /* MPL0 */
        ld      v0, PT_MTP+8(sp)        /* P1 */
-       MTM1    v1                      /* MPL1 */
-       ld      v1, PT_MTP(sp)          /* P0 */
-       MTM2    k0                      /* MPL2 */
-       MTP2    k1                      /* P2 */
-       MTP1    v0                      /* P1 */
+       mtm1    v1                      /* MPL1 */
+       ld      v1, PT_MTP(sp)          /* P0 */
+       mtm2    k0                      /* MPL2 */
+       mtp2    k1                      /* P2 */
+       mtp1    v0                      /* P1 */
        jr      ra
-        MTP0   v1                      /* P0 */
-
-1:     /* Resume here if CvmCtl[NOMUL] */
+        mtp0   v1                      /* P0 */
+octeon_mult_restore2_end:
+       EXPORT(octeon_mult_restore2_end)
+       END(octeon_mult_restore2)
+
+       LEAF(octeon_mult_restore3)
+       ld      $12, PT_MPL+(0*8)(sp)   /* read MPL0 */
+       ld      $13, PT_MPL+(3*8)(sp)   /* read MPL3 */
+       ld      $10, PT_MPL+(1*8)(sp)   /* read MPL1 */
+       ld      $11, PT_MPL+(4*8)(sp)   /* read MPL4 */
+       .word   0x718d0008
+       /* mtm0 $12, $13                   restore MPL0 and MPL3 */
+       ld      $12, PT_MPL+(2*8)(sp)   /* read MPL2 */
+       .word   0x714b000c
+       /* mtm1 $10, $11                   restore MPL1 and MPL4 */
+       ld      $13, PT_MPL+(5*8)(sp)   /* read MPL5 */
+       ld      $10, PT_MTP+(0*8)(sp)   /* read P0 */
+       ld      $11, PT_MTP+(3*8)(sp)   /* read P3 */
+       .word   0x718d000d
+       /* mtm2 $12, $13                   restore MPL2 and MPL5 */
+       ld      $12, PT_MTP+(1*8)(sp)   /* read P1 */
+       .word   0x714b0009
+       /* mtp0 $10, $11                   restore P0 and P3 */
+       ld      $13, PT_MTP+(4*8)(sp)   /* read P4 */
+       ld      $10, PT_MTP+(2*8)(sp)   /* read P2 */
+       ld      $11, PT_MTP+(5*8)(sp)   /* read P5 */
+       .word   0x718d000a
+       /* mtp1 $12, $13                   restore P1 and P4 */
        jr      ra
-        nop
-       END(octeon_mult_restore)
+       .word   0x714b000b
+       /* mtp2 $10, $11                   restore P2 and P5 */
+
+octeon_mult_restore3_end:
+       EXPORT(octeon_mult_restore3_end)
+       END(octeon_mult_restore3)
        .set pop
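
The NOTE above states that the generic octeon_mult_save/octeon_mult_restore stubs are overwritten with a processor-specific version, which is why each variant exports an *_end marker. A hedged sketch of how such patching could be done is below; the function name and call site are assumptions for illustration, not part of this patch.

#include <linux/string.h>
#include <asm/cacheflush.h>

extern char octeon_mult_save[], octeon_mult_save_end[];
extern char octeon_mult_save3[], octeon_mult_save3_end[];

/* Copy the OCTEON III variant over the generic stub and make the new
 * text visible to instruction fetch (sketch only). */
static void octeon_install_mult_save3(void)
{
        size_t len = octeon_mult_save3_end - octeon_mult_save3;

        memcpy(octeon_mult_save, octeon_mult_save3, len);
        flush_icache_range((unsigned long)octeon_mult_save,
                           (unsigned long)octeon_mult_save + len);
}
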
index 097fc8d14e4225288733bac25f6ea17c6737d1c8..130af7d26a9c5de2fe2ec5fdd5969d9bd4a5f42a 100644 (file)
@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "]\n");
        }
 
-       seq_printf(m, "isa\t\t\t: mips1");
+       seq_printf(m, "isa\t\t\t:");
+       if (cpu_has_mips_r1)
+               seq_printf(m, " mips1");
        if (cpu_has_mips_2)
                seq_printf(m, "%s", " mips2");
        if (cpu_has_mips_3)
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "%s", " mips32r1");
        if (cpu_has_mips32r2)
                seq_printf(m, "%s", " mips32r2");
+       if (cpu_has_mips32r6)
+               seq_printf(m, "%s", " mips32r6");
        if (cpu_has_mips64r1)
                seq_printf(m, "%s", " mips64r1");
        if (cpu_has_mips64r2)
                seq_printf(m, "%s", " mips64r2");
+       if (cpu_has_mips64r6)
+               seq_printf(m, "%s", " mips64r6");
        seq_printf(m, "\n");
 
        seq_printf(m, "ASEs implemented\t:");
index 85bff5d513e5b42ae483e414c14a4a844793b9a1..bf85cc180d9105b7c605d049ccfdb352c76cc066 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/completion.h>
 #include <linux/kallsyms.h>
 #include <linux/random.h>
+#include <linux/prctl.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        smp_call_function(arch_dump_stack, NULL, 1);
 }
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+       int value = 0;
+
+       if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+               value |= PR_FP_MODE_FR;
+       if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+               value |= PR_FP_MODE_FRE;
+
+       return value;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+       const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+       unsigned long switch_count;
+       struct task_struct *t;
+
+       /* Check the value is valid */
+       if (value & ~known_bits)
+               return -EOPNOTSUPP;
+
+       /* Avoid inadvertently triggering emulation */
+       if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+           !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+               return -EOPNOTSUPP;
+       if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+               return -EOPNOTSUPP;
+
+       /* FR = 0 not supported in MIPS R6 */
+       if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+               return -EOPNOTSUPP;
+
+       /* Save FP & vector context, then disable FPU & MSA */
+       if (task->signal == current->signal)
+               lose_fpu(1);
+
+       /* Prevent any threads from obtaining live FP context */
+       atomic_set(&task->mm->context.fp_mode_switching, 1);
+       smp_mb__after_atomic();
+
+       /*
+        * If there are multiple online CPUs then wait until all threads whose
+        * FP mode is about to change have been context switched. This approach
+        * allows us to only worry about whether an FP mode switch is in
+        * progress when FP is first used in a task's time slice. Pretty much all
+        * of the mode switch overhead can thus be confined to cases where mode
+        * switches are actually occurring. That is, to here. However, for the
+        * thread performing the mode switch it may take a while...
+        */
+       if (num_online_cpus() > 1) {
+               spin_lock_irq(&task->sighand->siglock);
+
+               for_each_thread(task, t) {
+                       if (t == current)
+                               continue;
+
+                       switch_count = t->nvcsw + t->nivcsw;
+
+                       do {
+                               spin_unlock_irq(&task->sighand->siglock);
+                               cond_resched();
+                               spin_lock_irq(&task->sighand->siglock);
+                       } while ((t->nvcsw + t->nivcsw) == switch_count);
+               }
+
+               spin_unlock_irq(&task->sighand->siglock);
+       }
+
+       /*
+        * There are now no threads of the process with live FP context, so it
+        * is safe to proceed with the FP mode switch.
+        */
+       for_each_thread(task, t) {
+               /* Update desired FP register width */
+               if (value & PR_FP_MODE_FR) {
+                       clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+               } else {
+                       set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+                       clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+               }
+
+               /* Update desired FP single layout */
+               if (value & PR_FP_MODE_FRE)
+                       set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+               else
+                       clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+       }
+
+       /* Allow threads to use FP again */
+       atomic_set(&task->mm->context.fp_mode_switching, 0);
+
+       return 0;
+}
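
The two hooks above back the PR_GET_FP_MODE/PR_SET_FP_MODE prctl operations introduced by this series. A minimal userspace sketch of how a task could query and request an FP mode is shown below; it assumes the PR_FP_MODE_* constants are exported via <linux/prctl.h> and is illustration only.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

        if (mode < 0) {
                perror("PR_GET_FP_MODE");
                return 1;
        }
        printf("FR=%d FRE=%d\n",
               !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

        /* Request 64-bit FP registers; the kernel may refuse with EOPNOTSUPP. */
        if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0)
                perror("PR_SET_FP_MODE");

        return 0;
}
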
index 6c160c67984c014e53a3a068698fdef58a9b9345..676c5030a953bf9cca5ad038a7526d3b94ce372d 100644 (file)
@@ -34,7 +34,7 @@
        .endm
 
        .set    noreorder
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
 
 LEAF(_save_fp_context)
        .set    push
@@ -42,7 +42,8 @@ LEAF(_save_fp_context)
        cfc1    t1, fcr31
        .set    pop
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        .set    push
        SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32)
        SET_HARDFLOAT
        cfc1    t1, fcr31
 
+#ifndef CONFIG_CPU_MIPS64_R6
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip storing odd if FR=0
         nop
+#endif
 
        /* Store the 16 odd double precision registers */
        EX      sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32)
 LEAF(_restore_fp_context)
        EX      lw t1, SC_FPC_CSR(a0)
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
+               defined(CONFIG_CPU_MIPS32_R6)
        .set    push
        SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32)
        SET_HARDFLOAT
        EX      lw t1, SC32_FPC_CSR(a0)
 
+#ifndef CONFIG_CPU_MIPS64_R6
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip loading odd if FR=0
         nop
+#endif
 
        EX      ldc1 $f1, SC32_FPREGS+8(a0)
        EX      ldc1 $f3, SC32_FPREGS+24(a0)
index 64591e671878f41d9c4d0551806783b65578eb23..3b1a36f13a7dd915c235f11e073c805264f90960 100644 (file)
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_save_double a0 t0 t1                # clobbers t1
@@ -126,7 +127,8 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_restore_double a0 t0 t1             # clobbers t1
@@ -240,9 +242,9 @@ LEAF(_init_fpu)
        mtc1    t1, $f30
        mtc1    t1, $f31
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
        .set    push
-       .set    mips32r2
+       .set    MIPS_ISA_LEVEL_RAW
        .set    fp=64
        sll     t0, t0, 5                       # is Status.FR set?
        bgez    t0, 1f                          # no: skip setting upper 32b
@@ -280,9 +282,9 @@ LEAF(_init_fpu)
        mthc1   t1, $f30
        mthc1   t1, $f31
 1:     .set    pop
-#endif /* CONFIG_CPU_MIPS32_R2 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
 #else
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
        dmtc1   t1, $f0
        dmtc1   t1, $f2
        dmtc1   t1, $f4
index 67f2495def1cd18615210c32e2cc111ea66c8fe0..d1168d7c31e8ef37c51568b93cf676e9a6c3ef81 100644 (file)
@@ -208,6 +208,7 @@ void spram_config(void)
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
+       case CPU_QEMU_GENERIC:
                config0 = read_c0_config();
                /* FIXME: addresses are Malta specific */
                if (config0 & (1<<24)) {
index 604b558809c4c4474bfd953507ca7e54af5e9eca..53a7ef9a8f320c5b14c5abb2d3e1518c0ad7f756 100644 (file)
@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
index c3b41e24c05a47337509b9579d5b1302ba6f6e80..33984c04b60b710516f1b0bfb88aa52aaa04629f 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -837,7 +838,7 @@ out:
        exception_exit(prev_state);
 }
 
-static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        const char *str)
 {
        siginfo_t info;
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs)
        unsigned int opcode = 0;
        int status = -1;
 
+       /*
+        * Avoid any kernel code. Just emulate the R2 instruction
+        * as quickly as possible.
+        */
+       if (mipsr2_emulation && cpu_has_mips_r6 &&
+           likely(user_mode(regs))) {
+               if (likely(get_user(opcode, epc) >= 0)) {
+                       status = mipsr2_decoder(regs, opcode);
+                       switch (status) {
+                       case 0:
+                       case SIGEMT:
+                               task_thread_info(current)->r2_emul_return = 1;
+                               return;
+                       case SIGILL:
+                               goto no_r2_instr;
+                       default:
+                               process_fpemu_return(status,
+                                                    &current->thread.cp0_baduaddr);
+                               task_thread_info(current)->r2_emul_return = 1;
+                               return;
+                       }
+               }
+       }
+
+no_r2_instr:
+
        prev_state = exception_enter();
+
        if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
                       SIGILL) == NOTIFY_STOP)
                goto out;
@@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;
 }
 
+static int wait_on_fp_mode_switch(atomic_t *p)
+{
+       /*
+        * The FP mode for this task is currently being switched. That may
+        * involve modifications to the format of this task's FP context which
+        * make it unsafe to proceed with execution for the moment. Instead,
+        * schedule some other task.
+        */
+       schedule();
+       return 0;
+}
+
 static int enable_restore_fp_context(int msa)
 {
        int err, was_fpu_owner, prior_msa;
 
+       /*
+        * If an FP mode switch is currently underway, wait for it to
+        * complete before proceeding.
+        */
+       wait_on_atomic_t(&current->mm->context.fp_mode_switching,
+                        wait_on_fp_mode_switch, TASK_KILLABLE);
+
        if (!used_math()) {
                /* First time FP context user. */
                preempt_disable();
@@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void)
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
+       case CPU_QEMU_GENERIC:
                {
 #define ERRCTL_PE      0x80000000
 #define ERRCTL_L2P     0x00800000
@@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void)
        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
-       if (cpu_has_mips_r2 &&
+       if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
                        reg_val & (1<<29) ? "ED " : "",
@@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void)
        unsigned int reg_val;
 
        /* For the moment, report the problem and hang. */
-       if (cpu_has_mips_r2 &&
+       if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
                       read_c0_ecc());
@@ -1959,7 +2007,7 @@ static void configure_hwrena(void)
 {
        unsigned int hwrena = cpu_hwrena_impl_bits;
 
-       if (cpu_has_mips_r2)
+       if (cpu_has_mips_r2_r6)
                hwrena |= 0x0000000f;
 
        if (!noulri && cpu_has_userlocal)
@@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
         */
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2094,7 +2142,7 @@ void __init trap_init(void)
 #else
         ebase = CKSEG0;
 #endif
-               if (cpu_has_mips_r2)
+               if (cpu_has_mips_r2_r6)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
 
index e11906dff8850fc277b14985eafdbbb4d2c3ece9..bbb69695a0a10765f4c6bdf300da304667c021f5 100644 (file)
@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lb("%0", "0(%2)")"\n\t"    \
+                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define     LoadWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lbu("%0", "0(%2)")"\n\t"   \
+                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:lb\t%0, 0(%2)\n\t"               \
+                       "2:lbu\t $1, 1(%2)\n\t"             \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:lbu\t$1, 2(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:lbu\t$1, 3(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "5:lbu\t$1, 4(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "6:lbu\t$1, 5(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "7:lbu\t$1, 6(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "8:lbu\t$1, 7(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n\t"                     \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=r" (res)                        \
                        : "r" (value), "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_swl("%1", "(%2)")"\n"    \
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=r" (res)                                \
                : "r" (value), "r" (addr), "i" (-EFAULT));
-#endif
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_sb("%1", "3(%2)")"\n\t"    \
+                       "srl\t$1, %1, 0x8\n\t"              \
+                       "2:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "srl\t$1, $1,  0x8\n\t"             \
+                       "3:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "srl\t$1, $1, 0x8\n\t"              \
+                       "4:"user_sb("$1", "0(%2)")"\n\t"    \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:sb\t%1, 7(%2)\n\t"               \
+                       "dsrl\t$1, %1, 0x8\n\t"             \
+                       "2:sb\t$1, 6(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "3:sb\t$1, 5(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "4:sb\t$1, 4(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "5:sb\t$1, 3(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "6:sb\t$1, 2(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "7:sb\t$1, 1(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "8:sb\t$1, 0(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#else /* __BIG_ENDIAN */
 
-#ifdef __LITTLE_ENDIAN
 #define     LoadHW(addr, value, res)  \
                __asm__ __volatile__ (".set\tnoat\n"        \
                        "1:\t"user_lb("%0", "1(%2)")"\n"    \
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lb("%0", "3(%2)")"\n\t"    \
+                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define     LoadWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lbu("%0", "3(%2)")"\n\t"   \
+                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:lb\t%0, 7(%2)\n\t"               \
+                       "2:lbu\t$1, 6(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:lbu\t$1, 5(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:lbu\t$1, 4(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "5:lbu\t$1, 3(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "6:lbu\t$1, 2(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "7:lbu\t$1, 1(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "8:lbu\t$1, 0(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n\t"                     \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=r" (res)                        \
                        : "r" (value), "r" (addr), "i" (-EFAULT));
-
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_swl("%1", "3(%2)")"\n"   \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=r" (res)                                \
                : "r" (value), "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_sb("%1", "0(%2)")"\n\t"    \
+                       "srl\t$1, %1, 0x8\n\t"              \
+                       "2:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "srl\t$1, $1,  0x8\n\t"             \
+                       "3:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "srl\t$1, $1, 0x8\n\t"              \
+                       "4:"user_sb("$1", "3(%2)")"\n\t"    \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:sb\t%1, 0(%2)\n\t"               \
+                       "dsrl\t$1, %1, 0x8\n\t"             \
+                       "2:sb\t$1, 1(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "3:sb\t$1, 2(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "4:sb\t$1, 3(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "5:sb\t$1, 4(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "6:sb\t$1, 5(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "7:sb\t$1, 6(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "8:sb\t$1, 7(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
 #endif
 
 static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                        break;
                return;
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * COP2 is available to implementor for application specific use.
         * It's up to applications to register a notifier chain and do
         * whatever they have to do, including possible sending of signals.
+        *
+        * These instructions have been reallocated in Release 6
         */
        case lwc2_op:
                cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sdc2_op:
                cu2_notifier_call_chain(CU2_SDC2_OP, regs);
                break;
-
+#endif
        default:
                /*
                 * Pheeee...  We encountered an yet unknown instruction or
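
Since the lwl/lwr (and ldl/ldr) pairs are removed in Release 6, the LoadW/LoadDW macros added above assemble an unaligned value from individual byte accesses. A rough C equivalent of the big-endian 32-bit case, for illustration only:

#include <stdint.h>

static int32_t load_w_unaligned_be(const uint8_t *addr)
{
        uint32_t value = addr[0];               /* lb: first byte */

        value = (value << 8) | addr[1];         /* lbu: remaining bytes */
        value = (value << 8) | addr[2];
        value = (value << 8) | addr[3];

        return (int32_t)value;                  /* result is sign-extended, as with lw */
}
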
index bbcd82242059d36f91f2f4a7dfbd8d22fb479ea6..b6beb0e07b1b3b535f7625d61100e6c0087de00d 100644 (file)
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
+               local_irq_restore(flags);
                return -1;
        }
 
index c1388d40663b0143501bacae8af8f2793e54c815..bd6437f67dc03b01c3a76e57a53a811a61f0ebe7 100644 (file)
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
            TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
            TP_ARGS(vcpu, reason),
            TP_STRUCT__entry(
-                       __field(struct kvm_vcpu *, vcpu)
+                       __field(unsigned long, pc)
                        __field(unsigned int, reason)
            ),
 
            TP_fast_assign(
-                       __entry->vcpu = vcpu;
+                       __entry->pc = vcpu->arch.pc;
                        __entry->reason = reason;
            ),
 
            TP_printk("[%s]PC: 0x%08lx",
                      kvm_mips_exit_types_str[__entry->reason],
-                     __entry->vcpu->arch.pc)
+                     __entry->pc)
 );
 
 #endif /* _TRACE_KVM_H */
index eeddc58802e11a57595ee7d123e0538577af0c67..1e9e900cd3c382a4a8c3bce2ab7ca2987a1acfcd 100644 (file)
@@ -8,6 +8,7 @@ lib-y   += bitops.o csum_partial.o delay.o memcpy.o memset.o \
 
 obj-y                  += iomap.o
 obj-$(CONFIG_PCI)      += iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM)     := $(filter-out csum_partial.o, $(lib-y))
 
 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
 obj-$(CONFIG_CPU_R3000)                += r3k_dump_tlb.o
index 5d3238af9b5cc551ecb0ca9670b242b62f181a73..9245e1705e691124ad3f4eb563cfe680184c30cf 100644 (file)
         and    t0, src, ADDRMASK
        PREFS(  0, 2*32(src) )
        PREFD(  1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
        bnez    t1, .Ldst_unaligned\@
         nop
        bnez    t0, .Lsrc_unaligned_dst_aligned\@
+#else
+       or      t0, t0, t1
+       bnez    t0, .Lcopy_unaligned_bytes\@
+#endif
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
        bne     rem, len, 1b
        .set    noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
        bne     len, rem, 1b
        .set    noreorder
 
+#endif /* !CONFIG_CPU_MIPSR6 */
 .Lcopy_bytes_checklen\@:
        beqz    len, .Ldone\@
         nop
 .Ldone\@:
        jr      ra
         nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+       COPY_BYTE(0)
+       COPY_BYTE(1)
+       COPY_BYTE(2)
+       COPY_BYTE(3)
+       COPY_BYTE(4)
+       COPY_BYTE(5)
+       COPY_BYTE(6)
+       COPY_BYTE(7)
+       ADD     src, src, 8
+       b       1b
+        ADD    dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
        .if __memcpy == 1
        END(memcpy)
        .set __memcpy, 0
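
On Release 6 the misaligned-copy fast paths that relied on lwl/lwr are unavailable, so memcpy falls back to the .Lcopy_unaligned_bytes loop above, which moves one byte at a time (unrolled by eight). An equivalent C view, purely for illustration:

#include <stddef.h>

static void copy_unaligned_bytes(unsigned char *dst, const unsigned char *src,
                                 size_t len)
{
        while (len--)
                *dst++ = *src++;
}
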
index c8fe6b1968fb313dca7e9bb307ec89b159746730..b8e63fd0037547cb9939907696da7ae55d699a95 100644 (file)
        .set            at
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
        EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
        PTR_SUBU        a0, t0                  /* long align ptr */
        PTR_ADDU        a2, t0                  /* correct size */
 
+#else /* CONFIG_CPU_MIPSR6 */
+#define STORE_BYTE(N)                          \
+       EX(sb, a1, N(a0), .Lbyte_fixup\@);      \
+       beqz            t0, 0f;                 \
+       PTR_ADDU        t0, 1;
+
+       PTR_ADDU        a2, t0                  /* correct size */
+       PTR_ADDU        t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+       ori             a0, STORMASK
+       xori            a0, STORMASK
+       PTR_ADDIU       a0, STORSIZE
+#endif /* CONFIG_CPU_MIPSR6 */
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial\@  /* no block to fill */
        andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
+#ifndef CONFIG_CPU_MIPSR6
        PTR_ADDU        a0, a2                  /* What's left */
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 #else
        EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
+#else
+       PTR_SUBU        t0, $0, a2
+       PTR_ADDIU       t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif
 1:     jr              ra
        move            a2, zero
 
        .hidden __memset
        .endif
 
+.Lbyte_fixup\@:
+       PTR_SUBU        a2, $0, t0
+       jr              ra
+        PTR_ADDIU      a2, 1
+
 .Lfirst_fixup\@:
        jr      ra
        nop
index be777d9a3f85969a6b3104d392402ffe37d12c9d..272af8ac2425290c892849186b170f4b95c259b6 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
 
 /*
  * For cli() we have to insert nops to make sure that the new value
index 9dfcd7fc1bc3dd712980c93f95ea4b8c1f3049d9..b30bf65c7d7d81ea1ed7e714d7a3554139855207 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/processor.h>
 #include <asm/fpu_emulator.h>
 #include <asm/fpu.h>
+#include <asm/mips-r2-to-r6-emul.h>
 
 #include "ieee754.h"
 
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *,
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
 /* convert condition code register number to csr bit */
-static const unsigned int fpucondbit[8] = {
+const unsigned int fpucondbit[8] = {
        FPU_CSR_COND0,
        FPU_CSR_COND1,
        FPU_CSR_COND2,
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                        /* Fall through */
                case jr_op:
+                       /* For R6, JR already emulated in jalr_op */
+                       /* For R6, JR is already emulated in jalr_op */
+                               break;
                        *contpc = regs->regs[insn.r_format.rs];
                        return 1;
                }
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                switch (insn.i_format.rt) {
                case bltzal_op:
                case bltzall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bltzall_op))
+                               break;
+
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
-               case bltz_op:
                case bltzl_op:
+                       if (NO_R6EMU)
+                               break;
+               case bltz_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                        return 1;
                case bgezal_op:
                case bgezall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bgezall_op))
+                               break;
+
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
-               case bgez_op:
                case bgezl_op:
+                       if (NO_R6EMU)
+                               break;
+               case bgez_op:
                        if ((long)regs->regs[insn.i_format.rs] >= 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                /* Set microMIPS mode bit: XOR for jalx. */
                *contpc ^= bit;
                return 1;
-       case beq_op:
        case beql_op:
+               if (NO_R6EMU)
+                       break;
+       case beq_op:
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case bne_op:
        case bnel_op:
+               if (NO_R6EMU)
+                       break;
+       case bne_op:
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case blez_op:
        case blezl_op:
+               if (NO_R6EMU)
+                       break;
+       case blez_op:
+
+               /*
+                * Compact branches for R6 for the
+                * blez and blezl opcodes.
+                * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+                * BLEZ  | rs = rt != 0      == BGEZALC
+                * BLEZ  | rs != 0 | rt != 0 == BGEUC
+                * BLEZL | rs = 0 | rt != 0  == BLEZC
+                * BLEZL | rs = rt != 0      == BGEZC
+                * BLEZL | rs != 0 | rt != 0 == BGEC
+                *
+                * For real BLEZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+
+                       return 1;
+               }
                if ((long)regs->regs[insn.i_format.rs] <= 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case bgtz_op:
        case bgtzl_op:
+               if (NO_R6EMU)
+                       break;
+       case bgtz_op:
+               /*
+                * Compact branches for R6 for the
+                * bgtz and bgtzl opcodes.
+                * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+                * BGTZ  | rs = rt != 0      == BLTZALC
+                * BGTZ  | rs != 0 | rt != 0 == BLTUC
+                * BGTZL | rs = 0 | rt != 0  == BGTZC
+                * BGTZL | rs = rt != 0      == BLTZC
+                * BGTZL | rs != 0 | rt != 0 == BLTC
+                *
+                * *ZALC variant for BGTZ && rt != 0
+                * For real BGTZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == bgtz_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+
+                       return 1;
+               }
+
                if ((long)regs->regs[insn.i_format.rs] > 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
+       case cbcond0_op:
+       case cbcond1_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               if (insn.i_format.rt && !insn.i_format.rs)
+                       regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                else
                        *contpc = regs->cp0_epc + 8;
                return 1;
+#else
+       case bc6_op:
+               /*
+                * Only valid for MIPS R6, but we can still end up
+                * here from broken userland, so just tell the emulator
+                * this is not a branch and let it break later on.
+                */
+               if (!cpu_has_mips_r6)
+                       break;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case balc6_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case beqzcjic_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case bnezcjialc_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               if (!insn.i_format.rs)
+                       regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
 #endif
        case cop0_op:
        case cop1_op:
+               /* Need to check for R6 bc1nez and bc1eqz branches */
+               if (cpu_has_mips_r6 &&
+                   ((insn.i_format.rs == bc1eqz_op) ||
+                    (insn.i_format.rs == bc1nez_op))) {
+                       bit = 0;
+                       switch (insn.i_format.rs) {
+                       case bc1eqz_op:
+                               if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
+                                   bit = 1;
+                               break;
+                       case bc1nez_op:
+                               if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
+                                   bit = 1;
+                               break;
+                       }
+                       if (bit)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+
+                       return 1;
+               }
+               /* R2/R6 compatible cop1 instruction. Fall through */
        case cop2_op:
        case cop1x_op:
                if (insn.i_format.rs == bc_op) {
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                 * achieve full IEEE-754 accuracy - however this emulator does.
                 */
                case frsqrt_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_sp_rsqrt;
                        goto scopuop;
 
                case frecip_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@ copcsr:
                 * achieve full IEEE-754 accuracy - however this emulator does.
                 */
                case frsqrt_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_dp_rsqrt;
                        goto dcopuop;
                case frecip_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_dp_recip;
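
The comment tables in the blez/blezl and bgtz/bgtzl cases above carry the real reasoning: R6 reuses those legacy opcodes for compact branches, and only the rs/rt fields tell the variants apart (the *ALC forms additionally link, which is why those paths set regs->regs[31] to the address of the following instruction). A hedged sketch of that classification for the BLEZ opcode, using an illustrative enum and helper that are not kernel code:

/* Classify an R6 reuse of the legacy BLEZ opcode by its rs/rt fields,
 * following the table in the comment above. Illustrative only.
 */
enum pop06_kind { REAL_BLEZ, R6_BLEZALC, R6_BGEZALC, R6_BGEUC };

static enum pop06_kind classify_pop06(unsigned int rs, unsigned int rt)
{
	if (rt == 0)
		return REAL_BLEZ;	/* genuine BLEZ: rt is always 0 */
	if (rs == 0)
		return R6_BLEZALC;	/* rs = 0, rt != 0 */
	if (rs == rt)
		return R6_BGEZALC;	/* rs = rt != 0 */
	return R6_BGEUC;		/* rs != 0, rt != 0, rs != rt */
}

The BLEZL opcode follows the same pattern with the non-linking BLEZC/BGEZC/BGEC forms.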
index dd261df005c20c6ce7540fb458a2ec70317d6c99..3f8059602765ea9703841715e15c60a72ab1d123 100644 (file)
@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
-                       ".set mips3\n\t"
+                       ".set "MIPS_ISA_LEVEL"\n\t"
 #ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
 #endif
@@ -1255,6 +1255,7 @@ static void probe_pcache(void)
        case CPU_P5600:
        case CPU_PROAPTIV:
        case CPU_M5150:
+       case CPU_QEMU_GENERIC:
                if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
                    (c->icache.waysize > PAGE_SIZE))
                        c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@ static void setup_scache(void)
 
        default:
                if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+                                   MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+                                   MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
 #ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init ()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
index 70ab5d664332694e92305331f13ed15a35ab1956..7ff8637e530d7974d002594797d42044505a0467 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/ratelimit.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -28,6 +29,8 @@
 #include <asm/highmem.h>               /* For VMALLOC_END */
 #include <linux/kdebug.h>
 
+int show_unhandled_signals = 1;
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
               current->comm, current->pid, field, address, write,
@@ -203,15 +208,21 @@ bad_area_nosemaphore:
        if (user_mode(regs)) {
                tsk->thread.cp0_badvaddr = address;
                tsk->thread.error_code = write;
-#if 0
-               printk("do_page_fault() #2: sending SIGSEGV to %s for "
-                      "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
-                      tsk->comm,
-                      write ? "write access to" : "read access from",
-                      field, address,
-                      field, (unsigned long) regs->cp0_epc,
-                      field, (unsigned long) regs->regs[31]);
-#endif
+               if (show_unhandled_signals &&
+                   unhandled_signal(tsk, SIGSEGV) &&
+                   __ratelimit(&ratelimit_state)) {
+                       pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
+                               tsk->comm,
+                               write ? "write access to" : "read access from",
+                               field, address);
+                       pr_info("epc = %0*lx in", field,
+                               (unsigned long) regs->cp0_epc);
+                       print_vma_addr(" ", regs->cp0_epc);
+                       pr_info("ra  = %0*lx in", field,
+                               (unsigned long) regs->regs[31]);
+                       print_vma_addr(" ", regs->regs[31]);
+                       pr_info("\n");
+               }
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
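
The hunk above replaces the old #if 0 debug printk with a ratelimited, opt-out report of unhandled SIGSEGVs, mirroring what other architectures already do via show_unhandled_signals and unhandled_signal(). The ratelimit part in isolation looks roughly like the following sketch (the message and limits are illustrative, not the fault handler's exact output):

#include <linux/ratelimit.h>
#include <linux/printk.h>

/* Sketch: report at most 10 events per 5-second window, then stay quiet. */
static void report_bad_access(unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

	if (__ratelimit(&rs))
		pr_info("rejected access to %#lx\n", addr);
}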
index b611102e23b5c72948ef5c5b8c1e12d603a0ff15..3f85f921801b84f7f91cd67e42bbe528552d6860 100644 (file)
@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5];
 #define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
+/*
+ * MIPS R6 limits the pref instruction to a 9-bit signed offset.
+ * Skip the prefetch if the offset does not fit.
+ */
+#define _uasm_i_pref(a, b, c, d)               \
+do {                                           \
+       if (cpu_has_mips_r6) {                  \
+               if (c <= 0xff && c >= -0x100)   \
+                       uasm_i_pref(a, b, c, d);\
+       } else {                                \
+               uasm_i_pref(a, b, c, d);        \
+       }                                       \
+} while (0)
+
 static int pref_bias_clear_store;
 static int pref_bias_copy_load;
 static int pref_bias_copy_store;
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void)
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
-                       pref_dst_mode = Pref_PrepareForStore;
+                       if (cpu_has_mips_r6)
+                               /*
+                                * Bit 30 (Pref_PrepareForStore) has been
+                                * removed from MIPS R6. Use bit 5
+                                * (Pref_StoreStreamed).
+                                */
+                               pref_dst_mode = Pref_StoreStreamed;
+                       else
+                               pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_clear_store) {
-               uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+               _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                            A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_copy_load)
-               uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+               _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
 }
 
 static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_copy_store) {
-               uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+               _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                            A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
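
Two R6 quirks drive the clear/copy-page changes above: pref lost its 16-bit offset in favour of a 9-bit signed one (hence the _uasm_i_pref wrapper that silently drops out-of-range prefetches), and the Pref_PrepareForStore hint (bit 30) is gone, so Pref_StoreStreamed is used instead. The range test the wrapper applies is just a signed 9-bit fit check; a standalone equivalent for illustration, with a hypothetical helper name:

#include <stdbool.h>

/* Does 'off' fit the R6 pref instruction's 9-bit signed offset field? */
static bool fits_simm9(long off)
{
	return off >= -0x100 && off <= 0xff;	/* i.e. -256..255 */
}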
index 99eb8fabab606afe28781620301f430e4b776fd4..4ceafd13870cd6945634713f04ac3f7d65ba52d2 100644 (file)
@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_BMIPS5000:
+       case CPU_QEMU_GENERIC:
                if (config2 & (1 << 12))
                        return 0;
        }
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void)
 
        /* Ignore anything but MIPSxx processors */
        if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                             MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+                             MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+                             MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
                return 0;
 
        /* Does this MIPS32/MIPS64 CPU have a config2 register? */
index 30639a6e9b8ca3ad3677afb0cbd553b9c9472c18..b2afa49beab082116e282f8b043cb2a65e4c06ce 100644 (file)
@@ -485,13 +485,11 @@ static void r4k_tlb_configure(void)
                 * Enable the no read, no exec bits, and enable large virtual
                 * address.
                 */
-               u32 pg = PG_RIE | PG_XIE;
 #ifdef CONFIG_64BIT
-               pg |= PG_ELPA;
+               set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#else
+               set_c0_pagegrain(PG_RIE | PG_XIE);
 #endif
-               if (cpu_has_rixiex)
-                       pg |= PG_IEC;
-               write_c0_pagegrain(pg);
        }
 
        temp_tlb_entry = current_cpu_data.tlbsize - 1;
index 3978a3d813666f8566159ac20c705c2a5c88df53..d75ff73a20120bf28d919b7b6b5bf30a22b5088f 100644 (file)
@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_exec_hazard) {
                /*
                 * The architecture spec says an ehb is required here,
                 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
                case CPU_PROAPTIV:
                case CPU_P5600:
                case CPU_M5150:
+               case CPU_QEMU_GENERIC:
                        break;
 
                default:
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void)
 
                switch (current_cpu_type()) {
                default:
-                       if (cpu_has_mips_r2) {
+                       if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
 
                case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void)
 
                switch (current_cpu_type()) {
                default:
-                       if (cpu_has_mips_r2) {
+                       if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
 
                case CPU_CAVIUM_OCTEON:
index 8399ddf03a0235c5ce9d5ee28ed5007c9986a8ab..d78178daea4bc2c1e069d2069db976b1ce693740 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifndef CONFIG_CPU_MICROMIPS
-#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
-
 #include "uasm.c"
 
 static struct insn insn_table_MM[] = {
index 8e02291cfc0c1c2d8edeb5733498e877bff09542..b4a8378935625b2e1d0f228f6961ebd8208544f1 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifdef CONFIG_CPU_MICROMIPS
-#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, e)                                      \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << SIMM9_SH                                      \
+        | (e) << FUNC_SH)
 
 #include "uasm.c"
 
@@ -62,7 +62,11 @@ static struct insn insn_table[] = {
        { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
        { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
        { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+#endif
        { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
        { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +89,22 @@ static struct insn insn_table[] = {
        { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+#else
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jalr_op),  RS },
+#endif
        { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
        { insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_lld,  M6(spec3_op, 0, 0, 0, lld6_op),  RS | RT | SIMM9 },
+       { insn_ll,  M6(spec3_op, 0, 0, 0, ll6_op),  RS | RT | SIMM9 },
+#endif
        { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
        { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +117,20 @@ static struct insn insn_table[] = {
        { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
        { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
        { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_pref,  M6(spec3_op, 0, 0, 0, pref6_op),  RS | RT | SIMM9 },
+#endif
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
        { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_scd,  M6(spec3_op, 0, 0, 0, scd6_op),  RS | RT | SIMM9 },
+       { insn_sc,  M6(spec3_op, 0, 0, 0, sc6_op),  RS | RT | SIMM9 },
+#endif
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
@@ -198,6 +220,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
                op |= build_set(va_arg(ap, u32));
        if (ip->fields & SCIMM)
                op |= build_scimm(va_arg(ap, u32));
+       if (ip->fields & SIMM9)
+               op |= build_scimm9(va_arg(ap, u32));
        va_end(ap);
 
        **buf = op;
index 4adf30284813a93aa11143ddbfb433831327e329..319051c34343ae9e0eb7c5d4adf82732e5598cd6 100644 (file)
@@ -24,7 +24,8 @@ enum fields {
        JIMM = 0x080,
        FUNC = 0x100,
        SET = 0x200,
-       SCIMM = 0x400
+       SCIMM = 0x400,
+       SIMM9 = 0x800,
 };
 
 #define OP_MASK                0x3f
@@ -41,6 +42,8 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
+#define SIMM9_SH       7
+#define SIMM9_MASK     0x1ff
 
 enum opcode {
        insn_invalid,
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg)
        return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
+static inline u32 build_scimm9(s32 arg)
+{
+       WARN((arg > 0xff || arg < -0x100),
+              KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
 static inline u32 build_func(u32 arg)
 {
        WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -330,7 +341,7 @@ I_u3u1u2(_ldx)
 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
                            unsigned int c)
 {
-       if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+       if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
                /*
                 * As per erratum Core-14449, replace prefetches 0-4,
                 * 6-24 with 'pref 28'.
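
The new SIMM9 field and build_scimm9() above exist because the R6 re-encodings of cache, ll/lld, sc/scd and pref (the spec3 forms selected with the M6() macro in the table further up) carry a 9-bit signed immediate in bits 15..7 instead of the old 16-bit offset. A hedged sketch of encoding and decoding that field, using the same shift/mask values but hypothetical helper names:

#include <stdint.h>

#define SIMM9_SH	7
#define SIMM9_MASK	0x1ff

/* Place a signed 9-bit immediate into bits 15..7, as build_scimm9() does. */
static uint32_t encode_simm9(int32_t imm)
{
	return ((uint32_t)imm & SIMM9_MASK) << SIMM9_SH;
}

/* Extract it again, sign-extending from bit 8 (illustrative helper only). */
static int32_t decode_simm9(uint32_t insn)
{
	int32_t field = (insn >> SIMM9_SH) & SIMM9_MASK;

	return (field ^ 0x100) - 0x100;
}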
index ec1dd2491f962da8103e616b3c91b3758e0ddcc9..e1d69895fb1de44f5d8503027f86ebb50f40d5a6 100644 (file)
@@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts)
 int get_c0_perfcount_int(void)
 {
        if (gic_present)
-               return gic_get_c0_compare_int();
+               return gic_get_c0_perfcount_int();
        if (cp0_perfcount_irq >= 0)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
index f2355e3e65a10c4259015c48d273236630102c2d..f97e169393bc3060eb778835cbb1b4813f31b140 100644 (file)
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
 }
 
 struct pci_ops bcm1480_pci_ops = {
-       .read = bcm1480_pcibios_read,
-       .write = bcm1480_pcibios_write,
+       .read   = bcm1480_pcibios_read,
+       .write  = bcm1480_pcibios_write,
 };
 
 static struct resource bcm1480_mem_resource = {
index bedb72bd3a27155fb4068cfb7b9ee69d589f0c22..a04af55d89f10a55593b2b16f53f20c7c16ee95d 100644 (file)
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
 
 
 static struct pci_ops octeon_pci_ops = {
-       .read = octeon_read_config,
-       .write = octeon_write_config,
+       .read   = octeon_read_config,
+       .write  = octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
index eb4a17ba4a530a9a73702bfb6fd6db1890295bb4..1bb0b2bf8d6ea1e411266fd3d8de6a5afc50fba0 100644 (file)
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-       .read = octeon_pcie0_read_config,
-       .write = octeon_pcie0_write_config,
+       .read   = octeon_pcie0_read_config,
+       .write  = octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-       .read = octeon_pcie1_read_config,
-       .write = octeon_pcie1_write_config,
+       .read   = octeon_pcie1_read_config,
+       .write  = octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
 };
 
 static struct pci_ops octeon_dummy_ops = {
-       .read = octeon_dummy_read_config,
-       .write = octeon_dummy_write_config,
+       .read   = octeon_dummy_read_config,
+       .write  = octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
index 8f1b86d4da84a8f4a4f894e50f61a76ef009554f..cdf1876000101e25a153f04415e622820418ba13 100644 (file)
@@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev)
        return 0;
 }
 
-static int gio_device_suspend(struct device *dev, pm_message_t state)
-{
-       struct gio_device *gio_dev = to_gio_device(dev);
-       struct gio_driver *drv = to_gio_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->suspend)
-               error = drv->suspend(gio_dev, state);
-       return error;
-}
-
-static int gio_device_resume(struct device *dev)
-{
-       struct gio_device *gio_dev = to_gio_device(dev);
-       struct gio_driver *drv = to_gio_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->resume)
-               error = drv->resume(gio_dev);
-       return error;
-}
-
 static void gio_device_shutdown(struct device *dev)
 {
        struct gio_device *gio_dev = to_gio_device(dev);
@@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = {
        .match     = gio_bus_match,
        .probe     = gio_device_probe,
        .remove    = gio_device_remove,
-       .suspend   = gio_device_suspend,
-       .resume    = gio_device_resume,
        .shutdown  = gio_device_shutdown,
        .uevent    = gio_device_uevent,
 };
index ac37e54b3d5e7454fc2c78daf88008655d6b8870..e44a15d4f57364be376af2ee4c9c625d13e53371 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
@@ -25,9 +26,9 @@
 #include <asm/sn/gda.h>
 #include <asm/sn/sn0/hub.h>
 
-void machine_restart(char *command) __attribute__((noreturn));
-void machine_halt(void) __attribute__((noreturn));
-void machine_power_off(void) __attribute__((noreturn));
+void machine_restart(char *command) __noreturn;
+void machine_halt(void) __noreturn;
+void machine_power_off(void) __noreturn;
 
 #define noreturn while(1);                             /* Silence gcc.  */
 
index 1f823da4c77bca73980094d4b27a043d24cffaa8..44b3470a0bbb71babcc9af5131d566d22aa317c5 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
  */
 
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -35,9 +36,9 @@
 static struct timer_list power_timer, blink_timer, debounce_timer;
 static int has_panicked, shuting_down;
 
-static void ip32_machine_restart(char *command) __attribute__((noreturn));
-static void ip32_machine_halt(void) __attribute__((noreturn));
-static void ip32_machine_power_off(void) __attribute__((noreturn));
+static void ip32_machine_restart(char *command) __noreturn;
+static void ip32_machine_halt(void) __noreturn;
+static void ip32_machine_power_off(void) __noreturn;
 
 static void ip32_machine_restart(char *cmd)
 {
index afab728ab65ecde48b3c50c78e1dd19a795ad99d..96d3f9deb59c38b2172f7b3b7a81c1397ceebad9 100644 (file)
@@ -56,7 +56,9 @@ extern void paging_init(void);
 #define PGDIR_SHIFT    22
 #define PTRS_PER_PGD   1024
 #define PTRS_PER_PUD   1       /* we don't really have any PUD physically */
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD   1       /* we don't really have any PMD physically */
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PTE   1024
 
 #define PGD_SIZE       PAGE_SIZE
index 20fb1cf2dab63238b0e8b464a7b08a447257946e..64246214487288eb5cc352dd625b4e55538dded2 100644 (file)
 
 #include <uapi/asm/ptrace.h>
 
+/* This struct defines the way the registers are stored on the
+   stack during a system call.  */
+
 #ifndef __ASSEMBLY__
+struct pt_regs {
+       unsigned long  r8;      /* r8-r15 Caller-saved GP registers */
+       unsigned long  r9;
+       unsigned long  r10;
+       unsigned long  r11;
+       unsigned long  r12;
+       unsigned long  r13;
+       unsigned long  r14;
+       unsigned long  r15;
+       unsigned long  r1;      /* Assembler temporary */
+       unsigned long  r2;      /* Retval LS 32bits */
+       unsigned long  r3;      /* Retval MS 32bits */
+       unsigned long  r4;      /* r4-r7 Register arguments */
+       unsigned long  r5;
+       unsigned long  r6;
+       unsigned long  r7;
+       unsigned long  orig_r2; /* Copy of r2 ?? */
+       unsigned long  ra;      /* Return address */
+       unsigned long  fp;      /* Frame pointer */
+       unsigned long  sp;      /* Stack pointer */
+       unsigned long  gp;      /* Global pointer */
+       unsigned long  estatus;
+       unsigned long  ea;      /* Exception return address (pc) */
+       unsigned long  orig_r7;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+       unsigned long  r16;     /* r16-r23 Callee-saved GP registers */
+       unsigned long  r17;
+       unsigned long  r18;
+       unsigned long  r19;
+       unsigned long  r20;
+       unsigned long  r21;
+       unsigned long  r22;
+       unsigned long  r23;
+       unsigned long  fp;
+       unsigned long  gp;
+       unsigned long  ra;
+};
+
 #define user_mode(regs)        (((regs)->estatus & ESTATUS_EU))
 
 #define instruction_pointer(regs)      ((regs)->ra)
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h
deleted file mode 100644 (file)
index 2c87614..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2004 Microtronix Datacom Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_NIOS2_UCONTEXT_H
-#define _ASM_NIOS2_UCONTEXT_H
-
-typedef int greg_t;
-#define NGREG 32
-typedef greg_t gregset_t[NGREG];
-
-struct mcontext {
-       int version;
-       gregset_t gregs;
-};
-
-#define MCONTEXT_VERSION 2
-
-struct ucontext {
-       unsigned long     uc_flags;
-       struct ucontext  *uc_link;
-       stack_t           uc_stack;
-       struct mcontext   uc_mcontext;
-       sigset_t          uc_sigmask;   /* mask last for extensibility */
-};
-
-#endif
index 4f07ca3f8d10edd443595bac553bb170f192ce58..e0bb972a50d7425b3f422605ab64bbc82c428cc0 100644 (file)
@@ -1,4 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 header-y += elf.h
-header-y += ucontext.h
+
+generic-y += ucontext.h
index a5b91ae5cf56fb5fcff4cfb6e3ad62bd96998491..6f06d3b2949e7ffd3d95c4951517a99abdcdeea1 100644 (file)
@@ -50,9 +50,7 @@
 
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG      \
-       ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) /       \
-               sizeof(elf_greg_t))
+#define ELF_NGREG              49
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 typedef unsigned long elf_fpregset_t;
index e83a7c9d1c36c4bc6a2690bfac8fc4a05f015cfa..71a330597adff689dcae092d63839aad1fb8b7d3 100644 (file)
 
 #define NUM_PTRACE_REG (PTR_TLBMISC + 1)
 
-/* this struct defines the way the registers are stored on the
-   stack during a system call.
-
-   There is a fake_regs in setup.c that has to match pt_regs.*/
-
-struct pt_regs {
-       unsigned long  r8;              /* r8-r15 Caller-saved GP registers */
-       unsigned long  r9;
-       unsigned long  r10;
-       unsigned long  r11;
-       unsigned long  r12;
-       unsigned long  r13;
-       unsigned long  r14;
-       unsigned long  r15;
-       unsigned long  r1;              /* Assembler temporary */
-       unsigned long  r2;              /* Retval LS 32bits */
-       unsigned long  r3;              /* Retval MS 32bits */
-       unsigned long  r4;              /* r4-r7 Register arguments */
-       unsigned long  r5;
-       unsigned long  r6;
-       unsigned long  r7;
-       unsigned long  orig_r2;         /* Copy of r2 ?? */
-       unsigned long  ra;              /* Return address */
-       unsigned long  fp;              /* Frame pointer */
-       unsigned long  sp;              /* Stack pointer */
-       unsigned long  gp;              /* Global pointer */
-       unsigned long  estatus;
-       unsigned long  ea;              /* Exception return address (pc) */
-       unsigned long  orig_r7;
-};
-
-/*
- * This is the extended stack used by signal handlers and the context
- * switcher: it's pushed after the normal "struct pt_regs".
- */
-struct switch_stack {
-       unsigned long  r16;             /* r16-r23 Callee-saved GP registers */
-       unsigned long  r17;
-       unsigned long  r18;
-       unsigned long  r19;
-       unsigned long  r20;
-       unsigned long  r21;
-       unsigned long  r22;
-       unsigned long  r23;
-       unsigned long  fp;
-       unsigned long  gp;
-       unsigned long  ra;
+/* User structures for general purpose registers.  */
+struct user_pt_regs {
+       __u32           regs[49];
 };
 
 #endif /* __ASSEMBLY__ */
index 7b8bb41867d4416e7b1cfeb61e1b930fc68c71dc..b67944a509273a020a8e13e9ebf0e039e492867c 100644 (file)
  * details.
  */
 
-#ifndef _ASM_NIOS2_SIGCONTEXT_H
-#define _ASM_NIOS2_SIGCONTEXT_H
+#ifndef _UAPI__ASM_SIGCONTEXT_H
+#define _UAPI__ASM_SIGCONTEXT_H
 
-#include <asm/ptrace.h>
+#include <linux/types.h>
+
+#define MCONTEXT_VERSION 2
 
 struct sigcontext {
-       struct pt_regs regs;
-       unsigned long  sc_mask; /* old sigmask */
+       int version;
+       unsigned long gregs[32];
 };
 
 #endif
index 2d0ea25be1717de06d8cd138032dc5c7c5f3970d..dda41e4fe7070885ee7ab77e4c5e9e18e51dd0a3 100644 (file)
@@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs,
                                        struct ucontext *uc, int *pr2)
 {
        int temp;
-       greg_t *gregs = uc->uc_mcontext.gregs;
+       unsigned long *gregs = uc->uc_mcontext.gregs;
        int err;
 
        /* Always make any pending restarted system calls return -EINTR */
@@ -127,7 +127,7 @@ badframe:
 static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
 {
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
-       greg_t *gregs = uc->uc_mcontext.gregs;
+       unsigned long *gregs = uc->uc_mcontext.gregs;
        int err = 0;
 
        err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
index 0d231adfe576b897073c924e02211dea7b110285..0c9b6afe69e9094815cc1e73084422368b3a2e52 100644 (file)
@@ -126,7 +126,6 @@ good_area:
                break;
        }
 
-survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -220,11 +219,6 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(tsk)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
index 8c966b2270aa6692c6f085027a8c70098b447733..15207b9362bfd0947064f7572ad34fdfb6e4d30c 100644 (file)
@@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 #if PT_NLEVELS == 3
 #define BITS_PER_PMD   (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
+#define __PAGETABLE_PMD_FOLDED
 #define BITS_PER_PMD   0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
index 51866f17068457038503ff14520ef9a67e3a0d7d..ca7957b09a3cc13ad6882006468a7e3431e7a75f 100644 (file)
@@ -142,6 +142,7 @@ CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
 CONFIG_STAGING=y
 CONFIG_FSL_CORENET_CF=y
+CONFIG_CLK_QORIQ=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
index d6c0c819895288c39bd826085e7fadb6793b360a..04737aaa8b6b8ead5bfc2574fabe48afd78c872c 100644 (file)
@@ -122,6 +122,7 @@ CONFIG_DMADEVICES=y
 CONFIG_FSL_DMA=y
 CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
+CONFIG_CLK_QORIQ=y
 CONFIG_FSL_CORENET_CF=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
index 9cfa3706a1b8750942d7e41a91ef44996053a2ad..f1ea5972f6eccddceb7b776961ff67dcf2cb14e6 100644 (file)
@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl,
                                 int pci_domain_number, unsigned long pe_num);
 extern int iommu_add_device(struct device *dev);
 extern void iommu_del_device(struct device *dev);
+extern int __init tce_iommu_bus_notifier_init(void);
 #else
 static inline void iommu_register_group(struct iommu_table *tbl,
                                        int pci_domain_number,
@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev)
 static inline void iommu_del_device(struct device *dev)
 {
 }
+
+static inline int __init tce_iommu_bus_notifier_init(void)
+{
+        return 0;
+}
 #endif /* !CONFIG_IOMMU_API */
 
 static inline void set_iommu_table_base_and_group(struct device *dev,
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644 (file)
index 0000000..744fd54
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+       return true;
+}
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
index 5d3968c4d79973a4645f9d7b4069056311fd42cc..b054f33ab1fbcdad3bff7fda332c9e55dcda1d2f 100644 (file)
@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_del_device);
 
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+                unsigned long action, void *data)
+{
+        struct device *dev = data;
+
+        switch (action) {
+        case BUS_NOTIFY_ADD_DEVICE:
+                return iommu_add_device(dev);
+        case BUS_NOTIFY_DEL_DEVICE:
+                if (dev->iommu_group)
+                        iommu_del_device(dev);
+                return 0;
+        default:
+                return 0;
+        }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+        .notifier_call = tce_iommu_bus_notifier,
+};
+
+int __init tce_iommu_bus_notifier_init(void)
+{
+        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+        return 0;
+}
 #endif /* CONFIG_IOMMU_API */
index 6e19afa35a153d2736af94bc845008d724fe735f..ec9ec2058d2d3f3ec6db2dfbe25b35ab1f4cfb0b 100644 (file)
@@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        if (smp_ops->give_timebase)
                smp_ops->give_timebase();
 
-       /* Wait until cpu puts itself in the online map */
-       while (!cpu_online(cpu))
+       /* Wait until cpu puts itself in the online & active maps */
+       while (!cpu_online(cpu) || !cpu_active(cpu))
                cpu_relax();
 
        return 0;
index 7316dd15278a35f60b710d0a348bc0d9328830fb..2d7b33fab953283cf391679b1e316e81f53852f9 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
+#include <linux/clk-provider.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -975,6 +976,10 @@ void __init time_init(void)
 
        init_decrementer_clockevent();
        tick_setup_hrtimer_broadcast();
+
+#ifdef CONFIG_COMMON_CLK
+       of_clk_init(NULL);
+#endif
 }
 
 
index 6eb614a271fbe07390271aca65d802e37d3a6481..f691bcabd71013e23a09a07bfec61a592e1cd237 100644 (file)
@@ -1168,6 +1168,11 @@ static void mpc5121_clk_provide_backwards_compat(void)
        }
 }
 
+/*
+ * The "fixed-clock" nodes (which include the oscillator node if the board's
+ * DT provides one) have already been scanned by of_clk_init() in
+ * time_init().
+ */
 int __init mpc5121_clk_init(void)
 {
        struct device_node *clk_np;
@@ -1186,12 +1191,6 @@ int __init mpc5121_clk_init(void)
        /* invalidate all not yet registered clock slots */
        mpc512x_clk_preset_data();
 
-       /*
-        * have the device tree scanned for "fixed-clock" nodes (which
-        * includes the oscillator node if the board's DT provides one)
-        */
-       of_clk_init(NULL);
-
        /*
         * add a dummy clock for those situations where a clock spec is
         * required yet no real clock is involved
index e69142f4af089cf986dd84f3fb2731ae705cf1f2..54323d6b5166218fa4e9c548a0511c7ec64884dd 100644 (file)
@@ -836,30 +836,4 @@ void __init pnv_pci_init(void)
 #endif
 }
 
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-               unsigned long action, void *data)
-{
-       struct device *dev = data;
-
-       switch (action) {
-       case BUS_NOTIFY_ADD_DEVICE:
-               return iommu_add_device(dev);
-       case BUS_NOTIFY_DEL_DEVICE:
-               if (dev->iommu_group)
-                       iommu_del_device(dev);
-               return 0;
-       default:
-               return 0;
-       }
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-       .notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
-       bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-       return 0;
-}
 machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
index 1d3d52dc3ff31ed6ce0ab9b554c45c1233d0f039..7803a19adb31822fbe1c679160ff7655ec5c5411 100644 (file)
@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str)
 }
 
 __setup("multitce=", disable_multitce);
+
+machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
index 4c8008dd938e8d0eb5c4fdc18b35b2cfc90f6807..99824ff8dd354e74ff421a2c9bb59243e045d541 100644 (file)
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
        parent = dentry->d_parent;
        mutex_lock(&parent->d_inode->i_mutex);
        if (hypfs_positive(dentry)) {
-               if (S_ISDIR(dentry->d_inode->i_mode))
+               if (d_is_dir(dentry))
                        simple_rmdir(parent->d_inode, dentry);
                else
                        simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
        return nonseekable_open(inode, filp);
 }
 
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       char *data;
-       ssize_t ret;
-       struct file *filp = iocb->ki_filp;
-       /* XXX: temporary */
-       char __user *buf = iov[0].iov_base;
-       size_t count = iov[0].iov_len;
-
-       if (nr_segs != 1)
-               return -EINVAL;
-
-       data = filp->private_data;
-       ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
-       if (ret <= 0)
-               return ret;
+       struct file *file = iocb->ki_filp;
+       char *data = file->private_data;
+       size_t available = strlen(data);
+       loff_t pos = iocb->ki_pos;
+       size_t count;
 
-       iocb->ki_pos += ret;
-       file_accessed(filp);
-
-       return ret;
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= available || !iov_iter_count(to))
+               return 0;
+       count = copy_to_iter(data + pos, available - pos, to);
+       if (!count)
+               return -EFAULT;
+       iocb->ki_pos = pos + count;
+       file_accessed(file);
+       return count;
 }
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        int rc;
        struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
        struct hypfs_sb_info *fs_info = sb->s_fs_info;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(from);
 
        /*
         * Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
        }
        hypfs_update_update(sb);
        rc = count;
+       iov_iter_advance(from, count);
 out:
        mutex_unlock(&fs_info->lock);
        return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
 static const struct file_operations hypfs_file_ops = {
        .open           = hypfs_open,
        .release        = hypfs_release,
-       .read           = do_sync_read,
-       .write          = do_sync_write,
-       .aio_read       = hypfs_aio_read,
-       .aio_write      = hypfs_aio_write,
+       .read           = new_sync_read,
+       .write          = new_sync_write,
+       .read_iter      = hypfs_read_iter,
+       .write_iter     = hypfs_write_iter,
        .llseek         = no_llseek,
 };
 
index d84559e31f3222c9dc9500c07750beccb733ab82..f407bbf5ee94ca5e2f6122951e52ce2d7db1a7ef 100644 (file)
@@ -515,15 +515,15 @@ struct s390_io_adapter {
 #define S390_ARCH_FAC_MASK_SIZE_U64 \
        (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
 
-struct s390_model_fac {
-       /* facilities used in SIE context */
-       __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
-       /* subset enabled by kvm */
-       __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+       /* facility list requested by guest */
+       __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+       /* facility mask supported by kvm & hosting machine */
+       __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
 };
 
 struct kvm_s390_cpu_model {
-       struct s390_model_fac *fac;
+       struct kvm_s390_fac *fac;
        struct cpuid cpu_id;
        unsigned short ibc;
 };
index f49b719546541d7dad60d78fd1a1d86daec1f7c2..8fb3802f8fad0f8ff150ae14250fc73b8936ab78 100644 (file)
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        int cpu = smp_processor_id();
 
+       S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
        if (prev == next)
                return;
        if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        atomic_dec(&prev->context.attach_count);
        if (MACHINE_HAS_TLB_LC)
                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
-       S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
index 7b2ac6e44166ac258ebd6481cb1691e77be53b71..53eacbd4f09bf4c32ac051f1be7f9d3e5445e0df 100644 (file)
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
 #endif
 }
 
-static inline void clear_page(void *page)
-{
-       register unsigned long reg1 asm ("1") = 0;
-       register void *reg2 asm ("2") = page;
-       register unsigned long reg3 asm ("3") = 4096;
-       asm volatile(
-               "       mvcl    2,0"
-               : "+d" (reg2), "+d" (reg3) : "d" (reg1)
-               : "memory", "cc");
-}
+#define clear_page(page)       memset((page), 0, PAGE_SIZE)
 
 /*
  * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
index fbb5ee3ae57c6d21b589a2a3f38437a17a218f2b..e08ec38f8c6eb74b7cf998e18e022db4c206b616 100644 (file)
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask;
  */
 #define PTRS_PER_PTE   256
 #ifndef CONFIG_64BIT
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD   1
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PUD   1
 #else /* CONFIG_64BIT */
 #define PTRS_PER_PMD   2048
index c4fbb9527c5ca2b553ca98c3f7887ea236fdff1c..b1453a2ae1ca583b2d4a0dc99bc325fd30ddf10d 100644 (file)
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
        cpumask_t book_mask;
 };
 
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
-#define topology_physical_package_id(cpu)      (cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu)                        (cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu)           (&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu)                  (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu)             (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu)                  (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu)             (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu)                  (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu)     (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu)            (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu)            (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).book_mask)
 
 #define mc_capable() 1
 
@@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { }
 #define POLARIZATION_VM                (2)
 #define POLARIZATION_VH                (3)
 
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
index 632fa06ea162c567923044ddf4e3ced0339d24bd..0969d113b3d68f290a95b3b7e1484bf5c6b8caa3 100644 (file)
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
 {
        if (level >= CACHE_MAX_LEVEL)
                return CACHE_TYPE_NOCACHE;
-
        ci += level;
-
        if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
                return CACHE_TYPE_NOCACHE;
-
        return cache_type_map[ci->type];
 }
 
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
 }
 
 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
-                        enum cache_type type, unsigned int level)
+                        enum cache_type type, unsigned int level, int cpu)
 {
        int ti, num_sets;
-       int cpu = smp_processor_id();
 
        if (type == CACHE_TYPE_INST)
                ti = CACHE_TI_INSTRUCTION;
        else
                ti = CACHE_TI_UNIFIED;
-
        this_leaf->level = level + 1;
        this_leaf->type = type;
        this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-       this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
-                                               level, ti);
+       this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
        this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
        num_sets = this_leaf->size / this_leaf->coherency_line_size;
        num_sets /= this_leaf->ways_of_associativity;
        this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
 
        if (!this_cpu_ci)
                return -EINVAL;
-
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        do {
                ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
                /* Separate instruction and data caches */
                leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
        } while (++level < CACHE_MAX_LEVEL);
-
        this_cpu_ci->num_levels = level;
        this_cpu_ci->num_leaves = leaves;
-
        return 0;
 }
 
 int populate_cache_leaves(unsigned int cpu)
 {
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        unsigned int level, idx, pvt;
        union cache_topology ct;
        enum cache_type ctype;
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
             idx < this_cpu_ci->num_leaves; idx++, level++) {
                if (!this_leaf)
                        return -EINVAL;
-
                pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
                ctype = get_cache_type(&ct.ci[0], level);
                if (ctype == CACHE_TYPE_SEPARATE) {
-                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
-                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
                } else {
-                       ci_leaf_init(this_leaf++, pvt, ctype, level);
+                       ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
                }
        }
        return 0;
index 70a3294509018e33c49d6b0a383c841d79d2f3af..4427ab7ac23af3e0450e4875b36b83ead687c36d 100644 (file)
@@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
        if (test_facility(129))
                S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-       if (test_facility(128))
-               S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 #endif
 }
 
-static int __init nocad_setup(char *str)
+static int __init cad_setup(char *str)
 {
-       S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+       int val;
+
+       get_option(&str, &val);
+       if (val && test_facility(128))
+               S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
        return 0;
 }
-early_param("nocad", nocad_setup);
+early_param("cad", cad_setup);
 
 static int __init cad_init(void)
 {
index cb2d51e779dfafe0bf02077bdd68918dde731511..830066f936c8ffbb6c5359f600f3b1d4a6f7c747 100644 (file)
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
        insn->offset = (entry->target - entry->code) >> 1;
 }
 
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+                          struct insn *new)
 {
        unsigned char *ipc = (unsigned char *)entry->code;
-       unsigned char *ipe = (unsigned char *)insn;
+       unsigned char *ipe = (unsigned char *)expected;
+       unsigned char *ipn = (unsigned char *)new;
 
        pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
        pr_emerg("Found:    %02x %02x %02x %02x %02x %02x\n",
                 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
        pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
                 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+       pr_emerg("New:      %02x %02x %02x %02x %02x %02x\n",
+                ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
        panic("Corrupted kernel text");
 }
 
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
        }
        if (init) {
                if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
-                       jump_label_bug(entry, &old);
+                       jump_label_bug(entry, &orignop, &new);
        } else {
                if (memcmp((void *)entry->code, &old, sizeof(old)))
-                       jump_label_bug(entry, &old);
+                       jump_label_bug(entry, &old, &new);
        }
        probe_kernel_write((void *)entry->code, &new, sizeof(new));
 }
index 36154a2f1814f71a036b68713eeaaf9b1bea7f1f..2ca95862e336d0a040ccc4d968f7aa20da8c4056 100644 (file)
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
+       jump_label_apply_nops(me);
        vfree(me->arch.syminfo);
        me->arch.syminfo = NULL;
        return 0;
index 26108232fcaaf049f4e4caa3938e537af5066ac6..dc488e13b7e35b216335323a68bd6eac422ce172 100644 (file)
@@ -18,7 +18,7 @@
 
 static DEFINE_PER_CPU(struct cpuid, cpu_id);
 
-void cpu_relax(void)
+void notrace cpu_relax(void)
 {
        if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
                asm volatile("diag 0,0,0x44");
index bfac77ada4f28137b5545d9268d37346eb3af0ff..a5ea8bc17cb3bdd24f085260ca5d03364d509bec 100644 (file)
@@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
        setup_lowcore();
        smp_fill_possible_mask();
         cpu_init();
-       s390_init_cpu_topology();
 
        /*
         * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
index a668993ff577f95d40aff6f474e082374967a79b..db8f1115a3bf5530b82eb53d00931cbe94d6522b 100644 (file)
@@ -59,14 +59,13 @@ enum {
        CPU_STATE_CONFIGURED,
 };
 
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
 struct pcpu {
-       struct cpu *cpu;
        struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
-       unsigned long async_stack;      /* async stack for the cpu */
-       unsigned long panic_stack;      /* panic stack for the cpu */
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
-       int state;                      /* physical cpu state */
-       int polarization;               /* physical polarization */
+       signed char state;              /* physical cpu state */
+       signed char polarization;       /* physical polarization */
        u16 address;                    /* physical cpu address */
 };
 
@@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
        pcpu_sigp_retry(pcpu, order, 0);
 }
 
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
+       unsigned long async_stack, panic_stack;
        struct _lowcore *lc;
 
        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-               pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-               pcpu->panic_stack = __get_free_page(GFP_KERNEL);
-               if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+               async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+               panic_stack = __get_free_page(GFP_KERNEL);
+               if (!pcpu->lowcore || !panic_stack || !async_stack)
                        goto out;
+       } else {
+               async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+               panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
        }
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-       lc->async_stack = pcpu->async_stack + ASYNC_SIZE
-               - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-       lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
-               - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+       lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+       lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
 #ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        return 0;
 out:
        if (pcpu != &pcpu_devices[0]) {
-               free_page(pcpu->panic_stack);
-               free_pages(pcpu->async_stack, ASYNC_ORDER);
+               free_page(panic_stack);
+               free_pages(async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
@@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 #else
        vdso_free_per_cpu(pcpu->lowcore);
 #endif
-       if (pcpu != &pcpu_devices[0]) {
-               free_page(pcpu->panic_stack);
-               free_pages(pcpu->async_stack, ASYNC_ORDER);
-               free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-       }
+       if (pcpu == &pcpu_devices[0])
+               return;
+       free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
+       free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+       free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
        pcpu_delegate(&pcpu_devices[0], func, data,
-                     pcpu_devices->panic_stack + PAGE_SIZE);
+                     pcpu_devices->lowcore->panic_stack -
+                     PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void)
        pcpu->state = CPU_STATE_CONFIGURED;
        pcpu->address = stap();
        pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
-       pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
-               + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-       pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
-               + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
        set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
-       struct cpu *c = pcpu_devices[cpu].cpu;
-       struct device *s = &c->dev;
+       struct device *s = &per_cpu(cpu_device, cpu)->dev;
        int err = 0;
 
        switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
-       pcpu_devices[cpu].cpu = c;
+       per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
index 24ee33f1af24228e04686700523819fcb77afdf2..14da43b801d93c2041f2d4964028fc180301eeff 100644 (file)
@@ -7,14 +7,14 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
                if (lcpu < 0)
                        continue;
                for (i = 0; i <= smp_cpu_mtid; i++) {
-                       cpu_topology[lcpu + i].book_id = book->id;
-                       cpu_topology[lcpu + i].core_id = rcore;
-                       cpu_topology[lcpu + i].thread_id = lcpu + i;
+                       per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+                       per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+                       per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
                        cpumask_set_cpu(lcpu + i, &book->mask);
                        cpumask_set_cpu(lcpu + i, &socket->mask);
                        if (one_socket_per_cpu)
-                               cpu_topology[lcpu + i].socket_id = rcore;
+                               per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
                        else
-                               cpu_topology[lcpu + i].socket_id = socket->id;
+                               per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
                        smp_cpu_set_polarization(lcpu + i, tl_core->pp);
                }
                if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
 
        spin_lock_irqsave(&topology_lock, flags);
        for_each_possible_cpu(cpu) {
-               cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
-               cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
-               cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+               per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+               per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+               per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
                if (!MACHINE_HAS_TOPOLOGY) {
-                       cpu_topology[cpu].thread_id = cpu;
-                       cpu_topology[cpu].core_id = cpu;
-                       cpu_topology[cpu].socket_id = cpu;
-                       cpu_topology[cpu].book_id = cpu;
+                       per_cpu(cpu_topology, cpu).thread_id = cpu;
+                       per_cpu(cpu_topology, cpu).core_id = cpu;
+                       per_cpu(cpu_topology, cpu).socket_id = cpu;
+                       per_cpu(cpu_topology, cpu).book_id = cpu;
                }
        }
        spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@ void topology_expect_change(void)
        set_topology_timer();
 }
 
-static int __init early_parse_topology(char *p)
-{
-       if (strncmp(p, "off", 3))
-               return 0;
-       topology_enabled = 0;
-       return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
-                              struct mask_info *mask, int offset)
-{
-       int i, nr_masks;
-
-       nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
-       for (i = 0; i < info->mnest - offset; i++)
-               nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
-       nr_masks = max(nr_masks, 1);
-       for (i = 0; i < nr_masks; i++) {
-               mask->next = alloc_bootmem_align(
-                       roundup_pow_of_two(sizeof(struct mask_info)),
-                       roundup_pow_of_two(sizeof(struct mask_info)));
-               mask = mask->next;
-       }
-}
-
-void __init s390_init_cpu_topology(void)
-{
-       struct sysinfo_15_1_x *info;
-       int i;
-
-       if (!MACHINE_HAS_TOPOLOGY)
-               return;
-       tl_info = alloc_bootmem_pages(PAGE_SIZE);
-       info = tl_info;
-       store_topology(info);
-       pr_info("The CPU configuration topology of the machine is:");
-       for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-               printk(KERN_CONT " %d", info->mag[i]);
-       printk(KERN_CONT " / %d\n", info->mnest);
-       alloc_masks(info, &socket_info, 1);
-       alloc_masks(info, &book_info, 2);
-}
-
 static int cpu_management;
 
 static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu)
 
 const struct cpumask *cpu_thread_mask(int cpu)
 {
-       return &cpu_topology[cpu].thread_mask;
+       return &per_cpu(cpu_topology, cpu).thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       return &cpu_topology[cpu].core_mask;
+       return &per_cpu(cpu_topology, cpu).core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-       return &cpu_topology[cpu].book_mask;
+       return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
+static int __init early_parse_topology(char *p)
+{
+       if (strncmp(p, "off", 3))
+               return 0;
+       topology_enabled = 0;
+       return 0;
+}
+early_param("topology", early_parse_topology);
+
 static struct sched_domain_topology_level s390_topology[] = {
        { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = {
        { NULL, },
 };
 
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+                              struct mask_info *mask, int offset)
+{
+       int i, nr_masks;
+
+       nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+       for (i = 0; i < info->mnest - offset; i++)
+               nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+       nr_masks = max(nr_masks, 1);
+       for (i = 0; i < nr_masks; i++) {
+               mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+               mask = mask->next;
+       }
+}
+
+static int __init s390_topology_init(void)
+{
+       struct sysinfo_15_1_x *info;
+       int i;
+
+       if (!MACHINE_HAS_TOPOLOGY)
+               return 0;
+       tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+       info = tl_info;
+       store_topology(info);
+       pr_info("The CPU configuration topology of the machine is:");
+       for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+               printk(KERN_CONT " %d", info->mag[i]);
+       printk(KERN_CONT " / %d\n", info->mnest);
+       alloc_masks(info, &socket_info, 1);
+       alloc_masks(info, &book_info, 2);
+       set_sched_topology(s390_topology);
+       return 0;
+}
+early_initcall(s390_topology_init);
+
 static int __init topology_init(void)
 {
        if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@ static int __init topology_init(void)
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
-       set_sched_topology(s390_topology);
-       return 0;
-}
-early_initcall(early_topology_init);
index 7699e735ae28ed726f4725ee97619119e1aa5e1e..61541fb93dc63e0f4673dcfe12e9d69681eb0993 100644 (file)
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
        je      4f
        cghi    %r2,__CLOCK_REALTIME
        je      5f
-       cghi    %r2,__CLOCK_THREAD_CPUTIME_ID
-       je      9f
-       cghi    %r2,-2          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+       cghi    %r2,-3          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
        je      9f
        cghi    %r2,__CLOCK_MONOTONIC_COARSE
        je      3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
        aghi    %r15,16
        br      %r14
 
-       /* CLOCK_THREAD_CPUTIME_ID for this thread */
+       /* CPUCLOCK_VIRT for this thread */
 9:     icm     %r0,15,__VDSO_ECTG_OK(%r5)
        jz      12f
        ear     %r2,%a4
index 0c362392756310e1ce0e2da8e35c6bedb5cf0849..19e17bd7aec09b2662874a3925e3d55f4e4207f4 100644 (file)
@@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
-       case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
@@ -522,7 +521,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
-               memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+               memcpy(kvm->arch.model.fac->list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
@@ -556,7 +555,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
-       memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+       memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
@@ -576,10 +575,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp_get_ibc();
-       memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
-              kvm_s390_fac_list_mask_size() * sizeof(u64));
+       memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_U64);
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
@@ -778,15 +777,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static int kvm_s390_query_ap_config(u8 *config)
 {
        u32 fcn_code = 0x04000000UL;
-       u32 cc;
+       u32 cc = 0;
 
+       memset(config, 0, 128);
        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
-               "ipm %0\n"
+               "0: ipm %0\n"
                "srl %0,28\n"
-               : "=r" (cc)
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               : "+r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );
@@ -839,9 +841,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 
        kvm_s390_set_crycb_format(kvm);
 
-       /* Disable AES/DEA protected key functions by default */
-       kvm->arch.crypto.aes_kw = 0;
-       kvm->arch.crypto.dea_kw = 0;
+       /* Enable AES/DEA protected key functions by default */
+       kvm->arch.crypto.aes_kw = 1;
+       kvm->arch.crypto.dea_kw = 1;
+       get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+       get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 
        return 0;
 }
@@ -886,40 +892,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        /*
         * The architectural maximum amount of facilities is 16 kbit. To store
         * this amount, 2 kbyte of memory is required. Thus we need a full
-        * page to hold the active copy (arch.model.fac->sie) and the current
-        * facilities set (arch.model.fac->kvm). Its address size has to be
+        * page to hold the guest facility list (arch.model.fac->list) and the
+        * facility mask (arch.model.fac->mask). Its address size has to be
         * 31 bits and word aligned.
         */
        kvm->arch.model.fac =
-               (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+               (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.model.fac)
                goto out_nofac;
 
-       memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_U64);
-
-       /*
-        * If this KVM host runs *not* in a LPAR, relax the facility bits
-        * of the kvm facility mask by all missing facilities. This will allow
-        * to determine the right CPU model by means of the remaining facilities.
-        * Live guest migration must prohibit the migration of KVMs running in
-        * a LPAR to non LPAR hosts.
-        */
-       if (!MACHINE_IS_LPAR)
-               for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
-                       kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
-       /*
-        * Apply the kvm facility mask to limit the kvm supported/tolerated
-        * facility list.
-        */
+       /* Populate the facility mask initially. */
+       memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
-                       kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+                       kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
                else
-                       kvm->arch.model.fac->kvm[i] = 0UL;
+                       kvm->arch.model.fac->mask[i] = 0UL;
        }
 
+       /* Populate the facility list initially. */
+       memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+              S390_ARCH_FAC_LIST_SIZE_BYTE);
+
        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
 
@@ -1165,8 +1160,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
-       memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
        vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
        mutex_unlock(&vcpu->kvm->lock);
 
@@ -1212,7 +1205,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }
-       vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+       vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
 
        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
index 985c2114d7ef3b9d2b50eb9c33acd314ae9fa3ae..c34109aa552d9b1a6e5ea66f172b5c3e30ad001b 100644 (file)
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
 /* test availability of facility in a kvm instance */
 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
 {
-       return __test_facility(nr, kvm->arch.model.fac->kvm);
+       return __test_facility(nr, kvm->arch.model.fac->mask) &&
+               __test_facility(nr, kvm->arch.model.fac->list);
 }
 
 /* are cpu states controlled by user space */
index bdd9b5b17e03ed3ab113e73c955f35fbeb8d1560..351116939ea27f2fcf6eb4de60447d86335d0b6e 100644 (file)
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
-       fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+       fac = *vcpu->kvm->arch.model.fac->list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
index d008f638b2cd27f7615c11a02bb5d2a8f3138d04..179a2c20b01f143a51a897d9b85e33198bf881a7 100644 (file)
@@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void)
 {
        unsigned long base;
 
-       base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+       base = STACK_TOP / 3 * 2;
+       if (!is_32bit_task())
+               /* Align to 4GB */
+               base &= ~((1UL << 32) - 1);
        return base + mmap_rnd();
 }
 
index 753a5673195112051667031bfee73921e149eef1..f0b85443e06093d2f5d3d4c5dbe85554e459c138 100644 (file)
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
        addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
        return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
        }
        spin_unlock(&zpci_iomap_lock);
 }
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
 
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
        airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
 }
 
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
 {
-       struct pci_dev *pdev = zdev->pdev;
        resource_size_t len;
        int i;
 
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
        }
 }
 
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
 {
-       struct pci_dev *pdev = zdev->pdev;
        resource_size_t len;
        int i;
 
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
 
        zdev->pdev = pdev;
        pdev->dev.groups = zpci_attr_groups;
-       zpci_map_resources(zdev);
+       zpci_map_resources(pdev);
 
        for (i = 0; i < PCI_BAR_COUNT; i++) {
                res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
        return 0;
 }
 
+void pcibios_release_device(struct pci_dev *pdev)
+{
+       zpci_unmap_resources(pdev);
+}
+
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
 {
        struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
        zdev->pdev = pdev;
        zpci_debug_init_device(zdev);
        zpci_fmb_enable_device(zdev);
-       zpci_map_resources(zdev);
 
        return pci_enable_resources(pdev, mask);
 }
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
 {
        struct zpci_dev *zdev = get_zdev(pdev);
 
-       zpci_unmap_resources(zdev);
        zpci_fmb_disable_device(zdev);
        zpci_debug_exit_device(zdev);
        zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 static int zpci_restore(struct device *dev)
 {
-       struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct zpci_dev *zdev = get_zdev(pdev);
        int ret = 0;
 
        if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
        if (ret)
                goto out;
 
-       zpci_map_resources(zdev);
+       zpci_map_resources(pdev);
        zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
                           zdev->start_dma + zdev->iommu_size - 1,
                           (u64) zdev->dma_table);
@@ -709,12 +711,14 @@ out:
 
 static int zpci_freeze(struct device *dev)
 {
-       struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct zpci_dev *zdev = get_zdev(pdev);
 
        if (zdev->state != ZPCI_FN_STATE_ONLINE)
                return 0;
 
        zpci_unregister_ioat(zdev, 0);
+       zpci_unmap_resources(pdev);
        return clp_disable_fh(zdev);
 }
 
index 8aa271b3d1ad95b79cb0fa40d130ab76d26fc2bd..b1bb2b72302ca43836720f5232af0a43b0db8d12 100644 (file)
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
        if (copy_from_user(buf, user_buffer, length))
                goto out;
 
-       memcpy_toio(io_addr, buf, length);
-       ret = 0;
+       ret = zpci_memcpy_toio(io_addr, buf, length);
 out:
        if (buf != local_buf)
                kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
                goto out;
        io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
 
-       ret = -EFAULT;
-       if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+       if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+               ret = -EFAULT;
                goto out;
-
-       memcpy_fromio(buf, io_addr, length);
-
-       if (copy_to_user(user_buffer, buf, length))
+       }
+       ret = zpci_memcpy_fromio(buf, io_addr, length);
+       if (ret)
                goto out;
+       if (copy_to_user(user_buffer, buf, length))
+               ret = -EFAULT;
 
-       ret = 0;
 out:
        if (buf != local_buf)
                kfree(buf);
index 96ac69c5eba016a7b5c1ef93fd35baccf8e2f52b..efb00ec758058afebb97f912e2bc6f142bbd43ff 100644 (file)
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG
        default "arch/sparc/configs/sparc32_defconfig" if SPARC32
        default "arch/sparc/configs/sparc64_defconfig" if SPARC64
 
+config ARCH_PROC_KCORE_TEXT
+       def_bool y
+
 config IOMMU_HELPER
        bool
        default y if SPARC64
index 9b672be70dda6e84802853b0cd6acf7d5aa13e08..50d4840d9aebbfa036c7a110e9f866fa77007647 100644 (file)
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr)
 {
 }
 
-#define ioread8(X)                     readb(X)
-#define ioread16(X)                    readw(X)
-#define ioread16be(X)                  __raw_readw(X)
-#define ioread32(X)                    readl(X)
-#define ioread32be(X)                  __raw_readl(X)
-#define iowrite8(val,X)                        writeb(val,X)
-#define iowrite16(val,X)               writew(val,X)
-#define iowrite16be(val,X)             __raw_writew(val,X)
-#define iowrite32(val,X)               writel(val,X)
-#define iowrite32be(val,X)             __raw_writel(val,X)
+#define ioread8                        readb
+#define ioread16               readw
+#define ioread16be             __raw_readw
+#define ioread32               readl
+#define ioread32be             __raw_readl
+#define iowrite8               writeb
+#define iowrite16              writew
+#define iowrite16be            __raw_writew
+#define iowrite32              writel
+#define iowrite32be            __raw_writel
 
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr);
index c100dc27a0a9461e677e805224c586eb37082793..176fa0ad19f15d2d41a7234378cc36db144abd57 100644 (file)
@@ -12,7 +12,6 @@
 extern int this_is_starfire;
 
 void check_if_starfire(void);
-int starfire_hard_smp_processor_id(void);
 void starfire_hookup(int);
 unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
 
index 88d322b67fac4d4be308280dd59bd52f2ecf92c0..07cc49e541f40ea2cacc1f952aa7e07dd4a4e69b 100644 (file)
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs,
 void do_privop(struct pt_regs *regs);
 void do_privact(struct pt_regs *regs);
 void do_cee(struct pt_regs *regs);
-void do_cee_tl1(struct pt_regs *regs);
-void do_dae_tl1(struct pt_regs *regs);
-void do_iae_tl1(struct pt_regs *regs);
 void do_div0_tl1(struct pt_regs *regs);
-void do_fpdis_tl1(struct pt_regs *regs);
 void do_fpieee_tl1(struct pt_regs *regs);
 void do_fpother_tl1(struct pt_regs *regs);
 void do_ill_tl1(struct pt_regs *regs);
index da6f1a7fc4db4713425d1927af185cb797b4c1fc..61139d9924cae4a8fdf5d4d5366a31052ea29616 100644 (file)
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
        scheduler_ipi();
 }
 
-/* This is a nop because we capture all other cpus
- * anyways when making the PROM active.
- */
+static void stop_this_cpu(void *dummy)
+{
+       prom_stopself();
+}
+
 void smp_send_stop(void)
 {
+       int cpu;
+
+       if (tlb_type == hypervisor) {
+               for_each_online_cpu(cpu) {
+                       if (cpu == smp_processor_id())
+                               continue;
+#ifdef CONFIG_SUN_LDOMS
+                       if (ldom_domaining_enabled) {
+                               unsigned long hv_err;
+                               hv_err = sun4v_cpu_stop(cpu);
+                               if (hv_err)
+                                       printk(KERN_ERR "sun4v_cpu_stop() "
+                                              "failed err=%lu\n", hv_err);
+                       } else
+#endif
+                               prom_stopcpu_cpuid(cpu);
+               }
+       } else
+               smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /**
index 82281a566bb86b866525a18b8fc4ca453d2e1e60..167fdfd9c83702bd2ccdc737625fcc8f57c9619c 100644 (file)
@@ -28,11 +28,6 @@ void check_if_starfire(void)
                this_is_starfire = 1;
 }
 
-int starfire_hard_smp_processor_id(void)
-{
-       return upa_readl(0x1fff40000d0UL);
-}
-
 /*
  * Each Starfire board has 32 registers which perform translation
  * and delivery of traditional interrupt packets into the extended
index c85403d0496c24f7639a32fa06b7dc53c1559381..30e7ddb27a3a966e74e5ab79ad686bab26eab459 100644 (file)
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
        long err;
 
        /* No need for backward compatibility. We can start fresh... */
-       if (call <= SEMCTL) {
+       if (call <= SEMTIMEDOP) {
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
index a27651e866e7a108dca92cf88c14ec8f21ae5b5b..0e699745d64311d44439327d3e971ce78114b7da 100644 (file)
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
                }
                user_instruction_dump ((unsigned int __user *) regs->tpc);
        }
+       if (panic_on_oops)
+               panic("Fatal exception");
        if (regs->tstate & TSTATE_PRIV)
                do_exit(SIGKILL);
        do_exit(SIGSEGV);
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs)
        die_if_kernel("TL0: Cache Error Exception", regs);
 }
 
-void do_cee_tl1(struct pt_regs *regs)
-{
-       exception_enter();
-       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-       die_if_kernel("TL1: Cache Error Exception", regs);
-}
-
-void do_dae_tl1(struct pt_regs *regs)
-{
-       exception_enter();
-       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-       die_if_kernel("TL1: Data Access Exception", regs);
-}
-
-void do_iae_tl1(struct pt_regs *regs)
-{
-       exception_enter();
-       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-       die_if_kernel("TL1: Instruction Access Exception", regs);
-}
-
 void do_div0_tl1(struct pt_regs *regs)
 {
        exception_enter();
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs)
        die_if_kernel("TL1: DIV0 Exception", regs);
 }
 
-void do_fpdis_tl1(struct pt_regs *regs)
-{
-       exception_enter();
-       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-       die_if_kernel("TL1: FPU Disabled", regs);
-}
-
 void do_fpieee_tl1(struct pt_regs *regs)
 {
        exception_enter();
index 3ea267c53320d49683ab39c9e0d95189e4adb56e..4ca0d6ba5ec8331c67f43f8515eb3737526208bb 100644 (file)
@@ -2820,7 +2820,7 @@ static int __init report_memory(void)
 
        return 0;
 }
-device_initcall(report_memory);
+arch_initcall(report_memory);
 
 #ifdef CONFIG_SMP
 #define do_flush_tlb_kernel_range      smp_flush_tlb_kernel_range
index eb1cf898ed3cb51b30416380fc2109669c4b9f77..b7d31ca5518744983c77bc8339f30756621dfea0 100644 (file)
@@ -488,6 +488,23 @@ config X86_INTEL_MID
          Intel MID platforms are based on an Intel processor and chipset which
          consume less power than most of the x86 derivatives.
 
+config X86_INTEL_QUARK
+       bool "Intel Quark platform support"
+       depends on X86_32
+       depends on X86_EXTENDED_PLATFORM
+       depends on X86_PLATFORM_DEVICES
+       depends on X86_TSC
+       depends on PCI
+       depends on PCI_GOANY
+       depends on X86_IO_APIC
+       select IOSF_MBI
+       select INTEL_IMR
+       select COMMON_CLK
+       ---help---
+         Select to include support for Quark X1000 SoC.
+         Say Y here if you have a Quark based system such as the Arduino
+         compatible Intel Galileo.
+
 config X86_INTEL_LPSS
        bool "Intel Low Power Subsystem Support"
        depends on ACPI
index 61bd2ad94281884f13b70f3bb9e397fdcef9338a..20028da8ae188ce16aaabdc1ef6da2fd1eb5b8f0 100644 (file)
@@ -313,6 +313,19 @@ config DEBUG_NMI_SELFTEST
 
          If unsure, say N.
 
+config DEBUG_IMR_SELFTEST
+       bool "Isolated Memory Region self test"
+       default n
+       depends on INTEL_IMR
+       ---help---
+         This option enables automated sanity testing of the IMR code.
+         Some simple tests are run to verify IMR bounds checking, alignment
+         and overlapping. This option is really only useful if you are
+         debugging an IMR memory map or are modifying the IMR code and want to
+         test your changes.
+
+         If unsure, say N here.
+
 config X86_DEBUG_STATIC_CPU_HAS
        bool "Debug alternatives"
        depends on DEBUG_KERNEL
index 843feb3eb20bd781bf8df6cd2b004450eb2d1c5e..0a291cdfaf77100117baf53b2e3af75a43a8af4c 100644 (file)
@@ -51,6 +51,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
 vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
        $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
        $(call if_changed,ld)
index 7ff3632806b18ec9a48bd0ae88bdc7a9e9dbe091..99494dff2113e5ac63bcc77aaa58bb01b0506fbb 100644 (file)
@@ -3,28 +3,3 @@
 #include <asm/processor-flags.h>
 
 #include "../../platform/efi/efi_stub_64.S"
-
-#ifdef CONFIG_EFI_MIXED
-       .code64
-       .text
-ENTRY(efi64_thunk)
-       push    %rbp
-       push    %rbx
-
-       subq    $16, %rsp
-       leaq    efi_exit32(%rip), %rax
-       movl    %eax, 8(%rsp)
-       leaq    efi_gdt64(%rip), %rax
-       movl    %eax, 4(%rsp)
-       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
-       leaq    efi32_boot_gdt(%rip), %rax
-       movl    %eax, (%rsp)
-
-       call    __efi64_thunk
-
-       addq    $16, %rsp
-       pop     %rbx
-       pop     %rbp
-       ret
-ENDPROC(efi64_thunk)
-#endif /* CONFIG_EFI_MIXED */
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
new file mode 100644 (file)
index 0000000..630384a
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+ *
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
+ * since the firmware's 32-bit IDT is still currently installed and it
+ * needs to be able to service interrupts.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32-bits because we're executing with an identity
+ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
+ * yet.
+ */
+
+#include <linux/linkage.h>
+#include <asm/msr.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+       .code64
+       .text
+ENTRY(efi64_thunk)
+       push    %rbp
+       push    %rbx
+
+       subq    $8, %rsp
+       leaq    efi_exit32(%rip), %rax
+       movl    %eax, 4(%rsp)
+       leaq    efi_gdt64(%rip), %rax
+       movl    %eax, (%rsp)
+       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
+
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       /*
+        * Convert x86-64 ABI params to i386 ABI
+        */
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       sgdt    save_gdt(%rip)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /*
+        * Switch to gdt with 32-bit segments. This is the firmware GDT
+        * that was installed when the kernel started executing. This
+        * pointer was saved at the EFI stub entry point in head_64.S.
+        */
+       leaq    efi32_boot_gdt(%rip), %rax
+       lgdt    (%rax)
+
+       pushq   $__KERNEL_CS
+       leaq    efi_enter32(%rip), %rax
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       lgdt    save_gdt(%rip)
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
+
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       addq    $8, %rsp
+       pop     %rbx
+       pop     %rbp
+       ret
+ENDPROC(efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       /* Reload pgtables */
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       /* Disable paging */
+       movl    %cr0, %eax
+       btrl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+
+       /* Disable long mode via EFER */
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btrl    $_EFER_LME, %eax
+       wrmsr
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       /*
+        * Some firmware will return with interrupts enabled. Be sure to
+        * disable them before we switch GDTs.
+        */
+       cli
+
+       movl    56(%esp), %eax
+       movl    %eax, 2(%eax)
+       lgdtl   (%eax)
+
+       movl    %cr4, %eax
+       btsl    $(X86_CR4_PAE_BIT), %eax
+       movl    %eax, %cr4
+
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btsl    $_EFER_LME, %eax
+       wrmsr
+
+       xorl    %eax, %eax
+       lldt    %ax
+
+       movl    60(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       /* Enable paging */
+       movl    %cr0, %eax
+       btsl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+       .global efi32_boot_gdt
+efi32_boot_gdt:        .word   0
+               .quad   0
+
+save_gdt:      .word   0
+               .quad   0
+func_rt_ptr:   .quad   0
+
+       .global efi_gdt64
+efi_gdt64:
+       .word   efi_gdt64_end - efi_gdt64
+       .long   0                       /* Filled out by user */
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+efi_gdt64_end:
index 947c6bf52c330452cfb7ac4a19d368f68f4e4509..54f60ab41c63025fd2cb42e5579df7f69c3d9b60 100644 (file)
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
-               assoc = (src + req->cryptlen + auth_tag_len);
+               assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                        req->assoclen, 0);
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
-               scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+               scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
                kfree(src);
        }
        return retval;
index 92003f3c8a427b9138796ceef1b76bc237860da3..efc3b22d896eb23b7e37cf9c720065c0b6b0c717 100644 (file)
@@ -213,7 +213,15 @@ void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+       return -1;
+}
+#else
 extern int apic_force_enable(unsigned long addr);
+#endif
 
 extern int apic_bsp_setup(bool upmode);
 extern void apic_ap_setup(void);
index 0dbc08282291044216456e6c14a802d56a0e40d8..72ba21a8b5fc2ff8b76c6c3e53b331ca6a558339 100644 (file)
@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
        preempt_disable();
        tsk->thread.fpu_counter = 0;
        __drop_fpu(tsk);
-       clear_used_math();
+       clear_stopped_child_used_math(tsk);
        preempt_enable();
 }
 
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
new file mode 100644 (file)
index 0000000..cd2ce40
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * imr.h: Isolated Memory Region API
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _IMR_H
+#define _IMR_H
+
+#include <linux/types.h>
+
+/*
+ * IMR agent access mask bits
+ * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register
+ * definitions.
+ */
+#define IMR_ESRAM_FLUSH                BIT(31)
+#define IMR_CPU_SNOOP          BIT(30)         /* Applicable only to write */
+#define IMR_RMU                        BIT(29)
+#define IMR_VC1_SAI_ID3                BIT(15)
+#define IMR_VC1_SAI_ID2                BIT(14)
+#define IMR_VC1_SAI_ID1                BIT(13)
+#define IMR_VC1_SAI_ID0                BIT(12)
+#define IMR_VC0_SAI_ID3                BIT(11)
+#define IMR_VC0_SAI_ID2                BIT(10)
+#define IMR_VC0_SAI_ID1                BIT(9)
+#define IMR_VC0_SAI_ID0                BIT(8)
+#define IMR_CPU_0              BIT(1)          /* SMM mode */
+#define IMR_CPU                        BIT(0)          /* Non SMM mode */
+#define IMR_ACCESS_NONE                0
+
+/*
+ * Read/Write access-all bits here include some reserved bits.
+ * These are the values firmware uses and are accepted by hardware.
+ * The kernel defines read/write access-all in the same way as firmware
+ * in order to have a consistent and crisp definition across firmware,
+ * bootloader and kernel.
+ */
+#define IMR_READ_ACCESS_ALL    0xBFFFFFFF
+#define IMR_WRITE_ACCESS_ALL   0xFFFFFFFF
+
+/* Number of IMRs provided by Quark X1000 SoC */
+#define QUARK_X1000_IMR_MAX    0x08
+#define QUARK_X1000_IMR_REGBASE 0x40
+
+/* IMR alignment bits - only bits 31:10 are checked for IMR validity */
+#define IMR_ALIGN              0x400
+#define IMR_MASK               (IMR_ALIGN - 1)
+
+int imr_add_range(phys_addr_t base, size_t size,
+                 unsigned int rmask, unsigned int wmask, bool lock);
+
+int imr_remove_range(phys_addr_t base, size_t size);
+
+#endif /* _IMR_H */
index 7050d864f5207c4fb384672b083a18a6584bdbbe..cf87de3fc39000eb21028ab2597d5187978bda4a 100644 (file)
@@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key);
 
 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 {
-       set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+       set_bit(0, (volatile unsigned long *)&lock->tickets.head);
 }
 
 #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 }
 
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
+{
+       return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+                                                       __ticket_t head)
+{
+       if (head & TICKET_SLOWPATH_FLAG) {
+               arch_spinlock_t old, new;
+
+               old.tickets.head = head;
+               new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
+               old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
+               new.tickets.tail = old.tickets.tail;
+
+               /* try to clear slowpath flag when there are no contenders */
+               cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+       }
+}
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-       return lock.tickets.head == lock.tickets.tail;
+       return __tickets_equal(lock.tickets.head, lock.tickets.tail);
 }
 
 /*
@@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
        if (likely(inc.head == inc.tail))
                goto out;
 
-       inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
                unsigned count = SPIN_THRESHOLD;
 
                do {
-                       if (READ_ONCE(lock->tickets.head) == inc.tail)
-                               goto out;
+                       inc.head = READ_ONCE(lock->tickets.head);
+                       if (__tickets_equal(inc.head, inc.tail))
+                               goto clear_slowpath;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
-out:   barrier();      /* make sure nothing creeps before the lock is taken */
+clear_slowpath:
+       __ticket_check_and_clear_slowpath(lock, inc.head);
+out:
+       barrier();      /* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
        arch_spinlock_t old, new;
 
        old.tickets = READ_ONCE(lock->tickets);
-       if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
+       if (!__tickets_equal(old.tickets.head, old.tickets.tail))
                return 0;
 
        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
+       new.head_tail &= ~TICKET_SLOWPATH_FLAG;
 
        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
-                                           arch_spinlock_t old)
-{
-       arch_spinlock_t new;
-
-       BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
-
-       /* Perform the unlock on the "before" copy */
-       old.tickets.head += TICKET_LOCK_INC;
-
-       /* Clear the slowpath flag */
-       new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
-
-       /*
-        * If the lock is uncontended, clear the flag - use cmpxchg in
-        * case it changes behind our back though.
-        */
-       if (new.tickets.head != new.tickets.tail ||
-           cmpxchg(&lock->head_tail, old.head_tail,
-                                       new.head_tail) != old.head_tail) {
-               /*
-                * Lock still has someone queued for it, so wake up an
-                * appropriate waiter.
-                */
-               __ticket_unlock_kick(lock, old.tickets.head);
-       }
-}
-
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        if (TICKET_SLOWPATH_FLAG &&
-           static_key_false(&paravirt_ticketlocks_enabled)) {
-               arch_spinlock_t prev;
+               static_key_false(&paravirt_ticketlocks_enabled)) {
+               __ticket_t head;
 
-               prev = *lock;
-               add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+               BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
-               /* add_smp() is a full mb() */
+               head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
 
-               if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
-                       __ticket_unlock_slowpath(lock, prev);
+               if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
+                       head &= ~TICKET_SLOWPATH_FLAG;
+                       __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
+               }
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
@@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-       return tmp.tail != tmp.head;
+       return !__tickets_equal(tmp.tail, tmp.head);
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-       return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+       tmp.head &= ~TICKET_SLOWPATH_FLAG;
+       return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended arch_spin_is_contended
 
@@ -191,8 +189,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                 * We need to check "unlocked" in a loop; tmp.head == head
                 * can be a false positive because of overflow.
                 */
-               if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
-                   tmp.head != head)
+               if (__tickets_equal(tmp.head, tmp.tail) ||
+                               !__tickets_equal(tmp.head, head))
                        break;
 
                cpu_relax();
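
For reference, the hunks above lean on a __tickets_equal() helper added earlier
in this header (outside the quoted context). A minimal sketch of such a
comparison, which simply ignores the TICKET_SLOWPATH_FLAG bit, is:

	static inline int __tickets_equal(__ticket_t one, __ticket_t two)
	{
		/* compare head and tail while masking out the slowpath flag */
		return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
	}
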
index 5fa9770035dc935c0a90899f470e668ab889ec7f..c9a6d68b8d623b84d169f61c4680c194ea8d137a 100644 (file)
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                asm volatile("1:"XSAVES"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
        else
                asm volatile("1:"XSAVE"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
-
-       asm volatile(xstate_fault
-                    : "0" (0)
-                    : "memory");
-
        return err;
 }
 
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                asm volatile("1:"XRSTORS"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
        else
                asm volatile("1:"XRSTOR"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
-
-       asm volatile(xstate_fault
-                    : "0" (0)
-                    : "memory");
-
        return err;
 }
 
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
         */
        alternative_input_2(
                "1:"XSAVE,
-               "1:"XSAVEOPT,
+               XSAVEOPT,
                X86_FEATURE_XSAVEOPT,
-               "1:"XSAVES,
+               XSAVES,
                X86_FEATURE_XSAVES,
                [fx] "D" (fx), "a" (lmask), "d" (hmask) :
                "memory");
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
         */
        alternative_input(
                "1: " XRSTOR,
-               "1: " XRSTORS,
+               XRSTORS,
                X86_FEATURE_XSAVES,
                "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                : "memory");
index ae97ed0873c6e3f35e28545b521dda9c25c8d74e..803b684676ff3d0bcc6a17c530a457efcf485fca 100644 (file)
@@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
        int rc, irq, trigger, polarity;
 
+       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+               *irqp = gsi;
+               return 0;
+       }
+
        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
        if (rc == 0) {
                trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
@@ -1332,6 +1337,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
        return 0;
 }
 
+/*
+ * ACPI offers an alternative platform interface model that removes
+ * ACPI hardware requirements for platforms that do not implement
+ * the PC Architecture.
+ *
+ * We initialize the Hardware-reduced ACPI model here:
+ */
+static void __init acpi_reduced_hw_init(void)
+{
+       if (acpi_gbl_reduced_hardware) {
+               /*
+                * Override x86_init functions and bypass legacy pic
+                * in Hardware-reduced ACPI mode
+                */
+               x86_init.timers.timer_init      = x86_init_noop;
+               x86_init.irqs.pre_vector_init   = x86_init_noop;
+               legacy_pic                      = &null_legacy_pic;
+       }
+}
+
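
For this to work, the x86_init hooks being overridden only need to be plain
function pointers; x86_init_noop is assumed here to be nothing more than an
empty stub along these lines (a sketch; the real definition lives elsewhere in
the x86 setup code):

	void __init x86_init_noop(void) { }

so in Hardware-reduced ACPI mode the legacy timer and PIC setup paths simply
become no-ops.
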
 /*
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
@@ -1531,6 +1556,11 @@ int __init early_acpi_boot_init(void)
         */
        early_acpi_process_madt();
 
+       /*
+        * Hardware-reduced ACPI mode initialization:
+        */
+       acpi_reduced_hw_init();
+
        return 0;
 }
 
index c2fd21fed00284066ab138354a4daefbc1b775ac..017149cded0760ccc842db2a50d3c2013321aa52 100644 (file)
@@ -37,10 +37,12 @@ static const struct apic apic_numachip;
 static unsigned int get_apic_id(unsigned long x)
 {
        unsigned long value;
-       unsigned int id;
+       unsigned int id = (x >> 24) & 0xff;
 
-       rdmsrl(MSR_FAM10H_NODE_ID, value);
-       id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U);
+       if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+               rdmsrl(MSR_FAM10H_NODE_ID, value);
+               id |= (value << 2) & 0xff00;
+       }
 
        return id;
 }
@@ -155,10 +157,18 @@ static int __init numachip_probe(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-       if (c->phys_proc_id != node) {
-               c->phys_proc_id = node;
-               per_cpu(cpu_llc_id, smp_processor_id()) = node;
+       u64 val;
+       u32 nodes = 1;
+
+       this_cpu_write(cpu_llc_id, node);
+
+       /* Account for nodes per socket in multi-core-module processors */
+       if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+               rdmsrl(MSR_FAM10H_NODE_ID, val);
+               nodes = ((val >> 3) & 7) + 1;
        }
+
+       c->phys_proc_id = node / nodes;
 }
 
 static int __init numachip_system_init(void)
index b5c8ff5e9dfcad79075a1af5f41b0ac0ee37231b..2346c95c6ab1945077fdde28c129342ffe09749d 100644 (file)
@@ -1396,6 +1396,12 @@ void cpu_init(void)
 
        wait_for_master_cpu(cpu);
 
+       /*
+        * Initialize the CR4 shadow before doing anything that could
+        * try to read it.
+        */
+       cr4_init_shadow();
+
        show_ucode_info_early();
 
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
index 94d7dcb1214530dfa86b4fef2f2bcbf007d32fe4..50163fa9034f0d2f4db28767432a249ad9e0cb35 100644 (file)
@@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = {
        { 0xb2, TLB_INST_4K,            64,     " TLB_INST 4KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,            128,    " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,            256,    " TLB_DATA 4 KByte pages, 4-way associative" },
-       { 0xb5, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 8-way set ssociative" },
-       { 0xb6, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 8-way set ssociative" },
+       { 0xb5, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 8-way set associative" },
+       { 0xb6, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xba, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,         8,      " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xc1, STLB_4K_2M,             1024,   " STLB 4 KByte and 2 MByte pages, 8-way associative" },
index c6826d1e8082584268d1b5e8f3abeb176ee61187..746e7fd08aad7082747ee1d9dca80570f1a00e3b 100644 (file)
@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                struct microcode_header_intel mc_header;
                unsigned int mc_size;
 
+               if (leftover < sizeof(mc_header)) {
+                       pr_err("error! Truncated header in microcode data file\n");
+                       break;
+               }
+
                if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
                        break;
 
index ec9df6f9cd47b35e7f4d6059eb094f922420c5cf..420eb933189ca487110607475ddbf33be8e8267b 100644 (file)
@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
        unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
        int i;
 
-       while (leftover) {
+       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+
+               if (leftover < sizeof(mc_header))
+                       break;
+
                mc_header = (struct microcode_header_intel *)ucode_ptr;
 
                mc_size = get_totalsize(mc_header);
index 000d4199b03e69905527d7972ccd918835e6cc1f..31e2d5bf3e38887ca06402bff6c647b9aa9a3c5c 100644 (file)
@@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback)
 ENTRY(xen_do_upcall)
 1:     mov %esp, %eax
        call xen_evtchn_do_upcall
+#ifndef CONFIG_PREEMPT
+       call xen_maybe_preempt_hcall
+#endif
        jmp  ret_from_intr
        CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
index db13655c3a2aff4a4475a9adf6ce1cb5b3639220..1d74d161687c9f2a71f334b5530067356310af18 100644 (file)
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork)
        testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
        jz   1f
 
-       testl $_TIF_IA32, TI_flags(%rcx)        # 32-bit compat task needs IRET
-       jnz  int_ret_from_sys_call
-
-       RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
-       jmp ret_from_sys_call                   # go to the SYSRET fastpath
+       /*
+        * By the time we get here, we have no idea whether our pt_regs,
+        * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+        * the slow path, or one of the ia32entry paths.
+        * Use int_ret_from_sys_call to return, since it can safely handle
+        * all of the above.
+        */
+       jmp  int_ret_from_sys_call
 
 1:
        subq $REST_SKIP, %rsp   # leave space for volatiles
@@ -1208,6 +1211,9 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
        decl PER_CPU_VAR(irq_count)
+#ifndef CONFIG_PREEMPT
+       call xen_maybe_preempt_hcall
+#endif
        jmp  error_exit
        CFI_ENDPROC
 END(xen_do_hypervisor_callback)
index 705ef8d48e2dc464936672fb54eea908f8f03b4e..67b1cbe0093adba1141f8d9ebda29ad34dc9d23e 100644 (file)
@@ -302,6 +302,9 @@ int check_irq_vectors_for_cpu_disable(void)
                irq = __this_cpu_read(vector_irq[vector]);
                if (irq >= 0) {
                        desc = irq_to_desc(irq);
+                       if (!desc)
+                               continue;
+
                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpu_clear(this_cpu, affinity_new);
index 98f654d466e585167153e58811902675bfeb5baa..4e3d5a9621fe0052fac43d5ad6c109b9d3f54447 100644 (file)
@@ -84,7 +84,7 @@ static volatile u32 twobyte_is_boostable[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
        /*      ----------------------------------------------          */
        W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
-       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
+       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
        W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
        W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
@@ -223,27 +223,48 @@ static unsigned long
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
        struct kprobe *kp;
+       unsigned long faddr;
 
        kp = get_kprobe((void *)addr);
-       /* There is no probe, return original address */
-       if (!kp)
+       faddr = ftrace_location(addr);
+       /*
+        * Addresses inside the ftrace location are refused by
+        * arch_check_ftrace_location(). Something went terribly wrong
+        * if such an address is checked here.
+        */
+       if (WARN_ON(faddr && faddr != addr))
+               return 0UL;
+       /*
+        * Use the current code if it is not modified by Kprobe
+        * and it cannot be modified by ftrace.
+        */
+       if (!kp && !faddr)
                return addr;
 
        /*
-        *  Basically, kp->ainsn.insn has an original instruction.
-        *  However, RIP-relative instruction can not do single-stepping
-        *  at different place, __copy_instruction() tweaks the displacement of
-        *  that instruction. In that case, we can't recover the instruction
-        *  from the kp->ainsn.insn.
+        * Basically, kp->ainsn.insn has an original instruction.
+        * However, RIP-relative instruction can not do single-stepping
+        * at different place, __copy_instruction() tweaks the displacement of
+        * that instruction. In that case, we can't recover the instruction
+        * from the kp->ainsn.insn.
         *
-        *  On the other hand, kp->opcode has a copy of the first byte of
-        *  the probed instruction, which is overwritten by int3. And
-        *  the instruction at kp->addr is not modified by kprobes except
-        *  for the first byte, we can recover the original instruction
-        *  from it and kp->opcode.
+        * On the other hand, in case of a normal Kprobe, kp->opcode has a copy
+        * of the first byte of the probed instruction, which is overwritten
+        * by int3. And the instruction at kp->addr is not modified by kprobes
+        * except for the first byte, we can recover the original instruction
+        * from it and kp->opcode.
+        *
+        * In case of Kprobes using ftrace, we do not have a copy of
+        * the original instruction. In fact, the ftrace location might
+        * be modified at any time and could even be in an inconsistent state.
+        * Fortunately, we know that the original code is the ideal 5-byte
+        * long NOP.
         */
-       memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-       buf[0] = kp->opcode;
+       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (faddr)
+               memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+       else
+               buf[0] = kp->opcode;
        return (unsigned long)buf;
 }
 
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction can not get recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr)
                 * normally used, we just go through if there is no kprobe.
                 */
                __addr = recover_probed_instruction(buf, addr);
+               if (!__addr)
+                       return 0;
                kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
                insn_get_length(&insn);
 
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src)
        unsigned long recovered_insn =
                recover_probed_instruction(buf, (unsigned long)src);
 
+       if (!recovered_insn)
+               return 0;
        kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
        insn_get_length(&insn);
        /* Another subsystem puts a breakpoint, failed to recover */
index 0dd8d089c315e0e9df338e9144799c6e900df831..7b3b9d15c47a63953d6932026cc57db795e3a507 100644 (file)
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr)
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
+               if (!recovered_insn)
+                       return 0;
                kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
                insn_get_length(&insn);
                /* Another subsystem puts a breakpoint */
index 94f6434843008c38b8a091c077e0c94fc36c9a07..e354cc6446aba4286645dc799e48d8539c7005d6 100644 (file)
@@ -609,7 +609,7 @@ static inline void check_zero(void)
        u8 ret;
        u8 old;
 
-       old = ACCESS_ONCE(zero_stats);
+       old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
@@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
        int cpu;
        u64 start;
        unsigned long flags;
+       __ticket_t head;
 
        if (in_nmi())
                return;
@@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         */
        __ticket_enter_slowpath(lock);
 
+       /* make sure enter_slowpath, which is atomic, does not cross the read */
+       smp_mb__after_atomic();
+
        /*
         * check again make sure it didn't become free while
         * we weren't looking.
         */
-       if (ACCESS_ONCE(lock->tickets.head) == want) {
+       head = READ_ONCE(lock->tickets.head);
+       if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }
@@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
-               if (ACCESS_ONCE(w->lock) == lock &&
-                   ACCESS_ONCE(w->want) == ticket) {
+               if (READ_ONCE(w->lock) == lock &&
+                   READ_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
index 9d2073e2ecc92f5c97d51178236d8b2bc63ac029..4ff5d162ff9fd55381259ff8dd96f84064ecea72 100644 (file)
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
                goto exit;
        conditional_sti(regs);
 
-       if (!user_mode(regs))
+       if (!user_mode_vm(regs))
                die("bounds", regs, error_code);
 
        if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
-       if (!dr6 && user_mode(regs))
+       if (!dr6 && user_mode_vm(regs))
                user_icebp = 1;
 
        /* Catch kmemcheck conditions first of all! */
index 8b96a947021ffe0ad3d05397d3fc44b975a98141..81f8adb0679e548d31af5982297c85141e3693f9 100644 (file)
  * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
+ *     (why we support bound (62) then? it's similar, and similarly unused...)
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * 07,17,1f - pop es/ss/ds
+ *     Normally not used in userspace, but would execute if used.
+ *     Can cause GP or stack exception if it tries to load a wrong segment descriptor.
+ *     We hesitate to run them under single step since kernel's handling
+ *     of userspace single-stepping (TF flag) is fragile.
+ *     We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
+ *     on the same grounds that they are never used.
+ * cd - int N.
+ *     Used by userspace for "int 80" syscall entry. (Other "int N"
+ *     cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *     Not supported since kernel's handling of userspace single-stepping
+ *     (TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
  */
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static volatile u32 good_insns_32[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
+       W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
        W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
-       W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+       W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-       W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+       W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
        W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
-       W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+       W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
        W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-       W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+       W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -94,27 +121,61 @@ static volatile u32 good_insns_32[256 / 32] = {
 #define good_insns_32  NULL
 #endif
 
-/* Good-instruction tables for 64-bit apps */
+/* Good-instruction tables for 64-bit apps.
+ *
+ * Genuinely invalid opcodes:
+ * 06,07 - formerly push/pop es
+ * 0e - formerly push cs
+ * 16,17 - formerly push/pop ss
+ * 1e,1f - formerly push/pop ds
+ * 27,2f,37,3f - formerly daa/das/aaa/aas
+ * 60,61 - formerly pusha/popa
+ * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
+ * 82 - formerly redundant encoding of Group1
+ * 9a - formerly call seg:ofs
+ * ce - formerly into
+ * d4,d5 - formerly aam/aad
+ * d6 - formerly undocumented salc
+ * ea - formerly jmp seg:ofs
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * cd - int N.
+ *     Used by userspace for "int 80" syscall entry. (Other "int N"
+ *     cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *     Not supported since kernel's handling of userspace single-stepping
+ *     (TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
+ */
 #if defined(CONFIG_X86_64)
 static volatile u32 good_insns_64[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
+       W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
        W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
-       W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
-       W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
+       W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
+       W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-       W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+       W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
        W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
-       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
-       W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
+       W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
        W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
-       W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-       W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+       W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
+       W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -122,49 +183,55 @@ static volatile u32 good_insns_64[256 / 32] = {
 #define good_insns_64  NULL
 #endif
 
-/* Using this for both 64-bit and 32-bit apps */
+/* Using this for both 64-bit and 32-bit apps.
+ * Opcodes we don't support:
+ * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
+ * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
+ *     Also encodes tons of other system insns if mod=11.
+ *     Some are in fact non-system: xend, xtest, rdtscp, maybe more
+ * 0f 05 - syscall
+ * 0f 06 - clts (CPL0 insn)
+ * 0f 07 - sysret
+ * 0f 08 - invd (CPL0 insn)
+ * 0f 09 - wbinvd (CPL0 insn)
+ * 0f 0b - ud2
+ * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?)
+ * 0f 34 - sysenter
+ * 0f 35 - sysexit
+ * 0f 37 - getsec
+ * 0f 78 - vmread (Intel VMX. CPL0 insn)
+ * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
+ *     Note: with prefixes, these two opcodes are
+ *     extrq/insertq/AVX512 convert vector ops.
+ * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
+ *     {rd,wr}{fs,gs}base,{s,l,m}fence.
+ *     Why? They are all user-executable.
+ */
 static volatile u32 good_2byte_insns[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
-       W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
-       W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+       W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
+       W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+       W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
        W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
-       W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+       W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
-       W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
-       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+       W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
+       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
-       W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+       W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
        W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
-       W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
+       W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
 #undef W
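
Each W() row above contributes one validity bit per opcode, packed 32 opcodes
per u32 element. A hedged sketch of how such a table would be consulted (the
real code uses test_bit() over the same layout; opcode_is_good() is a made-up
name for illustration):

	static bool opcode_is_good(u8 opcode, volatile u32 *table)
	{
		/* bit (opcode % 32) of element (opcode / 32) says whether
		 * this opcode may be probed */
		return (table[opcode / 32] >> (opcode % 32)) & 1;
	}
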
 
 /*
- * opcodes we'll probably never support:
- *
- *  6c-6d, e4-e5, ec-ed - in
- *  6e-6f, e6-e7, ee-ef - out
- *  cc, cd - int3, int
- *  cf - iret
- *  d6 - illegal instruction
- *  f1 - int1/icebp
- *  f4 - hlt
- *  fa, fb - cli, sti
- *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
- *
- * invalid opcodes in 64-bit mode:
- *
- *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
- *  63 - we support this opcode in x86_64 but not in i386.
- *
  * opcodes we may need to refine support for:
  *
  *  0f - 2-byte instructions: For many of these instructions, the validity
index 34f66e58a896693d392e4ed074b1f9ca664974d3..cdc6cf90307800abb83f1b4b516ba389212c3dd0 100644 (file)
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                 * thread's fpu state, reconstruct fxstate from the fsave
                 * header. Sanitize the copied state etc.
                 */
-               struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+               struct fpu *fpu = &tsk->thread.fpu;
                struct user_i387_ia32_struct env;
                int err = 0;
 
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                 */
                drop_fpu(tsk);
 
-               if (__copy_from_user(xsave, buf_fx, state_size) ||
+               if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
                    __copy_from_user(&env, buf, sizeof(env))) {
+                       fpu_finit(fpu);
                        err = -1;
                } else {
                        sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
-                       set_used_math();
                }
 
+               set_used_math();
                if (use_eager_fpu()) {
                        preempt_disable();
                        math_state_restore();
index e0b794a84c35cdd7ecc03bc6de6500b0ac237f57..106c01557f2b63706eca28e462a3b072b590f0c5 100644 (file)
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                        goto done;
                }
        }
-       ctxt->dst.orig_val = ctxt->dst.val;
+       /* Copy full 64-bit value for CMPXCHG8B.  */
+       ctxt->dst.orig_val64 = ctxt->dst.val64;
 
 special_insn:
 
index cc31f7c06d3ddc8ab4ef61c775510e2dcbfde518..9541ba34126b90123ddfe383453145ddfcf789c4 100644 (file)
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s,
                return -EOPNOTSUPP;
 
        if (len != 1) {
+               memset(val, 0, len);
                pr_pic_unimpl("non byte read\n");
                return 0;
        }
index e55b5fc344eb911a7b4ed0c490d50a6767532d27..bd4e34de24c7a0860de0adf88d3b85fd94f8ddae 100644 (file)
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
                apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
        apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
-       apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
+       apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
        apic->highest_isr_cache = -1;
        update_divide_count(apic);
        atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
-       apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+       apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
                                1 : count_vectors(apic->regs + APIC_ISR);
        apic->highest_isr_cache = -1;
        if (kvm_x86_ops->hwapic_irr_update)
index d319e0c24758876178aeab46c65fe611cb02126e..cc618c882f900ad21cb4de57d94daa91a5f4ec4c 100644 (file)
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
        return;
 }
 
-static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
-{
-       return;
-}
-
 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
        return;
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
        .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
        .vm_has_apicv = svm_vm_has_apicv,
        .load_eoi_exitmap = svm_load_eoi_exitmap,
-       .hwapic_isr_update = svm_hwapic_isr_update,
        .sync_pir_to_irr = svm_sync_pir_to_irr,
 
        .set_tss_addr = svm_set_tss_addr,
index 14c1a18d206aeee0d59637162b0f1a58056c8941..10a481b7674de285a45de0a1675ee60a9318e19d 100644 (file)
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 {
        unsigned long *msr_bitmap;
 
-       if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+       if (is_guest_mode(vcpu))
+               msr_bitmap = vmx_msr_bitmap_nested;
+       else if (irqchip_in_kernel(vcpu->kvm) &&
+               apic_x2apic_mode(vcpu->arch.apic)) {
                if (is_long_mode(vcpu))
                        msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
                else
@@ -4367,6 +4370,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_SMP
+       if (vcpu->mode == IN_GUEST_MODE) {
+               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+                               POSTED_INTR_VECTOR);
+               return true;
+       }
+#endif
+       return false;
+}
+
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
                                                int vector)
 {
@@ -4375,9 +4390,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
        if (is_guest_mode(vcpu) &&
            vector == vmx->nested.posted_intr_nv) {
                /* the PIR and ON have been set by L1. */
-               if (vcpu->mode == IN_GUEST_MODE)
-                       apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-                               POSTED_INTR_VECTOR);
+               kvm_vcpu_trigger_posted_interrupt(vcpu);
                /*
                 * If a posted intr is not recognized by hardware,
                 * we will accomplish it in the next vmentry.
@@ -4409,12 +4422,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 
        r = pi_test_and_set_on(&vmx->pi_desc);
        kvm_make_request(KVM_REQ_EVENT, vcpu);
-#ifdef CONFIG_SMP
-       if (!r && (vcpu->mode == IN_GUEST_MODE))
-               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-                               POSTED_INTR_VECTOR);
-       else
-#endif
+       if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
                kvm_vcpu_kick(vcpu);
 }
 
@@ -9213,9 +9221,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        }
 
        if (cpu_has_vmx_msr_bitmap() &&
-           exec_control & CPU_BASED_USE_MSR_BITMAPS &&
-           nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
-               vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+           exec_control & CPU_BASED_USE_MSR_BITMAPS) {
+               nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
+               /* MSR_BITMAP will be set by following vmx_set_efer. */
        } else
                exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
index bd7a70be41b35fa93ad952060fc2c7f432e5628e..32bf19ef3115f65c9dffc23a655be2763babcaff 100644 (file)
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_USER_NMI:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
-       case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_IOEVENTFD_NO_LENGTH:
        case KVM_CAP_PIT2:
index 4a0890f815c40dc2641d092ff7aeaee5cb6f0039..08f41caada45fae631599fde62f9b0d43902b7cd 100644 (file)
@@ -1,6 +1,6 @@
 config LGUEST_GUEST
        bool "Lguest guest support"
-       depends on X86_32 && PARAVIRT
+       depends on X86_32 && PARAVIRT && PCI
        select TTY
        select VIRTUALIZATION
        select VIRTIO
@@ -8,7 +8,7 @@ config LGUEST_GUEST
        help
          Lguest is a tiny in-kernel hypervisor.  Selecting this will
          allow your kernel to boot under lguest.  This option will increase
-         your kernel size by about 6k.  If in doubt, say N.
+         your kernel size by about 10k.  If in doubt, say N.
 
          If you say Y here, make sure you say Y (or M) to the virtio block
          and net drivers which lguest needs.
index 553c094b9cd7984b7334a95122931a93249f1ddf..a110efca6d068f7d881f8c1d955d8a6906457f64 100644 (file)
@@ -238,6 +238,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
        }
 }
 
+static const char *page_size_string(struct map_range *mr)
+{
+       static const char str_1g[] = "1G";
+       static const char str_2m[] = "2M";
+       static const char str_4m[] = "4M";
+       static const char str_4k[] = "4k";
+
+       if (mr->page_size_mask & (1<<PG_LEVEL_1G))
+               return str_1g;
+       /*
+        * 32-bit without PAE has a 4M large page size.
+        * PG_LEVEL_2M is misnamed, but we can at least
+        * print out the right size in the string.
+        */
+       if (IS_ENABLED(CONFIG_X86_32) &&
+           !IS_ENABLED(CONFIG_X86_PAE) &&
+           mr->page_size_mask & (1<<PG_LEVEL_2M))
+               return str_4m;
+
+       if (mr->page_size_mask & (1<<PG_LEVEL_2M))
+               return str_2m;
+
+       return str_4k;
+}
+
 static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long start,
                                     unsigned long end)
@@ -333,8 +358,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
                                mr[i].start, mr[i].end - 1,
-                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+                               page_size_string(&mr[i]));
 
        return nr_range;
 }
index 919b91205cd4be57760c50956eddb2d02dc13c45..df4552bd239e03b4a02e6505454e41420d530461 100644 (file)
@@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
 
-static unsigned int stack_maxrandom_size(void)
+static unsigned long stack_maxrandom_size(void)
 {
-       unsigned int max = 0;
+       unsigned long max = 0;
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
+               max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
        }
 
        return max;
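
The switch from unsigned int to unsigned long matters because, assuming the
usual 64-bit values of STACK_RND_MASK (0x3fffff) and PAGE_SHIFT (12), the
maximum randomization range is

	0x3fffff << 12 == 0x3fffff000	/* roughly 16 GiB */

which does not fit in 32 bits, so computing it in an unsigned int silently
truncated the result.
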
index 6ac273832f284635ac1a66bf3f8551de379fa0f0..e4695985f9de85778db5e084b37eda5719d3a82a 100644 (file)
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info,
                                struct list_head *list)
 {
        int ret;
-       struct resource_entry *entry;
+       struct resource_entry *entry, *tmp;
 
        sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
        info->bridge = device;
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info,
                dev_dbg(&device->dev,
                        "no IO and memory resources present in _CRS\n");
        else
-               resource_list_for_each_entry(entry, list)
-                       entry->res->name = info->name;
+               resource_list_for_each_entry_safe(entry, tmp, list) {
+                       if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
+                           (entry->res->flags & IORESOURCE_DISABLED))
+                               resource_list_destroy_entry(entry);
+                       else
+                               entry->res->name = info->name;
+               }
 }
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
index 85afde1fa3e5f6465e40a0599e17669a934b88b9..a62e0be3a2f1b4f563ab5f6fda0acb59612ea523 100644 (file)
@@ -5,6 +5,7 @@ obj-y   += geode/
 obj-y  += goldfish/
 obj-y  += iris/
 obj-y  += intel-mid/
+obj-y  += intel-quark/
 obj-y  += olpc/
 obj-y  += scx200/
 obj-y  += sfi/
index 5fcda7272550a79b52660030571adeaf9d0f197b..86d0f9e08dd95eb1023d5ac7ec4fb006aafb72c9 100644 (file)
@@ -91,167 +91,6 @@ ENTRY(efi_call)
        ret
 ENDPROC(efi_call)
 
-#ifdef CONFIG_EFI_MIXED
-
-/*
- * We run this function from the 1:1 mapping.
- *
- * This function must be invoked with a 1:1 mapped stack.
- */
-ENTRY(__efi64_thunk)
-       movl    %ds, %eax
-       push    %rax
-       movl    %es, %eax
-       push    %rax
-       movl    %ss, %eax
-       push    %rax
-
-       subq    $32, %rsp
-       movl    %esi, 0x0(%rsp)
-       movl    %edx, 0x4(%rsp)
-       movl    %ecx, 0x8(%rsp)
-       movq    %r8, %rsi
-       movl    %esi, 0xc(%rsp)
-       movq    %r9, %rsi
-       movl    %esi,  0x10(%rsp)
-
-       sgdt    save_gdt(%rip)
-
-       leaq    1f(%rip), %rbx
-       movq    %rbx, func_rt_ptr(%rip)
-
-       /* Switch to gdt with 32-bit segments */
-       movl    64(%rsp), %eax
-       lgdt    (%rax)
-
-       leaq    efi_enter32(%rip), %rax
-       pushq   $__KERNEL_CS
-       pushq   %rax
-       lretq
-
-1:     addq    $32, %rsp
-
-       lgdt    save_gdt(%rip)
-
-       pop     %rbx
-       movl    %ebx, %ss
-       pop     %rbx
-       movl    %ebx, %es
-       pop     %rbx
-       movl    %ebx, %ds
-
-       /*
-        * Convert 32-bit status code into 64-bit.
-        */
-       test    %rax, %rax
-       jz      1f
-       movl    %eax, %ecx
-       andl    $0x0fffffff, %ecx
-       andl    $0xf0000000, %eax
-       shl     $32, %rax
-       or      %rcx, %rax
-1:
-       ret
-ENDPROC(__efi64_thunk)
-
-ENTRY(efi_exit32)
-       movq    func_rt_ptr(%rip), %rax
-       push    %rax
-       mov     %rdi, %rax
-       ret
-ENDPROC(efi_exit32)
-
-       .code32
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-ENTRY(efi_enter32)
-       movl    $__KERNEL_DS, %eax
-       movl    %eax, %ds
-       movl    %eax, %es
-       movl    %eax, %ss
-
-       /* Reload pgtables */
-       movl    %cr3, %eax
-       movl    %eax, %cr3
-
-       /* Disable paging */
-       movl    %cr0, %eax
-       btrl    $X86_CR0_PG_BIT, %eax
-       movl    %eax, %cr0
-
-       /* Disable long mode via EFER */
-       movl    $MSR_EFER, %ecx
-       rdmsr
-       btrl    $_EFER_LME, %eax
-       wrmsr
-
-       call    *%edi
-
-       /* We must preserve return value */
-       movl    %eax, %edi
-
-       /*
-        * Some firmware will return with interrupts enabled. Be sure to
-        * disable them before we switch GDTs.
-        */
-       cli
-
-       movl    68(%esp), %eax
-       movl    %eax, 2(%eax)
-       lgdtl   (%eax)
-
-       movl    %cr4, %eax
-       btsl    $(X86_CR4_PAE_BIT), %eax
-       movl    %eax, %cr4
-
-       movl    %cr3, %eax
-       movl    %eax, %cr3
-
-       movl    $MSR_EFER, %ecx
-       rdmsr
-       btsl    $_EFER_LME, %eax
-       wrmsr
-
-       xorl    %eax, %eax
-       lldt    %ax
-
-       movl    72(%esp), %eax
-       pushl   $__KERNEL_CS
-       pushl   %eax
-
-       /* Enable paging */
-       movl    %cr0, %eax
-       btsl    $X86_CR0_PG_BIT, %eax
-       movl    %eax, %cr0
-       lret
-ENDPROC(efi_enter32)
-
-       .data
-       .balign 8
-       .global efi32_boot_gdt
-efi32_boot_gdt:        .word   0
-               .quad   0
-
-save_gdt:      .word   0
-               .quad   0
-func_rt_ptr:   .quad   0
-
-       .global efi_gdt64
-efi_gdt64:
-       .word   efi_gdt64_end - efi_gdt64
-       .long   0                       /* Filled out by user */
-       .word   0
-       .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
-       .quad   0x0080890000000000      /* TS descriptor */
-       .quad   0x0000000000000000      /* TS continued */
-efi_gdt64_end:
-#endif /* CONFIG_EFI_MIXED */
-
        .data
 ENTRY(efi_scratch)
        .fill 3,8,0
index 8806fa73e6e6d22337ff69c708583adf3c38cbbf..ff85d28c50f261c728eb0731391706b1af3777af 100644 (file)
@@ -1,9 +1,26 @@
 /*
  * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ *
+ * Support for invoking 32-bit EFI runtime services from a 64-bit
+ * kernel.
+ *
+ * The below thunking functions are only used after ExitBootServices()
+ * has been called. This simplifies things considerably as compared with
+ * the early EFI thunking because we can leave all the kernel state
+ * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
+ * services from __KERNEL32_CS. This means we can continue to service
+ * interrupts across an EFI mixed mode call.
+ *
+ * We do, however, need to handle the fact that we're running in a full
+ * 64-bit virtual address space. Things like the stack and instruction
+ * addresses need to be accessible by the 32-bit firmware, so we rely on
+ * using the identity mappings in the EFI page table to access the stack
+ * and kernel text (see efi_setup_page_tables()).
  */
 
 #include <linux/linkage.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
        .text
        .code64
@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
        leaq    efi_exit32(%rip), %rbx
        subq    %rax, %rbx
        movl    %ebx, 8(%rsp)
-       leaq    efi_gdt64(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 2(%ebx)
-       movl    %ebx, 4(%rsp)
-       leaq    efi_gdt32(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 2(%ebx)
-       movl    %ebx, (%rsp)
 
        leaq    __efi64_thunk(%rip), %rbx
        subq    %rax, %rbx
@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
        retq
 ENDPROC(efi64_thunk)
 
-       .data
-efi_gdt32:
-       .word   efi_gdt32_end - efi_gdt32
-       .long   0                       /* Filled out above */
-       .word   0
-       .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00cf9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
-efi_gdt32_end:
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /* Switch to 32-bit descriptor */
+       pushq   $__KERNEL32_CS
+       leaq    efi_enter32(%rip), %rax
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
 
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       movl    72(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+func_rt_ptr:           .quad 0
 efi_saved_sp:          .quad 0
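
The status fixup at the end of __efi64_thunk above (the andl/shl/or sequence)
is equivalent to roughly the following C sketch, which moves the error/warning
bits from the top nibble of the 32-bit EFI status into the top of the 64-bit
status (efi32_status_to_u64() is a made-up name for illustration):

	static inline u64 efi32_status_to_u64(u32 status)
	{
		if (!status)
			return 0;
		return ((u64)(status & 0xf0000000) << 32) |
		       (status & 0x0fffffff);
	}
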
index 1bbedc4b0f88d46bee5000779c4ef5aa4e4d0411..3005f0c89f2ecfbcc817c7e379c97586d5524e7f 100644 (file)
@@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void)
                intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
        else {
                intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-               pr_info("ARCH: Uknown SoC, assuming PENWELL!\n");
+               pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
        }
 
 out:
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644 (file)
index 0000000..9cc57ed
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IMR) += imr.o
+obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644 (file)
index 0000000..0ee619f
--- /dev/null
@@ -0,0 +1,661 @@
+/**
+ * imr.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR registers define an isolated region of memory that can
+ * be masked to prohibit certain system agents from accessing memory.
+ * When a device behind a masked port performs an access - snooped or
+ * not - an IMR may optionally prevent that transaction from changing
+ * the state of memory or from getting correct data in response to the
+ * operation.
+ *
+ * Write data will be dropped and reads will return 0xFFFFFFFF; the
+ * system will reset and the system BIOS will print out an error message to
+ * inform the user that an IMR has been violated.
+ *
+ * This code is based on the Linux MTRR code and reference code from
+ * Intel's Quark BSP EFI, Linux and grub code.
+ *
+ * See quark-x1000-datasheet.pdf for register definitions.
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/iosf_mbi.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct imr_device {
+       struct dentry   *file;
+       bool            init;
+       struct mutex    lock;
+       int             max_imr;
+       int             reg_base;
+};
+
+static struct imr_device imr_dev;
+
+/*
+ * IMR read/write mask control registers.
+ * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
+ * bit definitions.
+ *
+ * addr_lo
+ * 31          Lock bit
+ * 30:24       Reserved
+ * 23:2                1 KiB aligned lo address
+ * 1:0         Reserved
+ *
+ * addr_hi
+ * 31:24       Reserved
+ * 23:2                1 KiB aligned hi address
+ * 1:0         Reserved
+ */
+#define IMR_LOCK       BIT(31)
+
+struct imr_regs {
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 rmask;
+       u32 wmask;
+};
+
+#define IMR_NUM_REGS   (sizeof(struct imr_regs)/sizeof(u32))
+#define IMR_SHIFT      8
+#define imr_to_phys(x) ((x) << IMR_SHIFT)
+#define phys_to_imr(x) ((x) >> IMR_SHIFT)
+
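
As a quick illustration of the encoding above (values picked arbitrarily):
with IMR_SHIFT == 8 the IMR registers hold the physical address shifted right
by eight bits, so

	phys_addr_t base = 0x40000;		/* example 1 KiB aligned address */
	u32 encoded = phys_to_imr(base);	/* 0x40000 >> 8 == 0x400 */
	/* and imr_to_phys(encoded) == 0x40000 again */
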
+/**
+ * imr_is_enabled - true if an IMR is enabled, false otherwise.
+ *
+ * Determines if an IMR is enabled based on address range and read/write
+ * mask. An IMR set with an address range set to zero and a read/write
+ * access mask set to all is considered to be disabled. An IMR in any
+ * other state - for example set to zero but without read/write access
+ * all is considered to be enabled. This definition of disabled is how
+ * firmware switches off an IMR and is maintained in kernel for
+ * consistency.
+ *
+ * @imr:       pointer to IMR descriptor.
+ * @return:    true if IMR enabled false if disabled.
+ */
+static inline int imr_is_enabled(struct imr_regs *imr)
+{
+       return !(imr->rmask == IMR_READ_ACCESS_ALL &&
+                imr->wmask == IMR_WRITE_ACCESS_ALL &&
+                imr_to_phys(imr->addr_lo) == 0 &&
+                imr_to_phys(imr->addr_hi) == 0);
+}
+
+/**
+ * imr_read - read an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @imr_id:    IMR entry to read.
+ * @imr:       IMR structure representing address and access masks.
+ * @return:    0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+       u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+       int ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->addr_lo);
+       if (ret)
+               return ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->addr_hi);
+       if (ret)
+               return ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->rmask);
+       if (ret)
+               return ret;
+
+       return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->wmask);
+}
+
+/**
+ * imr_write - write an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ * Note lock bits need to be written independently of address bits.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @imr_id:    IMR entry to write.
+ * @imr:       IMR structure representing address and access masks.
+ * @lock:      indicates if the IMR lock bit should be applied.
+ * @return:    0 on success or error code passed from iosf_mbi on failure.
+ */
+static int imr_write(struct imr_device *idev, u32 imr_id,
+                    struct imr_regs *imr, bool lock)
+{
+       unsigned long flags;
+       u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+       int ret;
+
+       local_irq_save(flags);
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
+                               imr->addr_lo);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->addr_hi);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->rmask);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->wmask);
+       if (ret)
+               goto failed;
+
+       /* Lock bit must be set separately to addr_lo address bits. */
+       if (lock) {
+               imr->addr_lo |= IMR_LOCK;
+               ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                                       reg - IMR_NUM_REGS, imr->addr_lo);
+               if (ret)
+                       goto failed;
+       }
+
+       local_irq_restore(flags);
+       return 0;
+failed:
+       /*
+        * If writing to the IOSF failed then we're in an unknown state,
+        * likely a very bad state. An IMR in an invalid state will almost
+        * certainly lead to a memory access violation.
+        */
+       local_irq_restore(flags);
+       WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
+            imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
+
+       return ret;
+}
+
+/**
+ * imr_dbgfs_state_show - print state of IMR registers.
+ *
+ * @s:         pointer to seq_file for output.
+ * @unused:    unused parameter.
+ * @return:    0 on success or error code passed from iosf_mbi on failure.
+ */
+static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
+{
+       phys_addr_t base;
+       phys_addr_t end;
+       int i;
+       struct imr_device *idev = s->private;
+       struct imr_regs imr;
+       size_t size;
+       int ret = -ENODEV;
+
+       mutex_lock(&idev->lock);
+
+       for (i = 0; i < idev->max_imr; i++) {
+
+               ret = imr_read(idev, i, &imr);
+               if (ret)
+                       break;
+
+               /*
+                * Remember to add IMR_ALIGN bytes to the size to account for
+                * the inherent IMR_ALIGN bytes hidden by the masked-away
+                * lower ten bits.
+                */
+               if (imr_is_enabled(&imr)) {
+                       base = imr_to_phys(imr.addr_lo);
+                       end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+               } else {
+                       base = 0;
+                       end = 0;
+               }
+               size = end - base;
+               seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
+                          "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
+                          &base, &end, size, imr.rmask, imr.wmask,
+                          imr_is_enabled(&imr) ? "enabled " : "disabled",
+                          imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
+       }
+
+       mutex_unlock(&idev->lock);
+       return ret;
+}
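
For reference, each line produced by the seq_printf() above looks roughly as follows; the index, addresses and mask values are purely illustrative:

	imr02: base=0x01200000, end=0x012fffff, size=0x000fffff rmask=0x00000001, wmask=0x00000001, enabled , unlocked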
+
+/**
+ * imr_state_open - debugfs open callback.
+ *
+ * @inode:     pointer to struct inode.
+ * @file:      pointer to struct file.
+ * @return:    result of single open.
+ */
+static int imr_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, imr_dbgfs_state_show, inode->i_private);
+}
+
+static const struct file_operations imr_state_ops = {
+       .open           = imr_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/**
+ * imr_debugfs_register - register debugfs hooks.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:    0 on success - errno on failure.
+ */
+static int imr_debugfs_register(struct imr_device *idev)
+{
+       idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
+                                        idev, &imr_state_ops);
+       return PTR_ERR_OR_ZERO(idev->file);
+}
+
+/**
+ * imr_debugfs_unregister - unregister debugfs hooks.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:
+ */
+static void imr_debugfs_unregister(struct imr_device *idev)
+{
+       debugfs_remove(idev->file);
+}
+
+/**
+ * imr_check_params - check that the passed address range is IMR aligned and non-zero in size.
+ *
+ * @base:      base address of intended IMR.
+ * @size:      size of intended IMR.
+ * @return:    zero on valid range, -EINVAL on unaligned base/size or zero size.
+ */
+static int imr_check_params(phys_addr_t base, size_t size)
+{
+       if ((base & IMR_MASK) || (size & IMR_MASK)) {
+               pr_err("base %pa size 0x%08zx must align to 1KiB\n",
+                       &base, size);
+               return -EINVAL;
+       }
+       if (size == 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
+ *
+ * IMR addr_hi has a built-in offset of IMR_ALIGN (0x400) bytes beyond the
+ * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
+ * as a result.
+ *
+ * @size:      input size bytes.
+ * @return:    reduced size.
+ */
+static inline size_t imr_raw_size(size_t size)
+{
+       return size - IMR_ALIGN;
+}
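
A worked example of that accounting (editorial sketch; the helper name is hypothetical):

static inline void imr_raw_size_example(void)
{
	size_t size = 0x1000;			/* 4 KiB requested */
	size_t raw  = imr_raw_size(size);	/* 0x1000 - 0x400 = 0xc00 */

	/*
	 * addr_hi is written with phys_to_imr(base + raw); the hardware's
	 * implicit +IMR_ALIGN then extends coverage to the full 4 KiB.
	 */
	(void)raw;
}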
+
+/**
+ * imr_address_overlap - detects an address overlap.
+ *
+ * @addr:      address to check against an existing IMR.
+ * @imr:       imr being checked.
+ * @return:    true for overlap false for no overlap.
+ */
+static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
+{
+       return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
+}
+
+/**
+ * imr_add_range - add an Isolated Memory Region.
+ *
+ * @base:      physical base address of region aligned to 1KiB.
+ * @size:      physical size of region in bytes must be aligned to 1KiB.
+ * @rmask:     read access mask.
+ * @wmask:     write access mask.
+ * @lock:      indicates whether or not to permanently lock this region.
+ * @return:    zero on success or negative value indicating error.
+ */
+int imr_add_range(phys_addr_t base, size_t size,
+                 unsigned int rmask, unsigned int wmask, bool lock)
+{
+       phys_addr_t end;
+       unsigned int i;
+       struct imr_device *idev = &imr_dev;
+       struct imr_regs imr;
+       size_t raw_size;
+       int reg;
+       int ret;
+
+       if (WARN_ONCE(idev->init == false, "driver not initialized"))
+               return -ENODEV;
+
+       ret = imr_check_params(base, size);
+       if (ret)
+               return ret;
+
+       /* Tweak the size value. */
+       raw_size = imr_raw_size(size);
+       end = base + raw_size;
+
+       /*
+        * Check for reserved IMR value common to firmware, kernel and grub
+        * indicating a disabled IMR.
+        */
+       imr.addr_lo = phys_to_imr(base);
+       imr.addr_hi = phys_to_imr(end);
+       imr.rmask = rmask;
+       imr.wmask = wmask;
+       if (!imr_is_enabled(&imr))
+               return -ENOTSUPP;
+
+       mutex_lock(&idev->lock);
+
+       /*
+        * Find a free IMR while checking for an existing overlapping range.
+        * Note there's no restriction in silicon to prevent IMR overlaps.
+        * For the sake of simplicity and ease in defining/debugging an IMR
+        * memory map we exclude IMR overlaps.
+        */
+       reg = -1;
+       for (i = 0; i < idev->max_imr; i++) {
+               ret = imr_read(idev, i, &imr);
+               if (ret)
+                       goto failed;
+
+               /* Find overlap @ base or end of requested range. */
+               ret = -EINVAL;
+               if (imr_is_enabled(&imr)) {
+                       if (imr_address_overlap(base, &imr))
+                               goto failed;
+                       if (imr_address_overlap(end, &imr))
+                               goto failed;
+               } else {
+                       reg = i;
+               }
+       }
+
+       /* Error out if we have no free IMR entries. */
+       if (reg == -1) {
+               ret = -ENOMEM;
+               goto failed;
+       }
+
+       pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
+                reg, &base, &end, raw_size, rmask, wmask);
+
+       /* Enable IMR at specified range and access mask. */
+       imr.addr_lo = phys_to_imr(base);
+       imr.addr_hi = phys_to_imr(end);
+       imr.rmask = rmask;
+       imr.wmask = wmask;
+
+       ret = imr_write(idev, reg, &imr, lock);
+       if (ret < 0) {
+               /*
+                * In the highly unlikely event that iosf_mbi_write failed,
+                * attempt to roll back the IMR setup, skipping the trapping
+                * of further IOSF write failures.
+                */
+               imr.addr_lo = 0;
+               imr.addr_hi = 0;
+               imr.rmask = IMR_READ_ACCESS_ALL;
+               imr.wmask = IMR_WRITE_ACCESS_ALL;
+               imr_write(idev, reg, &imr, false);
+       }
+failed:
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(imr_add_range);
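
A minimal, hypothetical caller of the exported API above; the CPU-only masks mirror the usage in imr_fixup_memmap() and the self test later in this series, but the function itself is illustrative only:

static int example_protect_buffer(phys_addr_t buf_base)
{
	int ret;

	/* buf_base and the 4 KiB size must both be 1 KiB aligned. */
	ret = imr_add_range(buf_base, 0x1000, IMR_CPU, IMR_CPU, false);
	if (ret < 0)
		return ret;

	/* ... buffer is now inaccessible to non-CPU agents ... */

	return imr_remove_range(buf_base, 0x1000);
}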
+
+/**
+ * __imr_remove_range - delete an Isolated Memory Region.
+ *
+ * This function allows you to delete an IMR either by the index specified in
+ * reg or by the address range specified by base and size respectively. If you
+ * specify an index on its own the base and size parameters are ignored.
+ * __imr_remove_range(0, base, size); delete IMR at index 0, base/size ignored.
+ * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
+ *
+ * @reg:       imr index to remove.
+ * @base:      physical base address of region aligned to 1 KiB.
+ * @size:      physical size of region in bytes aligned to 1 KiB.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
+{
+       phys_addr_t end;
+       bool found = false;
+       unsigned int i;
+       struct imr_device *idev = &imr_dev;
+       struct imr_regs imr;
+       size_t raw_size;
+       int ret = 0;
+
+       if (WARN_ONCE(idev->init == false, "driver not initialized"))
+               return -ENODEV;
+
+       /*
+        * Validate address range if deleting by address, else we are
+        * deleting by index where base and size will be ignored.
+        */
+       if (reg == -1) {
+               ret = imr_check_params(base, size);
+               if (ret)
+                       return ret;
+       }
+
+       /* Tweak the size value. */
+       raw_size = imr_raw_size(size);
+       end = base + raw_size;
+
+       mutex_lock(&idev->lock);
+
+       if (reg >= 0) {
+               /* If a specific IMR is given try to use it. */
+               ret = imr_read(idev, reg, &imr);
+               if (ret)
+                       goto failed;
+
+               if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
+                       ret = -ENODEV;
+                       goto failed;
+               }
+               found = true;
+       } else {
+               /* Search for match based on address range. */
+               for (i = 0; i < idev->max_imr; i++) {
+                       ret = imr_read(idev, i, &imr);
+                       if (ret)
+                               goto failed;
+
+                       if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
+                               continue;
+
+                       if ((imr_to_phys(imr.addr_lo) == base) &&
+                           (imr_to_phys(imr.addr_hi) == end)) {
+                               found = true;
+                               reg = i;
+                               break;
+                       }
+               }
+       }
+
+       if (!found) {
+               ret = -ENODEV;
+               goto failed;
+       }
+
+       pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
+
+       /* Tear down the IMR. */
+       imr.addr_lo = 0;
+       imr.addr_hi = 0;
+       imr.rmask = IMR_READ_ACCESS_ALL;
+       imr.wmask = IMR_WRITE_ACCESS_ALL;
+
+       ret = imr_write(idev, reg, &imr, false);
+
+failed:
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+
+/**
+ * imr_remove_range - delete an Isolated Memory Region by address
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by base and size respectively.
+ * imr_remove_range(base, size); delete IMR from base to base+size.
+ *
+ * @base:      physical base address of region aligned to 1 KiB.
+ * @size:      physical size of region in bytes aligned to 1 KiB.
+ * @return:    -EINVAL on invalid range
+ *             -ENODEV if no matching IMR exists or the matching IMR is locked
+ *             0 on success.
+ */
+int imr_remove_range(phys_addr_t base, size_t size)
+{
+       return __imr_remove_range(-1, base, size);
+}
+EXPORT_SYMBOL_GPL(imr_remove_range);
+
+/**
+ * imr_clear - delete an Isolated Memory Region by index
+ *
+ * This function allows you to delete an IMR by its index. Useful for initial
+ * sanitization of the IMR address map.
+ * imr_clear(reg); delete the IMR at index reg.
+ *
+ * @reg:       imr index to remove.
+ * @return:    -EINVAL on out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+static inline int imr_clear(int reg)
+{
+       return __imr_remove_range(reg, 0, 0);
+}
+
+/**
+ * imr_fixup_memmap - Tear down IMRs used during bootup.
+ *
+ * BIOS and Grub both set up IMRs around the compressed kernel and initrd
+ * memory that need to be removed before the kernel hands out one of the IMR
+ * encased addresses to a downstream DMA agent such as the SD or Ethernet
+ * controller. IMRs on Galileo are set up to immediately reset the system on
+ * violation. As a result, if you're running a root filesystem from SD you'll
+ * need the boot-time IMRs torn down or you'll find seemingly random resets
+ * when using your filesystem.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:
+ */
+static void __init imr_fixup_memmap(struct imr_device *idev)
+{
+       phys_addr_t base = virt_to_phys(&_text);
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       int i;
+       int ret;
+
+       /* Tear down all existing unlocked IMRs. */
+       for (i = 0; i < idev->max_imr; i++)
+               imr_clear(i);
+
+       /*
+        * Setup a locked IMR around the physical extent of the kernel
+        * from the beginning of the .text section to the end of the
+        * .rodata section as one physically contiguous block.
+        */
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+       if (ret < 0) {
+               pr_err("unable to setup IMR for kernel: (%p - %p)\n",
+                       &_text, &__end_rodata);
+       } else {
+               pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
+                       size / 1024, &_text, &__end_rodata);
+       }
+
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+       { X86_VENDOR_INTEL, 5, 9 },     /* Intel Quark SoC X1000. */
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, imr_ids);
+
+/**
+ * imr_init - entry point for IMR driver.
+ *
+ * return: -ENODEV for no IMR support, 0 if good to go.
+ */
+static int __init imr_init(void)
+{
+       struct imr_device *idev = &imr_dev;
+       int ret;
+
+       if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
+               return -ENODEV;
+
+       idev->max_imr = QUARK_X1000_IMR_MAX;
+       idev->reg_base = QUARK_X1000_IMR_REGBASE;
+       idev->init = true;
+
+       mutex_init(&idev->lock);
+       ret = imr_debugfs_register(idev);
+       if (ret != 0)
+               pr_warn("debugfs register failed!\n");
+       imr_fixup_memmap(idev);
+       return 0;
+}
+
+/**
+ * imr_exit - exit point for IMR code.
+ *
+ * Deregisters debugfs, leave IMR state as-is.
+ *
+ * return:
+ */
+static void __exit imr_exit(void)
+{
+       imr_debugfs_unregister(&imr_dev);
+}
+
+module_init(imr_init);
+module_exit(imr_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644 (file)
index 0000000..c9a0838
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * imr_selftest.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR self test. The purpose of this module is to run a set of tests on the
+ * IMR API to validate its sanity. We check for overlapping and reserved
+ * addresses and for setup/teardown sanity.
+ *
+ */
+
+#include <asm-generic/sections.h>
+#include <asm/imr.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define SELFTEST KBUILD_MODNAME ": "
+/**
+ * imr_self_test_result - Print result string for self test.
+ *
+ * @res:       result code - true if test passed, false otherwise.
+ * @fmt:       format string.
+ * ...         variadic argument list.
+ */
+static void __init imr_self_test_result(int res, const char *fmt, ...)
+{
+       va_list vlist;
+
+       /* Print pass/fail. */
+       if (res)
+               pr_info(SELFTEST "pass ");
+       else
+               pr_info(SELFTEST "fail ");
+
+       /* Print variable string. */
+       va_start(vlist, fmt);
+       vprintk(fmt, vlist);
+       va_end(vlist);
+
+       /* Optional warning. */
+       WARN(res == 0, "test failed");
+}
+#undef SELFTEST
+
+/**
+ * imr_self_test
+ *
+ * Run a handful of simple tests against the IMR API to verify overlap
+ * detection, zero sized allocations and 1 KiB sized areas.
+ *
+ */
+static void __init imr_self_test(void)
+{
+       phys_addr_t base  = virt_to_phys(&_text);
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+       int ret;
+
+       /* Test zero zero. */
+       ret = imr_add_range(0, 0, 0, 0, false);
+       imr_self_test_result(ret < 0, "zero sized IMR\n");
+
+       /* Test exact overlap. */
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test overlap with base inside of existing. */
+       base += size - IMR_ALIGN;
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test overlap with end inside of existing. */
+       base -= size + IMR_ALIGN * 2;
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
+       ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
+                           IMR_WRITE_ACCESS_ALL, false);
+       imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
+
+       /* Test that a 1 KiB IMR @ zero with CPU only will work. */
+       ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
+       if (ret >= 0) {
+               ret = imr_remove_range(0, IMR_ALIGN);
+               imr_self_test_result(ret == 0, "teardown - cpu-access\n");
+       }
+
+       /* Test 2 KiB works. */
+       size = IMR_ALIGN * 2;
+       ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
+                           IMR_WRITE_ACCESS_ALL, false);
+       imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
+       if (ret >= 0) {
+               ret = imr_remove_range(0, size);
+               imr_self_test_result(ret == 0, "teardown 2KiB\n");
+       }
+}
+
+/**
+ * imr_self_test_init - entry point for IMR driver.
+ *
+ * return: -ENODEV for no IMR support, 0 if good to go.
+ */
+static int __init imr_self_test_init(void)
+{
+       imr_self_test();
+       return 0;
+}
+
+/**
+ * imr_self_test_exit - exit point for IMR code.
+ *
+ * return:
+ */
+static void __exit imr_self_test_exit(void)
+{
+}
+
+module_init(imr_self_test_init);
+module_exit(imr_self_test_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
+MODULE_LICENSE("Dual BSD/GPL");
index 31776d0efc8c40fa0aa6989731b1ddf23229029c..d7ec4e251c0a2e53572438891508dfeb4a3a016a 100644 (file)
@@ -17,6 +17,7 @@
        .text
        .globl __kernel_sigreturn
        .type __kernel_sigreturn,@function
+       nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
        ALIGN
 __kernel_sigreturn:
 .LSTART_sigreturn:
index bd8b8459c3d05923286b4dc6b5ebb7e37e51d926..5240f563076de2e0e27c92af2d04ad03d213ee8f 100644 (file)
@@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val)
        BUG_ON(val);
 }
 #endif
+
+static u64 xen_read_msr_safe(unsigned int msr, int *err)
+{
+       u64 val;
+
+       val = native_read_msr_safe(msr, err);
+       switch (msr) {
+       case MSR_IA32_APICBASE:
+#ifdef CONFIG_X86_X2APIC
+               if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
+#endif
+                       val &= ~X2APIC_ENABLE;
+               break;
+       }
+       return val;
+}
+
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .wbinvd = native_wbinvd,
 
-       .read_msr = native_read_msr_safe,
+       .read_msr = xen_read_msr_safe,
        .write_msr = xen_write_msr_safe,
 
        .read_tsc = native_read_tsc,
@@ -1741,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_32
        i386_start_kernel();
 #else
+       cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
        x86_64_start_reservations((char *)__pa_symbol(&boot_params));
 #endif
 }
index 740ae3026a148ec54800710427a455dc890351d3..9f93af56a5fc7bd4cf263406faf96f8a5fa56466 100644 (file)
@@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn)
                if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
                        p2m_init(p2m);
                else
-                       p2m_init_identity(p2m, pfn);
+                       p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
 
                spin_lock_irqsave(&p2m_update_lock, flags);
 
index 23b45eb9a89ce4d56f8a73e181d004b107dc1f81..956374c1edbc31e4c1eb50c3fb29cb8828ad44b5 100644 (file)
@@ -41,7 +41,7 @@ static u8 zero_stats;
 static inline void check_zero(void)
 {
        u8 ret;
-       u8 old = ACCESS_ONCE(zero_stats);
+       u8 old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
        struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
        int cpu = smp_processor_id();
        u64 start;
+       __ticket_t head;
        unsigned long flags;
 
        /* If kicker interrupts not initialized yet, just spin */
@@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         */
        __ticket_enter_slowpath(lock);
 
+       /* make sure enter_slowpath, which is atomic does not cross the read */
+       smp_mb__after_atomic();
+
        /*
         * check again make sure it didn't become free while
         * we weren't looking
         */
-       if (ACCESS_ONCE(lock->tickets.head) == want) {
+       head = READ_ONCE(lock->tickets.head);
+       if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }
@@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
                /* Make sure we read lock before want */
-               if (ACCESS_ONCE(w->lock) == lock &&
-                   ACCESS_ONCE(w->want) == next) {
+               if (READ_ONCE(w->lock) == lock &&
+                   READ_ONCE(w->want) == next) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
index 9273d0969ebd6377a02cf0be189d34ea619afe3e..5b9c6d5c3636ad6412c7b898c44e4709b35597d1 100644 (file)
@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;
 
+       if (tg->stats_cpu == NULL)
+               return 0;
+
        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
index 02e835f3cf8aa76326b9994768b75f2d39c39fed..37fb1904760396751f27778e819d90c0fd9af83b 100644 (file)
@@ -65,6 +65,7 @@ struct lpss_private_data;
 
 struct lpss_device_desc {
        unsigned int flags;
+       const char *clk_con_id;
        unsigned int prv_offset;
        size_t prv_size_override;
        void (*setup)(struct lpss_private_data *pdata);
@@ -105,7 +106,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
        }
 }
 
-static void byt_i2c_setup(struct lpss_private_data *pdata)
+static void lpss_deassert_reset(struct lpss_private_data *pdata)
 {
        unsigned int offset;
        u32 val;
@@ -114,9 +115,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
        val = readl(pdata->mmio_base + offset);
        val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
        writel(val, pdata->mmio_base + offset);
+}
+
+#define LPSS_I2C_ENABLE                        0x6c
+
+static void byt_i2c_setup(struct lpss_private_data *pdata)
+{
+       lpss_deassert_reset(pdata);
 
        if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
                pdata->fixed_clk_rate = 133000000;
+
+       writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
@@ -125,12 +135,13 @@ static struct lpss_device_desc lpt_dev_desc = {
 };
 
 static struct lpss_device_desc lpt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_LTR,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
        .prv_offset = 0x800,
 };
 
 static struct lpss_device_desc lpt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+       .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
 };
@@ -147,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 
 static struct lpss_device_desc byt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+       .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
 };
@@ -166,6 +178,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
        .setup = byt_i2c_setup,
 };
 
+static struct lpss_device_desc bsw_spi_dev_desc = {
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+       .prv_offset = 0x400,
+       .setup = lpss_deassert_reset,
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -198,7 +216,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        /* Braswell LPSS devices */
        { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
        { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
-       { "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
+       { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
        { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
 
        { "INT3430", LPSS_ADDR(lpt_dev_desc) },
@@ -298,7 +316,7 @@ out:
                return PTR_ERR(clk);
 
        pdata->clk = clk;
-       clk_register_clkdev(clk, NULL, devname);
+       clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
        return 0;
 }
 
index 982b67faaaf32c0de360aa35449a699075a6550a..a8dd2f7633822b05f5fdeeaffe5a00496f4710be 100644 (file)
@@ -680,7 +680,7 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
                /* Enable GPE for event processing (SCI_EVT=1) */
                if (!resuming)
                        acpi_ec_submit_request(ec);
-               pr_info("+++++ EC started +++++\n");
+               pr_debug("EC started\n");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
 }
@@ -712,7 +712,7 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
                        acpi_ec_complete_request(ec);
                clear_bit(EC_FLAGS_STARTED, &ec->flags);
                clear_bit(EC_FLAGS_STOPPED, &ec->flags);
-               pr_info("+++++ EC stopped +++++\n");
+               pr_debug("EC stopped\n");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
 }
index 4752b99399870efd068a1f1a6c656b1ebe333349..5589a6e2a02346e3b2ce48656b3facea1abfc621 100644 (file)
@@ -42,11 +42,13 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
         * CHECKME: len might be required to check versus a minimum
         * length as well. 1 for io is fine, but for memory it does
         * not make any sense at all.
+        * Note: some BIOSes report incorrect length for ACPI address space
+        * descriptor, so remove check of 'reslen == len' to avoid regression.
         */
-       if (len && reslen && reslen == len && start <= end)
+       if (len && reslen && start <= end)
                return true;
 
-       pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+       pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
                io ? "io" : "mem", start, end, len);
 
        return false;
index 88a4f99dd2a7ccc117924a73b47ca0d5a93c8df5..26eb70c8f5184f878ae489c4130e1b382d7394a2 100644 (file)
@@ -540,6 +540,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
                },
        },
+       {
+        /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"),
+               },
+       },
 
        {
         /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
@@ -2101,7 +2110,8 @@ static int __init intel_opregion_present(void)
 
 int acpi_video_register(void)
 {
-       int result = 0;
+       int ret;
+
        if (register_count) {
                /*
                 * if the function of acpi_video_register is already called,
@@ -2113,9 +2123,9 @@ int acpi_video_register(void)
        mutex_init(&video_list_lock);
        INIT_LIST_HEAD(&video_bus_head);
 
-       result = acpi_bus_register_driver(&acpi_video_bus);
-       if (result < 0)
-               return -ENODEV;
+       ret = acpi_bus_register_driver(&acpi_video_bus);
+       if (ret)
+               return ret;
 
        /*
         * When the acpi_video_bus is loaded successfully, increase
@@ -2167,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
 
 static int __init acpi_video_init(void)
 {
+       /*
+        * Let the module load even if ACPI is disabled (e.g. due to
+        * a broken BIOS) so that i915.ko can still be loaded on such
+        * old systems without an AcpiOpRegion.
+        *
+        * acpi_video_register() will report -ENODEV later as well due
+        * to acpi_disabled when i915.ko tries to register itself afterwards.
+        */
+       if (acpi_disabled)
+               return 0;
+
        dmi_check_system(video_dmi_table);
 
        if (intel_opregion_present())
index 33b09b6568a4a464657b51667ffe8596c7893b2f..6607f3c6ace1033fd4ca449d579bd2b7638e3963 100644 (file)
@@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 {
        void *page_addr;
        unsigned long user_page_addr;
-       struct vm_struct tmp_area;
        struct page **page;
        struct mm_struct *mm;
 
@@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
                                proc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
-               tmp_area.addr = page_addr;
-               tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-               ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
-               if (ret) {
+               ret = map_kernel_range_noflush((unsigned long)page_addr,
+                                       PAGE_SIZE, PAGE_KERNEL, page);
+               flush_cache_vmap((unsigned long)page_addr,
+                               (unsigned long)page_addr + PAGE_SIZE);
+               if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                               proc->pid, page_addr);
                        goto err_map_kernel_failed;
index f9054cd36a7266bbed21db2503a63672d0d8040c..5389579c51204cf336f2689052ccf29c48b8af98 100644 (file)
@@ -869,6 +869,8 @@ try_offline_again:
         */
        ata_msleep(ap, 1);
 
+       sata_set_spd(link);
+
        /*
         * Now, bring the host controller online again, this can take time
         * as PHY reset and communication establishment, 1st D2H FIS and
index ba4abbe4693c3e29be764d66662295dd300d41c4..45937f88e77c88893f6f05430efcd2dd88449e9f 100644 (file)
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
 }
 
 static int pm_genpd_summary_one(struct seq_file *s,
-               struct generic_pm_domain *gpd)
+                               struct generic_pm_domain *genpd)
 {
        static const char * const status_lookup[] = {
                [GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
        struct gpd_link *link;
        int ret;
 
-       ret = mutex_lock_interruptible(&gpd->lock);
+       ret = mutex_lock_interruptible(&genpd->lock);
        if (ret)
                return -ERESTARTSYS;
 
-       if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+       if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                goto exit;
-       seq_printf(s, "%-30s  %-15s  ", gpd->name, status_lookup[gpd->status]);
+       seq_printf(s, "%-30s  %-15s  ", genpd->name, status_lookup[genpd->status]);
 
        /*
         * Modifications on the list require holding locks on both
         * master and slave, so we are safe.
-        * Also gpd->name is immutable.
+        * Also genpd->name is immutable.
         */
-       list_for_each_entry(link, &gpd->master_links, master_node) {
+       list_for_each_entry(link, &genpd->master_links, master_node) {
                seq_printf(s, "%s", link->slave->name);
-               if (!list_is_last(&link->master_node, &gpd->master_links))
+               if (!list_is_last(&link->master_node, &genpd->master_links))
                        seq_puts(s, ", ");
        }
 
-       list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+       list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
                kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
                if (kobj_path == NULL)
                        continue;
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
        seq_puts(s, "\n");
 exit:
-       mutex_unlock(&gpd->lock);
+       mutex_unlock(&genpd->lock);
 
        return 0;
 }
 
 static int pm_genpd_summary_show(struct seq_file *s, void *data)
 {
-       struct generic_pm_domain *gpd;
+       struct generic_pm_domain *genpd;
        int ret = 0;
 
        seq_puts(s, "    domain                      status         slaves\n");
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
        if (ret)
                return -ERESTARTSYS;
 
-       list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
-               ret = pm_genpd_summary_one(s, gpd);
+       list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+               ret = pm_genpd_summary_one(s, genpd);
                if (ret)
                        break;
        }
index c2744b30d5d92e9dde512e492cf9fdf44f21b5ef..aab7158d2afea87c5cee091eb2af3a1ae0a7222a 100644 (file)
@@ -730,6 +730,7 @@ void pm_system_wakeup(void)
        pm_abort_suspend = true;
        freeze_wake();
 }
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
 void pm_wakeup_clear(void)
 {
index d453a2c98ad0a6529170dc5a8f21bab9204255f7..81751a49d8bf2334612350bba52406b9af352258 100644 (file)
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        if (pos == 0) {
                memmove(blk + offset * map->cache_word_size,
                        blk, rbnode->blklen * map->cache_word_size);
-               bitmap_shift_right(present, present, offset, blklen);
+               bitmap_shift_left(present, present, offset, blklen);
        }
 
        /* update the rbnode block, its size and the base register */
index f373c35f9e1db239874589464b6375ebfe5d769a..da84f544c5443da0cf3928dd390ec81dcd77f669 100644 (file)
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
        for (i = start; i < end; i++) {
                regtmp = block_base + (i * map->reg_stride);
 
-               if (!regcache_reg_present(cache_present, i))
+               if (!regcache_reg_present(cache_present, i) ||
+                   !regmap_writeable(map, regtmp))
                        continue;
 
                val = regcache_get_val(map, block, i);
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
        for (i = start; i < end; i++) {
                regtmp = block_base + (i * map->reg_stride);
 
-               if (!regcache_reg_present(cache_present, i)) {
+               if (!regcache_reg_present(cache_present, i) ||
+                   !regmap_writeable(map, regtmp)) {
                        ret = regcache_sync_block_raw_flush(map, &data,
                                                            base, regtmp);
                        if (ret != 0)
index 6299a50a59607f6d5598eac804817cef47e8c106..a6c3f75b4b01e1145c6eb5bb9816182a0f9c9fde 100644 (file)
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                goto err_alloc;
        }
 
-       ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+       ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+                                  irq_flags | IRQF_ONESHOT,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
index cbdfbbf983927e85a4a83d94d20047f2fadf6357..ceb32dd52a6ca5a777e97541646866985251a9f0 100644 (file)
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define NVME_MINORS            (1U << MINORBITS)
 #define NVME_Q_DEPTH           1024
 #define NVME_AQ_DEPTH          64
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT          (admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
-#define IOD_TIMEOUT            (retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown")
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -76,7 +76,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
+
+static struct class *nvme_class;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-       struct llist_node node;
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];       /* nvme4294967295-65535\0 */
@@ -482,6 +482,115 @@ static int nvme_error_status(u16 status)
        }
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+       if (be32_to_cpu(pi->ref_tag) == v)
+               pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+       if (be32_to_cpu(pi->ref_tag) == p)
+               pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer.        Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+       struct nvme_ns *ns = req->rq_disk->private_data;
+       struct bio_integrity_payload *bip;
+       struct t10_pi_tuple *pi;
+       void *p, *pmap;
+       u32 i, nlb, ts, phys, virt;
+
+       if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+               return;
+
+       bip = bio_integrity(req->bio);
+       if (!bip)
+               return;
+
+       pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+       if (!pmap)
+               return;
+
+       p = pmap;
+       virt = bip_get_seed(bip);
+       phys = nvme_block_nr(ns, blk_rq_pos(req));
+       nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+       ts = ns->disk->integrity->tuple_size;
+
+       for (i = 0; i < nlb; i++, virt++, phys++) {
+               pi = (struct t10_pi_tuple *)p;
+               dif_swap(phys, virt, pi);
+               p += ts;
+       }
+       kunmap_atomic(pmap);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+       .name                   = "NVME_META_NOOP",
+       .generate_fn            = nvme_noop_generate,
+       .verify_fn              = nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+       struct blk_integrity integrity;
+
+       switch (ns->pi_type) {
+       case NVME_NS_DPS_PI_TYPE3:
+               integrity = t10_pi_type3_crc;
+               break;
+       case NVME_NS_DPS_PI_TYPE1:
+       case NVME_NS_DPS_PI_TYPE2:
+               integrity = t10_pi_type1_crc;
+               break;
+       default:
+               integrity = nvme_meta_noop;
+               break;
+       }
+       integrity.tuple_size = ns->ms;
+       blk_integrity_register(ns->disk, &integrity);
+       blk_queue_max_integrity_segments(ns->queue, 1);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static void nvme_dif_remap(struct request *req,
+                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+}
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+}
+#endif
+
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
@@ -512,9 +621,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                        "completing aborted command with status:%04x\n",
                        status);
 
-       if (iod->nents)
+       if (iod->nents) {
                dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
                        rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               if (blk_integrity_rq(req)) {
+                       if (!rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_complete);
+                       dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+                               rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               }
+       }
        nvme_free_iod(nvmeq->dev, iod);
 
        blk_mq_complete_request(req);
@@ -670,6 +786,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
        cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+       if (blk_integrity_rq(req)) {
+               cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+               switch (ns->pi_type) {
+               case NVME_NS_DPS_PI_TYPE3:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD;
+                       break;
+               case NVME_NS_DPS_PI_TYPE1:
+               case NVME_NS_DPS_PI_TYPE2:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD |
+                                       NVME_RW_PRINFO_PRCHK_REF;
+                       cmnd->rw.reftag = cpu_to_le32(
+                                       nvme_block_nr(ns, blk_rq_pos(req)));
+                       break;
+               }
+       } else if (ns->ms)
+               control |= NVME_RW_PRINFO_PRACT;
+
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
@@ -690,6 +824,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_iod *iod;
        enum dma_data_direction dma_dir;
 
+       /*
+        * If formatted with metadata, require that the block layer provide a
+        * buffer unless this namespace is formatted such that the metadata can
+        * be stripped/generated by the controller with PRACT=1.
+        */
+       if (ns->ms && !blk_integrity_rq(req)) {
+               if (!(ns->pi_type && ns->ms == 8)) {
+                       req->errors = -EFAULT;
+                       blk_mq_complete_request(req);
+                       return BLK_MQ_RQ_QUEUE_OK;
+               }
+       }
+
        iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
        if (!iod)
                return BLK_MQ_RQ_QUEUE_BUSY;
@@ -725,6 +872,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        iod->nents, dma_dir);
                        goto retry_cmd;
                }
+               if (blk_integrity_rq(req)) {
+                       if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+                               goto error_cmd;
+
+                       sg_init_table(iod->meta_sg, 1);
+                       if (blk_rq_map_integrity_sg(
+                                       req->q, req->bio, iod->meta_sg) != 1)
+                               goto error_cmd;
+
+                       if (rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_prep);
+
+                       if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+                               goto error_cmd;
+               }
        }
 
        nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +979,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
        return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-                                                               cmd_info)
-{
-       spin_lock_irq(&nvmeq->q_lock);
-       cancel_cmd_info(cmd_info, NULL);
-       spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
        struct task_struct *task;
        u32 result;
@@ -847,7 +1001,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
                                                u32 *result, unsigned timeout)
 {
-       int ret;
        struct sync_cmd_info cmdinfo;
        struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +1012,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 
        nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
-       set_current_state(TASK_KILLABLE);
-       ret = nvme_submit_cmd(nvmeq, cmd);
-       if (ret) {
-               nvme_finish_cmd(nvmeq, req->tag, NULL);
-               set_current_state(TASK_RUNNING);
-       }
-       ret = schedule_timeout(timeout);
-
-       /*
-        * Ensure that sync_completion has either run, or that it will
-        * never run.
-        */
-       nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
-
-       /*
-        * We never got the completion
-        */
-       if (cmdinfo.status == -EINTR)
-               return -EINTR;
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       nvme_submit_cmd(nvmeq, cmd);
+       schedule();
 
        if (result)
                *result = cmdinfo.result;
-
        return cmdinfo.status;
 }
 
@@ -1158,29 +1294,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       /*
-        * The aborted req will be completed on receiving the abort req.
-        * We enable the timer again. If hit twice, it'll cause a device reset,
-        * as the device then is in a faulty state.
-        */
-       int ret = BLK_EH_RESET_TIMER;
-
        dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
                                                        nvmeq->qid);
-
        spin_lock_irq(&nvmeq->q_lock);
-       if (!nvmeq->dev->initialized) {
-               /*
-                * Force cancelled command frees the request, which requires we
-                * return BLK_EH_NOT_HANDLED.
-                */
-               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-               ret = BLK_EH_NOT_HANDLED;
-       } else
-               nvme_abort_req(req);
+       nvme_abort_req(req);
        spin_unlock_irq(&nvmeq->q_lock);
 
-       return ret;
+       /*
+        * The aborted req will be completed on receiving the abort req.
+        * We enable the timer again. If hit twice, it'll cause a device reset,
+        * as the device then is in a faulty state.
+        */
+       return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1358,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
        struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
 
        spin_lock_irq(&nvmeq->q_lock);
-       nvme_process_cq(nvmeq);
        if (hctx && hctx->tags)
                blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1380,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
        }
        if (!qid && dev->admin_q)
                blk_mq_freeze_queue_start(dev->admin_q);
-       nvme_clear_queue(nvmeq);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +2002,24 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
        return 0;
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+       u32 logical_block_size = queue_logical_block_size(ns->queue);
+       ns->queue->limits.discard_zeroes_data = 0;
+       ns->queue->limits.discard_alignment = logical_block_size;
+       ns->queue->limits.discard_granularity = logical_block_size;
+       ns->queue->limits.max_discard_sectors = 0xffffffff;
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static int nvme_revalidate_disk(struct gendisk *disk)
 {
        struct nvme_ns *ns = disk->private_data;
        struct nvme_dev *dev = ns->dev;
        struct nvme_id_ns *id;
        dma_addr_t dma_addr;
-       int lbaf;
+       int lbaf, pi_type, old_ms;
+       unsigned short bs;
 
        id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
                                                                GFP_KERNEL);
@@ -1890,16 +2028,51 @@ static int nvme_revalidate_disk(struct gendisk *disk)
                                                                __func__);
                return 0;
        }
+       if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
+               dev_warn(&dev->pci_dev->dev,
+                       "identify failed ns:%d, setting capacity to 0\n",
+                       ns->ns_id);
+               memset(id, 0, sizeof(*id));
+       }
 
-       if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
-               goto free;
-
-       lbaf = id->flbas & 0xf;
+       old_ms = ns->ms;
+       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
        ns->lba_shift = id->lbaf[lbaf].ds;
+       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+
+       /*
+        * If identify namespace failed, use default 512 byte block size so
+        * block layer can use before failing read/write for 0 capacity.
+        */
+       if (ns->lba_shift == 0)
+               ns->lba_shift = 9;
+       bs = 1 << ns->lba_shift;
+
+       /* XXX: PI implementation requires metadata equal t10 pi tuple size */
+       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+       if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
+                               ns->ms != old_ms ||
+                               bs != queue_logical_block_size(disk->queue) ||
+                               (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+               blk_integrity_unregister(disk);
+
+       ns->pi_type = pi_type;
+       blk_queue_logical_block_size(ns->queue, bs);
+
+       if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
+                               !(id->flbas & NVME_NS_FLBAS_META_EXT))
+               nvme_init_integrity(ns);
+
+       if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
+               set_capacity(disk, 0);
+       else
+               set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+       if (dev->oncs & NVME_CTRL_ONCS_DSM)
+               nvme_config_discard(ns);
 
-       blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
- free:
        dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
        return 0;
 }
@@ -1923,8 +2096,7 @@ static int nvme_kthread(void *data)
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
-                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
-                                                       dev->initialized) {
+                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
                                if (work_busy(&dev->reset_work))
                                        continue;
                                list_del_init(&dev->node);
@@ -1956,30 +2128,16 @@ static int nvme_kthread(void *data)
        return 0;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-       u32 logical_block_size = queue_logical_block_size(ns->queue);
-       ns->queue->limits.discard_zeroes_data = 0;
-       ns->queue->limits.discard_alignment = logical_block_size;
-       ns->queue->limits.discard_granularity = logical_block_size;
-       ns->queue->limits.max_discard_sectors = 0xffffffff;
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
-                       struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
        struct nvme_ns *ns;
        struct gendisk *disk;
        int node = dev_to_node(&dev->pci_dev->dev);
-       int lbaf;
-
-       if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
-               return NULL;
 
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
-               return NULL;
+               return;
+
        ns->queue = blk_mq_init_queue(&dev->tagset);
        if (IS_ERR(ns->queue))
                goto out_free_ns;
@@ -1995,9 +2153,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 
        ns->ns_id = nsid;
        ns->disk = disk;
-       lbaf = id->flbas & 0xf;
-       ns->lba_shift = id->lbaf[lbaf].ds;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+       ns->lba_shift = 9; /* default to 512-byte sectors until the disk is validated */
+       list_add_tail(&ns->list, &dev->namespaces);
+
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        if (dev->max_hw_sectors)
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2169,26 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        disk->fops = &nvme_fops;
        disk->private_data = ns;
        disk->queue = ns->queue;
-       disk->driverfs_dev = &dev->pci_dev->dev;
+       disk->driverfs_dev = dev->device;
        disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
-       if (dev->oncs & NVME_CTRL_ONCS_DSM)
-               nvme_config_discard(ns);
-
-       return ns;
 
+       /*
+        * Initialize capacity to 0 until we establish the namespace format and
+        * set up integrity extensions if necessary. The revalidate_disk after
+        * add_disk allows the driver to register with integrity if the format
+        * requires it.
+        */
+       set_capacity(disk, 0);
+       nvme_revalidate_disk(ns->disk);
+       add_disk(ns->disk);
+       if (ns->ms)
+               revalidate_disk(ns->disk);
+       return;
  out_free_queue:
        blk_cleanup_queue(ns->queue);
  out_free_ns:
        kfree(ns);
-       return NULL;
 }
 
 static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2313,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
        struct pci_dev *pdev = dev->pci_dev;
        int res;
        unsigned nn, i;
-       struct nvme_ns *ns;
        struct nvme_id_ctrl *ctrl;
-       struct nvme_id_ns *id_ns;
        void *mem;
        dma_addr_t dma_addr;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-       mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
+       mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
        res = nvme_identify(dev, 0, 1, dma_addr);
        if (res) {
                dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-               res = -EIO;
-               goto out;
+               dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+               return -EIO;
        }
 
        ctrl = mem;
@@ -2191,6 +2352,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
                } else
                        dev->max_hw_sectors = max_hw_sectors;
        }
+       dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
 
        dev->tagset.ops = &nvme_mq_ops;
        dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2365,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
        dev->tagset.driver_data = dev;
 
        if (blk_mq_alloc_tag_set(&dev->tagset))
-               goto out;
-
-       id_ns = mem;
-       for (i = 1; i <= nn; i++) {
-               res = nvme_identify(dev, i, 0, dma_addr);
-               if (res)
-                       continue;
-
-               if (id_ns->ncap == 0)
-                       continue;
-
-               res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-                                                       dma_addr + 4096, NULL);
-               if (res)
-                       memset(mem + 4096, 0, 4096);
+               return 0;
 
-               ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
-               if (ns)
-                       list_add_tail(&ns->list, &dev->namespaces);
-       }
-       list_for_each_entry(ns, &dev->namespaces, list)
-               add_disk(ns->disk);
-       res = 0;
+       for (i = 1; i <= nn; i++)
+               nvme_alloc_ns(dev, i);
 
- out:
-       dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
-       return res;
+       return 0;
 }
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2499,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
        struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
-       nvme_clear_queue(nvmeq);
        nvme_put_dq(dq);
 }
 
@@ -2502,7 +2641,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        int i;
        u32 csts = -1;
 
-       dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
        if (dev->bar) {
@@ -2513,7 +2651,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
                        nvme_suspend_queue(nvmeq);
-                       nvme_clear_queue(nvmeq);
                }
        } else {
                nvme_disable_io_queues(dev);
@@ -2521,6 +2658,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                nvme_disable_queue(dev, 0);
        }
        nvme_dev_unmap(dev);
+
+       for (i = dev->queue_count - 1; i >= 0; i--)
+               nvme_clear_queue(dev->queues[i]);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2668,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        struct nvme_ns *ns;
 
        list_for_each_entry(ns, &dev->namespaces, list) {
-               if (ns->disk->flags & GENHD_FL_UP)
+               if (ns->disk->flags & GENHD_FL_UP) {
+                       if (blk_get_integrity(ns->disk))
+                               blk_integrity_unregister(ns->disk);
                        del_gendisk(ns->disk);
+               }
                if (!blk_queue_dying(ns->queue)) {
                        blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
@@ -2611,6 +2754,7 @@ static void nvme_free_dev(struct kref *kref)
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
        pci_dev_put(dev->pci_dev);
+       put_device(dev->device);
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2766,27 @@ static void nvme_free_dev(struct kref *kref)
 
 static int nvme_dev_open(struct inode *inode, struct file *f)
 {
-       struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
-                                                               miscdev);
-       kref_get(&dev->kref);
-       f->private_data = dev;
-       return 0;
+       struct nvme_dev *dev;
+       int instance = iminor(inode);
+       int ret = -ENODEV;
+
+       spin_lock(&dev_list_lock);
+       list_for_each_entry(dev, &dev_list, node) {
+               if (dev->instance == instance) {
+                       if (!dev->admin_q) {
+                               ret = -EWOULDBLOCK;
+                               break;
+                       }
+                       if (!kref_get_unless_zero(&dev->kref))
+                               break;
+                       f->private_data = dev;
+                       ret = 0;
+                       break;
+               }
+       }
+       spin_unlock(&dev_list_lock);
+
+       return ret;
 }
 
 static int nvme_dev_release(struct inode *inode, struct file *f)
@@ -2768,7 +2928,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                nvme_unfreeze_queues(dev);
                nvme_set_irq_hints(dev);
        }
-       dev->initialized = 1;
        return 0;
 }
 
@@ -2799,6 +2958,7 @@ static void nvme_reset_workfn(struct work_struct *work)
        dev->reset_workfn(work);
 }
 
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
@@ -2834,37 +2994,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto release;
 
        kref_init(&dev->kref);
-       result = nvme_dev_start(dev);
-       if (result)
+       dev->device = device_create(nvme_class, &pdev->dev,
+                               MKDEV(nvme_char_major, dev->instance),
+                               dev, "nvme%d", dev->instance);
+       if (IS_ERR(dev->device)) {
+               result = PTR_ERR(dev->device);
                goto release_pools;
+       }
+       get_device(dev->device);
 
-       if (dev->online_queues > 1)
-               result = nvme_dev_add(dev);
-       if (result)
-               goto shutdown;
-
-       scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
-       dev->miscdev.minor = MISC_DYNAMIC_MINOR;
-       dev->miscdev.parent = &pdev->dev;
-       dev->miscdev.name = dev->name;
-       dev->miscdev.fops = &nvme_dev_fops;
-       result = misc_register(&dev->miscdev);
-       if (result)
-               goto remove;
-
-       nvme_set_irq_hints(dev);
-
-       dev->initialized = 1;
+       INIT_WORK(&dev->probe_work, nvme_async_probe);
+       schedule_work(&dev->probe_work);
        return 0;
 
- remove:
-       nvme_dev_remove(dev);
-       nvme_dev_remove_admin(dev);
-       nvme_free_namespaces(dev);
- shutdown:
-       nvme_dev_shutdown(dev);
  release_pools:
-       nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
  release:
        nvme_release_instance(dev);
@@ -2877,6 +3020,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return result;
 }
 
+static void nvme_async_probe(struct work_struct *work)
+{
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+       int result;
+
+       result = nvme_dev_start(dev);
+       if (result)
+               goto reset;
+
+       if (dev->online_queues > 1)
+               result = nvme_dev_add(dev);
+       if (result)
+               goto reset;
+
+       nvme_set_irq_hints(dev);
+       return;
+ reset:
+       if (!work_busy(&dev->reset_work)) {
+               dev->reset_workfn = nvme_reset_failed_dev;
+               queue_work(nvme_workq, &dev->reset_work);
+       }
+}
+
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3068,12 @@ static void nvme_remove(struct pci_dev *pdev)
        spin_unlock(&dev_list_lock);
 
        pci_set_drvdata(pdev, NULL);
+       flush_work(&dev->probe_work);
        flush_work(&dev->reset_work);
-       misc_deregister(&dev->miscdev);
        nvme_dev_shutdown(dev);
        nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
+       device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
        nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
@@ -2990,11 +3157,26 @@ static int __init nvme_init(void)
        else if (result > 0)
                nvme_major = result;
 
+       result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
+                                                       &nvme_dev_fops);
+       if (result < 0)
+               goto unregister_blkdev;
+       else if (result > 0)
+               nvme_char_major = result;
+
+       nvme_class = class_create(THIS_MODULE, "nvme");
+       if (IS_ERR(nvme_class)) {
+               result = PTR_ERR(nvme_class);
+               goto unregister_chrdev;
+       }
+
        result = pci_register_driver(&nvme_driver);
        if (result)
-               goto unregister_blkdev;
+               goto destroy_class;
        return 0;
 
+ destroy_class:
+       class_destroy(nvme_class);
+ unregister_chrdev:
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
  unregister_blkdev:
        unregister_blkdev(nvme_major, "nvme");
  kill_workq:
@@ -3005,9 +3187,10 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       unregister_hotcpu_notifier(&nvme_nb);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
+       class_destroy(nvme_class);
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
        _nvme_check_size();
 }
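
The nvme-core.c hunks above move controller bring-up out of the PCI probe path into a work item (probe_work) that nvme_remove() flushes before tearing anything down. A minimal sketch of that deferral pattern, using hypothetical names rather than the driver's own:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct pci_dev *pdev;
	struct work_struct probe_work;
};

static void demo_async_probe(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, probe_work);

	/* slow initialization (queues, firmware, namespaces) runs here,
	 * outside the PCI probe callback */
	dev_info(&dev->pdev->dev, "deferred init done\n");
}

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return -ENOMEM;
	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);
	INIT_WORK(&dev->probe_work, demo_async_probe);
	schedule_work(&dev->probe_work);	/* probe() returns immediately */
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	struct demo_dev *dev = pci_get_drvdata(pdev);

	flush_work(&dev->probe_work);	/* never tear down under a running probe */
	kfree(dev);
}
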
index 5e78568026c339da939a33acd54cbd80891c5a10..e10196e0182d450667cf886421e9475c218f30c8 100644 (file)
@@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_dev *dev = ns->dev;
        dma_addr_t dma_addr;
        void *mem;
-       struct nvme_id_ctrl *id_ctrl;
        int res = SNTI_TRANSLATION_SUCCESS;
        int nvme_sc;
-       u8 ieee[4];
        int xfer_len;
        __be32 tmp_id = cpu_to_be32(ns->ns_id);
 
@@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out_dma;
        }
 
-       /* nvme controller identify */
-       nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
-       res = nvme_trans_status_code(hdr, nvme_sc);
-       if (res)
-               goto out_free;
-       if (nvme_sc) {
-               res = nvme_sc;
-               goto out_free;
-       }
-       id_ctrl = mem;
-
-       /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
-       ieee[0] = id_ctrl->ieee[0] << 4;
-       ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
-       ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
-       ieee[3] = id_ctrl->ieee[2] >> 4;
-
-       memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+       memset(inq_response, 0, alloc_len);
        inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
-       inq_response[3] = 20;      /* Page Length */
-       /* Designation Descriptor start */
-       inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
-       inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
-       inq_response[6] = 0x00;    /* Rsvd */
-       inq_response[7] = 16;      /* Designator Length */
-       /* Designator start */
-       inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
-       inq_response[9] = ieee[2];        /* IEEE ID */
-       inq_response[10] = ieee[1];       /* IEEE ID */
-       inq_response[11] = ieee[0];       /* IEEE ID| Vendor Specific ID... */
-       inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
-       inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
-       inq_response[14] = dev->serial[0];
-       inq_response[15] = dev->serial[1];
-       inq_response[16] = dev->model[0];
-       inq_response[17] = dev->model[1];
-       memcpy(&inq_response[18], &tmp_id, sizeof(u32));
-       /* Last 2 bytes are zero */
+       if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
+               struct nvme_id_ns *id_ns = mem;
+               void *eui = id_ns->eui64;
+               int len = sizeof(id_ns->eui64);
 
-       xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+               nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+               res = nvme_trans_status_code(hdr, nvme_sc);
+               if (res)
+                       goto out_free;
+               if (nvme_sc) {
+                       res = nvme_sc;
+                       goto out_free;
+               }
+
+               if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
+                       if (bitmap_empty(eui, len * 8)) {
+                               eui = id_ns->nguid;
+                               len = sizeof(id_ns->nguid);
+                       }
+               }
+               if (bitmap_empty(eui, len * 8))
+                       goto scsi_string;
+
+               inq_response[3] = 4 + len; /* Page Length */
+               /* Designation Descriptor start */
+               inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
+               inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
+               inq_response[6] = 0x00;    /* Rsvd */
+               inq_response[7] = len;     /* Designator Length */
+               memcpy(&inq_response[8], eui, len);
+       } else {
+ scsi_string:
+               if (alloc_len < 72) {
+                       res = nvme_trans_completion(hdr,
+                                       SAM_STAT_CHECK_CONDITION,
+                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+                       goto out_free;
+               }
+               inq_response[3] = 0x48;    /* Page Length */
+               /* Designation Descriptor start */
+               inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
+               inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
+               inq_response[6] = 0x00;    /* Rsvd */
+               inq_response[7] = 0x44;    /* Designator Length */
+
+               sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+               memcpy(&inq_response[12], dev->model, sizeof(dev->model));
+               sprintf(&inq_response[52], "%04x", tmp_id);
+               memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
+       }
+       xfer_len = alloc_len;
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
@@ -1600,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
                /* 10 Byte CDB */
                *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
                        parm_list[MODE_SELECT_10_BD_OFFSET + 1];
-               *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &&
+               *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
                                MODE_SELECT_10_LLBAA_MASK;
        } else {
                /* 6 Byte CDB */
@@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        page_code = GET_INQ_PAGE_CODE(cmd);
        alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-       inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+       inq_response = kmalloc(alloc_len, GFP_KERNEL);
        if (inq_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
index 8e233edd7a097a0f91d323071c97f16e43308e5d..871bd3550cb0e0842296771412e749225542ffac 100644 (file)
@@ -528,7 +528,7 @@ out_cleanup:
 static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
 {
-       int old_max, cur_max;
+       unsigned long old_max, cur_max;
 
        old_max = atomic_long_read(&zram->stats.max_used_pages);
 
index b876888811432a9bad46ab73a32ca40b04ed2ce4..8bfc4c2bba87b61f46dfbb4778463f393226f735 100644 (file)
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
 
        /* Intel Bluetooth devices */
+       { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
        { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
index ec318bf434a6c3d890d26060a9c388295bde807e..1786574536b21ef06415ae62ff14bdbfd77b3f2c 100644 (file)
@@ -157,12 +157,16 @@ static int ipmi_release(struct inode *inode, struct file *file)
 {
        struct ipmi_file_private *priv = file->private_data;
        int                      rv;
+       struct  ipmi_recv_msg *msg, *next;
 
        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;
 
-       /* FIXME - free the messages in the list. */
+       list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+               ipmi_free_recv_msg(msg);
+
+
        kfree(priv);
 
        return 0;
index 6b65fa4e0c5586895df2b26ee499c9e5ad4d8b2c..9bb592872532b1853efb00930c001e54df5fa7ed 100644 (file)
@@ -1483,14 +1483,10 @@ static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
        smi_msg->msgid = msgid;
 }
 
-static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
-                    struct ipmi_smi_msg *smi_msg, int priority)
+static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
+                                            struct ipmi_smi_msg *smi_msg,
+                                            int priority)
 {
-       int run_to_completion = intf->run_to_completion;
-       unsigned long flags;
-
-       if (!run_to_completion)
-               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
        if (intf->curr_msg) {
                if (priority > 0)
                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
@@ -1500,8 +1496,25 @@ static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
        } else {
                intf->curr_msg = smi_msg;
        }
-       if (!run_to_completion)
+
+       return smi_msg;
+}
+
+
+static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+                    struct ipmi_smi_msg *smi_msg, int priority)
+{
+       int run_to_completion = intf->run_to_completion;
+
+       if (run_to_completion) {
+               smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+       } else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+               smi_msg = smi_add_send_msg(intf, smi_msg, priority);
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+       }
 
        if (smi_msg)
                handlers->sender(intf->send_info, smi_msg);
@@ -1985,7 +1998,9 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
        seq_printf(m, "%x", intf->channels[0].address);
        for (i = 1; i < IPMI_MAX_CHANNELS; i++)
                seq_printf(m, " %x", intf->channels[i].address);
-       return seq_putc(m, '\n');
+       seq_putc(m, '\n');
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2004,9 +2019,11 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
 {
        ipmi_smi_t intf = m->private;
 
-       return seq_printf(m, "%u.%u\n",
-                      ipmi_version_major(&intf->bmc->id),
-                      ipmi_version_minor(&intf->bmc->id));
+       seq_printf(m, "%u.%u\n",
+                  ipmi_version_major(&intf->bmc->id),
+                  ipmi_version_minor(&intf->bmc->id));
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_version_proc_open(struct inode *inode, struct file *file)
@@ -2353,11 +2370,28 @@ static struct attribute *bmc_dev_attrs[] = {
        &dev_attr_additional_device_support.attr,
        &dev_attr_manufacturer_id.attr,
        &dev_attr_product_id.attr,
+       &dev_attr_aux_firmware_revision.attr,
+       &dev_attr_guid.attr,
        NULL
 };
 
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+                                      struct attribute *attr, int idx)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct bmc_device *bmc = to_bmc_device(dev);
+       umode_t mode = attr->mode;
+
+       if (attr == &dev_attr_aux_firmware_revision.attr)
+               return bmc->id.aux_firmware_revision_set ? mode : 0;
+       if (attr == &dev_attr_guid.attr)
+               return bmc->guid_set ? mode : 0;
+       return mode;
+}
+
 static struct attribute_group bmc_dev_attr_group = {
        .attrs          = bmc_dev_attrs,
+       .is_visible     = bmc_dev_attr_is_visible,
 };
 
 static const struct attribute_group *bmc_dev_attr_groups[] = {
@@ -2380,13 +2414,6 @@ cleanup_bmc_device(struct kref *ref)
 {
        struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
 
-       if (bmc->id.aux_firmware_revision_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_aux_firmware_revision);
-       if (bmc->guid_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_guid);
-
        platform_device_unregister(&bmc->pdev);
 }
 
@@ -2407,33 +2434,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
        mutex_unlock(&ipmidriver_mutex);
 }
 
-static int create_bmc_files(struct bmc_device *bmc)
-{
-       int err;
-
-       if (bmc->id.aux_firmware_revision_set) {
-               err = device_create_file(&bmc->pdev.dev,
-                                        &dev_attr_aux_firmware_revision);
-               if (err)
-                       goto out;
-       }
-       if (bmc->guid_set) {
-               err = device_create_file(&bmc->pdev.dev,
-                                        &dev_attr_guid);
-               if (err)
-                       goto out_aux_firm;
-       }
-
-       return 0;
-
-out_aux_firm:
-       if (bmc->id.aux_firmware_revision_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_aux_firmware_revision);
-out:
-       return err;
-}
-
 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
 {
        int               rv;
@@ -2522,15 +2522,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
                        return rv;
                }
 
-               rv = create_bmc_files(bmc);
-               if (rv) {
-                       mutex_lock(&ipmidriver_mutex);
-                       platform_device_unregister(&bmc->pdev);
-                       mutex_unlock(&ipmidriver_mutex);
-
-                       return rv;
-               }
-
                dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
                         "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
                         bmc->id.manufacturer_id,
@@ -4212,7 +4203,6 @@ static void need_waiter(ipmi_smi_t intf)
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
 
-/* FIXME - convert these to slabs. */
 static void free_smi_msg(struct ipmi_smi_msg *msg)
 {
        atomic_dec(&smi_msg_inuse_count);
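
The attribute-group change above replaces the manual device_create_file()/device_remove_file() calls for aux_firmware_revision and guid with an .is_visible callback, so conditional attributes are declared once and hidden per device. The generic shape of that pattern, with made-up attribute and struct names:

#include <linux/device.h>
#include <linux/sysfs.h>

struct demo_device {
	bool has_feature;
};

static ssize_t feature_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(feature);

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	/* the driver is assumed to have set drvdata to its demo_device */
	struct demo_device *dd = dev_get_drvdata(dev);

	/* return 0 to hide the file on this device, attr->mode to keep it */
	if (attr == &dev_attr_feature.attr && !dd->has_feature)
		return 0;
	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&dev_attr_feature.attr,
	NULL
};

static const struct attribute_group demo_attr_group = {
	.attrs		= demo_attrs,
	.is_visible	= demo_attr_is_visible,
};
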
index 967b73aa4e66d31481d6ae9488ab567530661f76..f6646ed3047e09a3656b089491f0afaa14982af1 100644 (file)
@@ -321,6 +321,18 @@ static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 static void cleanup_ipmi_si(void);
 
+#ifdef DEBUG_TIMING
+void debug_timestamp(char *msg)
+{
+       struct timespec64 t;
+
+       getnstimeofday64(&t);
+       pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(x)
+#endif
+
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
 {
@@ -358,9 +370,6 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 {
        int              rv;
-#ifdef DEBUG_TIMING
-       struct timeval t;
-#endif
 
        if (!smi_info->waiting_msg) {
                smi_info->curr_msg = NULL;
@@ -370,10 +379,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 
                smi_info->curr_msg = smi_info->waiting_msg;
                smi_info->waiting_msg = NULL;
-#ifdef DEBUG_TIMING
-               do_gettimeofday(&t);
-               printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+               debug_timestamp("Start2");
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
@@ -582,12 +588,8 @@ static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
 static void handle_transaction_done(struct smi_info *smi_info)
 {
        struct ipmi_smi_msg *msg;
-#ifdef DEBUG_TIMING
-       struct timeval t;
 
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Done");
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
@@ -929,24 +931,15 @@ static void sender(void                *send_info,
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
-#ifdef DEBUG_TIMING
-       struct timeval    t;
-#endif
-
-       BUG_ON(smi_info->waiting_msg);
-       smi_info->waiting_msg = msg;
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Enqueue");
 
        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, start it and run
                 * transactions until everything is clear.
                 */
-               smi_info->curr_msg = smi_info->waiting_msg;
+               smi_info->curr_msg = msg;
                smi_info->waiting_msg = NULL;
 
                /*
@@ -964,6 +957,15 @@ static void sender(void                *send_info,
        }
 
        spin_lock_irqsave(&smi_info->si_lock, flags);
+       /*
+        * The following two lines don't need to be under the lock for
+        * the lock's sake, but they do need SMP memory barriers to
+        * avoid getting things out of order.  We are already claiming
+        * the lock, anyway, so just do it under the lock to avoid the
+        * ordering problem.
+        */
+       BUG_ON(smi_info->waiting_msg);
+       smi_info->waiting_msg = msg;
        check_start_timer_thread(smi_info);
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -989,18 +991,18 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
  * we are spinning in kipmid looking for something and not delaying
  * between checks
  */
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
 {
        ts->tv_nsec = -1;
 }
-static inline int ipmi_si_is_busy(struct timespec *ts)
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
 {
        return ts->tv_nsec != -1;
 }
 
 static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
                                        const struct smi_info *smi_info,
-                                       struct timespec *busy_until)
+                                       struct timespec64 *busy_until)
 {
        unsigned int max_busy_us = 0;
 
@@ -1009,12 +1011,13 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
                ipmi_si_set_not_busy(busy_until);
        else if (!ipmi_si_is_busy(busy_until)) {
-               getnstimeofday(busy_until);
-               timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+               getnstimeofday64(busy_until);
+               timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
        } else {
-               struct timespec now;
-               getnstimeofday(&now);
-               if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+               struct timespec64 now;
+
+               getnstimeofday64(&now);
+               if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
                        ipmi_si_set_not_busy(busy_until);
                        return 0;
                }
@@ -1037,7 +1040,7 @@ static int ipmi_thread(void *data)
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;
-       struct timespec busy_until;
+       struct timespec64 busy_until;
 
        ipmi_si_set_not_busy(&busy_until);
        set_user_nice(current, MAX_NICE);
@@ -1128,15 +1131,10 @@ static void smi_timeout(unsigned long data)
        unsigned long     jiffies_now;
        long              time_diff;
        long              timeout;
-#ifdef DEBUG_TIMING
-       struct timeval    t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Timer");
+
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
@@ -1173,18 +1171,13 @@ static irqreturn_t si_irq_handler(int irq, void *data)
 {
        struct smi_info *smi_info = data;
        unsigned long   flags;
-#ifdef DEBUG_TIMING
-       struct timeval  t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Interrupt");
+
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
@@ -2038,18 +2031,13 @@ static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
 {
        struct smi_info *smi_info = context;
        unsigned long   flags;
-#ifdef DEBUG_TIMING
-       struct timeval t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("ACPI_GPE");
+
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 
@@ -2071,7 +2059,6 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
        if (!info->irq)
                return 0;
 
-       /* FIXME - is level triggered right? */
        status = acpi_install_gpe_handler(NULL,
                                          info->irq,
                                          ACPI_GPE_LEVEL_TRIGGERED,
@@ -2998,7 +2985,9 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
 {
        struct smi_info *smi = m->private;
 
-       return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+       seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3060,16 +3049,18 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
 {
        struct smi_info *smi = m->private;
 
-       return seq_printf(m,
-                      "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
-                      si_to_str[smi->si_type],
-                      addr_space_to_str[smi->io.addr_type],
-                      smi->io.addr_data,
-                      smi->io.regspacing,
-                      smi->io.regsize,
-                      smi->io.regshift,
-                      smi->irq,
-                      smi->slave_addr);
+       seq_printf(m,
+                  "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+                  si_to_str[smi->si_type],
+                  addr_space_to_str[smi->io.addr_type],
+                  smi->io.addr_data,
+                  smi->io.regspacing,
+                  smi->io.regsize,
+                  smi->io.regshift,
+                  smi->irq,
+                  smi->slave_addr);
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_params_proc_open(struct inode *inode, struct file *file)
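
The kipmid changes above convert the busy-wait bookkeeping from struct timespec to struct timespec64, which keeps the deadline arithmetic valid past 2038. The same computation in isolation, with illustrative helper names (the kernel APIs are the ones used in the hunks):

#include <linux/ktime.h>
#include <linux/time64.h>

/* compute "busy until now + max_busy_us" */
static void demo_set_busy_until(struct timespec64 *busy_until,
				unsigned int max_busy_us)
{
	getnstimeofday64(busy_until);
	timespec64_add_ns(busy_until, (u64)max_busy_us * NSEC_PER_USEC);
}

/* true while the deadline has not passed yet */
static bool demo_still_busy(const struct timespec64 *busy_until)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	return timespec64_compare(&now, busy_until) <= 0;
}
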
index 982b96323f823b8402ede2ceec7c0cb85c042ec4..f6e378dac5f5b1031530839d5967ab96607f1f7b 100644 (file)
@@ -1097,8 +1097,6 @@ static int ssif_remove(struct i2c_client *client)
        if (!ssif_info)
                return 0;
 
-       i2c_set_clientdata(client, NULL);
-
        /*
         * After this point, we won't deliver anything asynchronously
         * to the message handler.  We can unregister ourself.
@@ -1198,7 +1196,9 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
 
 static int smi_type_proc_show(struct seq_file *m, void *v)
 {
-       return seq_puts(m, "ssif\n");
+       seq_puts(m, "ssif\n");
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
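
The seq_file conversions in the three IPMI files above all follow from the same interface change: the return values of seq_printf()/seq_puts() are being phased out, so a show() callback emits its output and then reports overflow explicitly. A minimal sketch of the resulting pattern:

#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "demo\n");		/* return value is not meaningful */

	return seq_has_overflowed(m);	/* report whether the buffer was too small */
}
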
index 1d278ccd751f07002e6f6c2d43ad07e030a29bff..e096e9cddb4014f6896be0341ace9ed61803da34 100644 (file)
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
 {
        int rc;
 
-       rc = device_add(&chip->dev);
+       rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
        if (rc) {
                dev_err(&chip->dev,
-                       "unable to device_register() %s, major %d, minor %d, err=%d\n",
+                       "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
                        chip->devname, MAJOR(chip->dev.devt),
                        MINOR(chip->dev.devt), rc);
 
+               device_unregister(&chip->dev);
                return rc;
        }
 
-       rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
+       rc = device_add(&chip->dev);
        if (rc) {
                dev_err(&chip->dev,
-                       "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+                       "unable to device_register() %s, major %d, minor %d, err=%d\n",
                        chip->devname, MAJOR(chip->dev.devt),
                        MINOR(chip->dev.devt), rc);
 
-               device_unregister(&chip->dev);
                return rc;
        }
 
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
  * tpm_chip_register() - create a character device for the TPM chip
  * @chip: TPM chip to use.
  *
- * Creates a character device for the TPM chip and adds sysfs interfaces for
- * the device, PPI and TCPA. As the last step this function adds the
- * chip to the list of TPM chips available for use.
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
  *
- * NOTE: This function should be only called after the chip initialization
- * is complete.
- *
- * Called from tpm_<specific>.c probe function only for devices
- * the driver has determined it should claim.  Prior to calling
- * this function the specific probe function has called pci_enable_device
- * upon errant exit from this function specific probe function should call
- * pci_disable_device
+ * This function should be only called after the chip initialization is
+ * complete.
  */
 int tpm_chip_register(struct tpm_chip *chip)
 {
        int rc;
 
-       rc = tpm_dev_add_device(chip);
-       if (rc)
-               return rc;
-
        /* Populate sysfs for TPM1 devices. */
        if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
                rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip)
                chip->bios_dir = tpm_bios_log_setup(chip->devname);
        }
 
+       rc = tpm_dev_add_device(chip);
+       if (rc)
+               return rc;
+
        /* Make the chip available. */
        spin_lock(&driver_lock);
        list_add_rcu(&chip->list, &tpm_chip_list);
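
The tpm-chip.c reordering above makes the character device operational (cdev_add) before device_add() announces the node, so an open() arriving as soon as the device appears already finds the file operations in place. Reduced to its core, under hypothetical names and assuming the device and cdev were initialized earlier:

#include <linux/cdev.h>
#include <linux/device.h>

struct demo_chip {
	struct device dev;	/* assumed already device_initialize()d */
	struct cdev cdev;	/* assumed already cdev_init()ed */
};

static int demo_chip_add(struct demo_chip *chip)
{
	int rc;

	/* 1. wire up the file operations first */
	rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
	if (rc)
		return rc;

	/* 2. only then make the device visible to user space / udev */
	rc = device_add(&chip->dev);
	if (rc) {
		cdev_del(&chip->cdev);
		return rc;
	}
	return 0;
}
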
index b1e53e3aece5639e622dcced99ba2cd8396b8af7..42ffa5e7a1e0f6c912a0e214e67a3a99d1511a08 100644 (file)
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct ibmvtpm_dev *ibmvtpm;
        struct ibmvtpm_crq crq;
-       u64 *word = (u64 *) &crq;
+       __be64 *word = (__be64 *)&crq;
        int rc;
 
        ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
        memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
        crq.valid = (u8)IBMVTPM_VALID_CMD;
        crq.msg = (u8)VTPM_TPM_COMMAND;
-       crq.len = (u16)count;
-       crq.data = ibmvtpm->rtce_dma_handle;
+       crq.len = cpu_to_be16(count);
+       crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
 
-       rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
-                             cpu_to_be64(word[1]));
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+                             be64_to_cpu(word[1]));
        if (rc != H_SUCCESS) {
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                rc = 0;
index f595f14426bf1fcf50f1f95f6e062b3e9fa22e22..6af92890518f8be2bed63503f6170ddbdb1476d2 100644 (file)
@@ -22,9 +22,9 @@
 struct ibmvtpm_crq {
        u8 valid;
        u8 msg;
-       u16 len;
-       u32 data;
-       u64 reserved;
+       __be16 len;
+       __be32 data;
+       __be64 reserved;
 } __attribute__((packed, aligned(8)));
 
 struct ibmvtpm_crq_queue {
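
The ibmvtpm changes above annotate the on-the-wire CRQ fields as __be16/__be32/__be64 and convert with cpu_to_be*()/be64_to_cpu(), which lets sparse flag any place a native-endian value leaks into the wire format. The idea in miniature, with a made-up message layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_msg {
	__be16 len;		/* always stored big endian */
	__be32 data;
} __packed;

static void demo_fill(struct demo_wire_msg *m, u16 len, u32 data)
{
	m->len  = cpu_to_be16(len);	/* CPU order -> big endian */
	m->data = cpu_to_be32(data);
}

static u16 demo_len(const struct demo_wire_msg *m)
{
	return be16_to_cpu(m->len);	/* big endian -> CPU order */
}
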
index fae2dbbf57459fe4f4ac01bb0415a02abbda9e18..72d7028f779b55801795ef0fe49d9dda8cde7970 100644 (file)
@@ -142,6 +142,7 @@ struct ports_device {
         * notification
         */
        struct work_struct control_work;
+       struct work_struct config_work;
 
        struct list_head ports;
 
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev)
 
        portdev = vdev->priv;
 
+       if (!use_multiport(portdev))
+               schedule_work(&portdev->config_work);
+}
+
+static void config_work_handler(struct work_struct *work)
+{
+       struct ports_device *portdev;
+
+       portdev = container_of(work, struct ports_device, config_work);
        if (!use_multiport(portdev)) {
+               struct virtio_device *vdev;
                struct port *port;
                u16 rows, cols;
 
+               vdev = portdev->vdev;
                virtio_cread(vdev, struct virtio_console_config, cols, &cols);
                virtio_cread(vdev, struct virtio_console_config, rows, &rows);
 
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev)
 
        virtio_device_ready(portdev->vdev);
 
+       INIT_WORK(&portdev->config_work, &config_work_handler);
+       INIT_WORK(&portdev->control_work, &control_work_handler);
+
        if (multiport) {
                unsigned int nr_added_bufs;
 
                spin_lock_init(&portdev->c_ivq_lock);
                spin_lock_init(&portdev->c_ovq_lock);
-               INIT_WORK(&portdev->control_work, &control_work_handler);
 
                nr_added_bufs = fill_queue(portdev->c_ivq,
                                           &portdev->c_ivq_lock);
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev)
        /* Finish up work that's lined up */
        if (use_multiport(portdev))
                cancel_work_sync(&portdev->control_work);
+       else
+               cancel_work_sync(&portdev->config_work);
 
        list_for_each_entry_safe(port, port2, &portdev->ports, list)
                unplug_port(port);
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
        virtqueue_disable_cb(portdev->c_ivq);
        cancel_work_sync(&portdev->control_work);
+       cancel_work_sync(&portdev->config_work);
        /*
         * Once more: if control_work_handler() was running, it would
         * enable the cb as the last step.
index 91f86131bb7aa62b0c4632e2defa1f8b2f4e6abc..0b474a04730fe4d2c588cb1a3a1818dbff61f995 100644 (file)
@@ -102,12 +102,12 @@ config COMMON_CLK_AXI_CLKGEN
          Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
          FPGAs. It is commonly used in Analog Devices' reference designs.
 
-config CLK_PPC_CORENET
-       bool "Clock driver for PowerPC corenet platforms"
-       depends on PPC_E500MC && OF
+config CLK_QORIQ
+       bool "Clock driver for Freescale QorIQ platforms"
+       depends on (PPC_E500MC || ARM) && OF
        ---help---
-         This adds the clock driver support for Freescale PowerPC corenet
-         platforms using common clock framework.
+         This adds the clock driver support for Freescale QorIQ platforms
+         using the common clock framework.
 
 config COMMON_CLK_XGENE
        bool "Clock driver for APM XGene SoC"
@@ -135,6 +135,14 @@ config COMMON_CLK_PXA
        ---help---
          Support for the Marvell PXA SoC.
 
+config COMMON_CLK_CDCE706
+       tristate "Clock driver for TI CDCE706 clock synthesizer"
+       depends on I2C
+       select REGMAP_I2C
+       select RATIONAL
+       ---help---
+         This driver supports the TI CDCE706 programmable 3-PLL clock synthesizer.
+
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
index d5fba5bc6e1bc1f07991be58f367c30a253a58f7..d478ceb69c5fc6b4a1bee49276b37763db36c79d 100644 (file)
@@ -16,9 +16,11 @@ endif
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_MACH_ASM9260)             += clk-asm9260.o
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)               += clk-axm5516.o
 obj-$(CONFIG_ARCH_BCM2835)             += clk-bcm2835.o
+obj-$(CONFIG_COMMON_CLK_CDCE706)       += clk-cdce706.o
 obj-$(CONFIG_ARCH_CLPS711X)            += clk-clps711x.o
 obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
 obj-$(CONFIG_ARCH_HIGHBANK)            += clk-highbank.o
@@ -30,7 +32,7 @@ obj-$(CONFIG_ARCH_MOXART)             += clk-moxart.o
 obj-$(CONFIG_ARCH_NOMADIK)             += clk-nomadik.o
 obj-$(CONFIG_ARCH_NSPIRE)              += clk-nspire.o
 obj-$(CONFIG_COMMON_CLK_PALMAS)                += clk-palmas.o
-obj-$(CONFIG_CLK_PPC_CORENET)          += clk-ppc-corenet.o
+obj-$(CONFIG_CLK_QORIQ)                        += clk-qoriq.o
 obj-$(CONFIG_COMMON_CLK_RK808)         += clk-rk808.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)       += clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)                += clk-si5351.o
index bbdb1b985c9146a5e82013fd8d8ab20ea3ebc5fc..86c8a073dcc32a20b98f4c862d5622b11ffe1d57 100644 (file)
@@ -56,6 +56,8 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
 
 static long clk_programmable_determine_rate(struct clk_hw *hw,
                                            unsigned long rate,
+                                           unsigned long min_rate,
+                                           unsigned long max_rate,
                                            unsigned long *best_parent_rate,
                                            struct clk_hw **best_parent_hw)
 {
index f07c8152e5cc42aa660f5c83c6c6470ee2e68502..3f27d21fb7297e70494bf35743f027f8c8005d73 100644 (file)
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
        return 0;
 }
 
+static void pmc_irq_suspend(struct irq_data *d)
+{
+       struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+       pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
+       pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
+}
+
+static void pmc_irq_resume(struct irq_data *d)
+{
+       struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+       pmc_write(pmc, AT91_PMC_IER, pmc->imr);
+}
+
 static struct irq_chip pmc_irq = {
        .name = "PMC",
        .irq_disable = pmc_irq_mask,
        .irq_mask = pmc_irq_mask,
        .irq_unmask = pmc_irq_unmask,
        .irq_set_type = pmc_irq_set_type,
+       .irq_suspend = pmc_irq_suspend,
+       .irq_resume = pmc_irq_resume,
 };
 
 static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
                goto out_free_pmc;
 
        pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
-       if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+       if (request_irq(pmc->virq, pmc_irq_handler,
+                       IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
                goto out_remove_irqdomain;
 
        return pmc;
index 52d2041fa3f6354a4abfdc8f45ee147a75a370ef..69abb08cf146513b0307a4a78449b2e5da971282 100644 (file)
@@ -33,6 +33,7 @@ struct at91_pmc {
        spinlock_t lock;
        const struct at91_pmc_caps *caps;
        struct irq_domain *irqdomain;
+       u32 imr;
 };
 
 static inline void pmc_lock(struct at91_pmc *pmc)
index 1c06f6f3a8c59959b90e90f554c048e1535a1893..05abae89262e20923f5f39321c33165bdab6ea7f 100644 (file)
@@ -1032,6 +1032,8 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate,
+               unsigned long max_rate,
                unsigned long *best_parent_rate, struct clk_hw **best_parent)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
new file mode 100644 (file)
index 0000000..88f4ff6
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2014 Oleksij Rempel <linux@rempel-privat.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/alphascale,asm9260.h>
+
+#define HW_AHBCLKCTRL0         0x0020
+#define HW_AHBCLKCTRL1         0x0030
+#define HW_SYSPLLCTRL          0x0100
+#define HW_MAINCLKSEL          0x0120
+#define HW_MAINCLKUEN          0x0124
+#define HW_UARTCLKSEL          0x0128
+#define HW_UARTCLKUEN          0x012c
+#define HW_I2S0CLKSEL          0x0130
+#define HW_I2S0CLKUEN          0x0134
+#define HW_I2S1CLKSEL          0x0138
+#define HW_I2S1CLKUEN          0x013c
+#define HW_WDTCLKSEL           0x0160
+#define HW_WDTCLKUEN           0x0164
+#define HW_CLKOUTCLKSEL                0x0170
+#define HW_CLKOUTCLKUEN                0x0174
+#define HW_CPUCLKDIV           0x017c
+#define HW_SYSAHBCLKDIV                0x0180
+#define HW_I2S0MCLKDIV         0x0190
+#define HW_I2S0SCLKDIV         0x0194
+#define HW_I2S1MCLKDIV         0x0188
+#define HW_I2S1SCLKDIV         0x018c
+#define HW_UART0CLKDIV         0x0198
+#define HW_UART1CLKDIV         0x019c
+#define HW_UART2CLKDIV         0x01a0
+#define HW_UART3CLKDIV         0x01a4
+#define HW_UART4CLKDIV         0x01a8
+#define HW_UART5CLKDIV         0x01ac
+#define HW_UART6CLKDIV         0x01b0
+#define HW_UART7CLKDIV         0x01b4
+#define HW_UART8CLKDIV         0x01b8
+#define HW_UART9CLKDIV         0x01bc
+#define HW_SPI0CLKDIV          0x01c0
+#define HW_SPI1CLKDIV          0x01c4
+#define HW_QUADSPICLKDIV       0x01c8
+#define HW_SSP0CLKDIV          0x01d0
+#define HW_NANDCLKDIV          0x01d4
+#define HW_TRACECLKDIV         0x01e0
+#define HW_CAMMCLKDIV          0x01e8
+#define HW_WDTCLKDIV           0x01ec
+#define HW_CLKOUTCLKDIV                0x01f4
+#define HW_MACCLKDIV           0x01f8
+#define HW_LCDCLKDIV           0x01fc
+#define HW_ADCANACLKDIV                0x0200
+
+static struct clk *clks[MAX_CLKS];
+static struct clk_onecell_data clk_data;
+static DEFINE_SPINLOCK(asm9260_clk_lock);
+
+struct asm9260_div_clk {
+       unsigned int idx;
+       const char *name;
+       const char *parent_name;
+       u32 reg;
+};
+
+struct asm9260_gate_data {
+       unsigned int idx;
+       const char *name;
+       const char *parent_name;
+       u32 reg;
+       u8 bit_idx;
+       unsigned long flags;
+};
+
+struct asm9260_mux_clock {
+       u8                      mask;
+       u32                     *table;
+       const char              *name;
+       const char              **parent_names;
+       u8                      num_parents;
+       unsigned long           offset;
+       unsigned long           flags;
+};
+
+static void __iomem *base;
+
+static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
+       { CLKID_SYS_CPU,        "cpu_div", "main_gate", HW_CPUCLKDIV },
+       { CLKID_SYS_AHB,        "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
+
+       /* i2s has two dividers: one only for the external mclk and an
+        * internal divider for all clks. */
+       { CLKID_SYS_I2S0M,      "i2s0m_div", "i2s0_mclk",  HW_I2S0MCLKDIV },
+       { CLKID_SYS_I2S1M,      "i2s1m_div", "i2s1_mclk",  HW_I2S1MCLKDIV },
+       { CLKID_SYS_I2S0S,      "i2s0s_div", "i2s0_gate",  HW_I2S0SCLKDIV },
+       { CLKID_SYS_I2S1S,      "i2s1s_div", "i2s0_gate",  HW_I2S1SCLKDIV },
+
+       { CLKID_SYS_UART0,      "uart0_div", "uart_gate", HW_UART0CLKDIV },
+       { CLKID_SYS_UART1,      "uart1_div", "uart_gate", HW_UART1CLKDIV },
+       { CLKID_SYS_UART2,      "uart2_div", "uart_gate", HW_UART2CLKDIV },
+       { CLKID_SYS_UART3,      "uart3_div", "uart_gate", HW_UART3CLKDIV },
+       { CLKID_SYS_UART4,      "uart4_div", "uart_gate", HW_UART4CLKDIV },
+       { CLKID_SYS_UART5,      "uart5_div", "uart_gate", HW_UART5CLKDIV },
+       { CLKID_SYS_UART6,      "uart6_div", "uart_gate", HW_UART6CLKDIV },
+       { CLKID_SYS_UART7,      "uart7_div", "uart_gate", HW_UART7CLKDIV },
+       { CLKID_SYS_UART8,      "uart8_div", "uart_gate", HW_UART8CLKDIV },
+       { CLKID_SYS_UART9,      "uart9_div", "uart_gate", HW_UART9CLKDIV },
+
+       { CLKID_SYS_SPI0,       "spi0_div",     "main_gate", HW_SPI0CLKDIV },
+       { CLKID_SYS_SPI1,       "spi1_div",     "main_gate", HW_SPI1CLKDIV },
+       { CLKID_SYS_QUADSPI,    "quadspi_div",  "main_gate", HW_QUADSPICLKDIV },
+       { CLKID_SYS_SSP0,       "ssp0_div",     "main_gate", HW_SSP0CLKDIV },
+       { CLKID_SYS_NAND,       "nand_div",     "main_gate", HW_NANDCLKDIV },
+       { CLKID_SYS_TRACE,      "trace_div",    "main_gate", HW_TRACECLKDIV },
+       { CLKID_SYS_CAMM,       "camm_div",     "main_gate", HW_CAMMCLKDIV },
+       { CLKID_SYS_MAC,        "mac_div",      "main_gate", HW_MACCLKDIV },
+       { CLKID_SYS_LCD,        "lcd_div",      "main_gate", HW_LCDCLKDIV },
+       { CLKID_SYS_ADCANA,     "adcana_div",   "main_gate", HW_ADCANACLKDIV },
+
+       { CLKID_SYS_WDT,        "wdt_div",      "wdt_gate",    HW_WDTCLKDIV },
+       { CLKID_SYS_CLKOUT,     "clkout_div",   "clkout_gate", HW_CLKOUTCLKDIV },
+};
+
+static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
+       { 0, "main_gate",       "main_mux",     HW_MAINCLKUEN,  0 },
+       { 0, "uart_gate",       "uart_mux",     HW_UARTCLKUEN,  0 },
+       { 0, "i2s0_gate",       "i2s0_mux",     HW_I2S0CLKUEN,  0 },
+       { 0, "i2s1_gate",       "i2s1_mux",     HW_I2S1CLKUEN,  0 },
+       { 0, "wdt_gate",        "wdt_mux",      HW_WDTCLKUEN,   0 },
+       { 0, "clkout_gate",     "clkout_mux",   HW_CLKOUTCLKUEN, 0 },
+};
+static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
+       /* ahb gates */
+       { CLKID_AHB_ROM,        "rom",          "ahb_div",
+               HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_RAM,        "ram",          "ahb_div",
+               HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_GPIO,       "gpio",         "ahb_div",
+               HW_AHBCLKCTRL0, 4 },
+       { CLKID_AHB_MAC,        "mac",          "ahb_div",
+               HW_AHBCLKCTRL0, 5 },
+       { CLKID_AHB_EMI,        "emi",          "ahb_div",
+               HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_USB0,       "usb0",         "ahb_div",
+               HW_AHBCLKCTRL0, 7 },
+       { CLKID_AHB_USB1,       "usb1",         "ahb_div",
+               HW_AHBCLKCTRL0, 8 },
+       { CLKID_AHB_DMA0,       "dma0",         "ahb_div",
+               HW_AHBCLKCTRL0, 9 },
+       { CLKID_AHB_DMA1,       "dma1",         "ahb_div",
+               HW_AHBCLKCTRL0, 10 },
+       { CLKID_AHB_UART0,      "uart0",        "ahb_div",
+               HW_AHBCLKCTRL0, 11 },
+       { CLKID_AHB_UART1,      "uart1",        "ahb_div",
+               HW_AHBCLKCTRL0, 12 },
+       { CLKID_AHB_UART2,      "uart2",        "ahb_div",
+               HW_AHBCLKCTRL0, 13 },
+       { CLKID_AHB_UART3,      "uart3",        "ahb_div",
+               HW_AHBCLKCTRL0, 14 },
+       { CLKID_AHB_UART4,      "uart4",        "ahb_div",
+               HW_AHBCLKCTRL0, 15 },
+       { CLKID_AHB_UART5,      "uart5",        "ahb_div",
+               HW_AHBCLKCTRL0, 16 },
+       { CLKID_AHB_UART6,      "uart6",        "ahb_div",
+               HW_AHBCLKCTRL0, 17 },
+       { CLKID_AHB_UART7,      "uart7",        "ahb_div",
+               HW_AHBCLKCTRL0, 18 },
+       { CLKID_AHB_UART8,      "uart8",        "ahb_div",
+               HW_AHBCLKCTRL0, 19 },
+       { CLKID_AHB_UART9,      "uart9",        "ahb_div",
+               HW_AHBCLKCTRL0, 20 },
+       { CLKID_AHB_I2S0,       "i2s0",         "ahb_div",
+               HW_AHBCLKCTRL0, 21 },
+       { CLKID_AHB_I2C0,       "i2c0",         "ahb_div",
+               HW_AHBCLKCTRL0, 22 },
+       { CLKID_AHB_I2C1,       "i2c1",         "ahb_div",
+               HW_AHBCLKCTRL0, 23 },
+       { CLKID_AHB_SSP0,       "ssp0",         "ahb_div",
+               HW_AHBCLKCTRL0, 24 },
+       { CLKID_AHB_IOCONFIG,   "ioconf",       "ahb_div",
+               HW_AHBCLKCTRL0, 25 },
+       { CLKID_AHB_WDT,        "wdt",          "ahb_div",
+               HW_AHBCLKCTRL0, 26 },
+       { CLKID_AHB_CAN0,       "can0",         "ahb_div",
+               HW_AHBCLKCTRL0, 27 },
+       { CLKID_AHB_CAN1,       "can1",         "ahb_div",
+               HW_AHBCLKCTRL0, 28 },
+       { CLKID_AHB_MPWM,       "mpwm",         "ahb_div",
+               HW_AHBCLKCTRL0, 29 },
+       { CLKID_AHB_SPI0,       "spi0",         "ahb_div",
+               HW_AHBCLKCTRL0, 30 },
+       { CLKID_AHB_SPI1,       "spi1",         "ahb_div",
+               HW_AHBCLKCTRL0, 31 },
+
+       { CLKID_AHB_QEI,        "qei",          "ahb_div",
+               HW_AHBCLKCTRL1, 0 },
+       { CLKID_AHB_QUADSPI0,   "quadspi0",     "ahb_div",
+               HW_AHBCLKCTRL1, 1 },
+       { CLKID_AHB_CAMIF,      "camif",        "ahb_div",
+               HW_AHBCLKCTRL1, 2 },
+       { CLKID_AHB_LCDIF,      "lcdif",        "ahb_div",
+               HW_AHBCLKCTRL1, 3 },
+       { CLKID_AHB_TIMER0,     "timer0",       "ahb_div",
+               HW_AHBCLKCTRL1, 4 },
+       { CLKID_AHB_TIMER1,     "timer1",       "ahb_div",
+               HW_AHBCLKCTRL1, 5 },
+       { CLKID_AHB_TIMER2,     "timer2",       "ahb_div",
+               HW_AHBCLKCTRL1, 6 },
+       { CLKID_AHB_TIMER3,     "timer3",       "ahb_div",
+               HW_AHBCLKCTRL1, 7 },
+       { CLKID_AHB_IRQ,        "irq",          "ahb_div",
+               HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_RTC,        "rtc",          "ahb_div",
+               HW_AHBCLKCTRL1, 9 },
+       { CLKID_AHB_NAND,       "nand",         "ahb_div",
+               HW_AHBCLKCTRL1, 10 },
+       { CLKID_AHB_ADC0,       "adc0",         "ahb_div",
+               HW_AHBCLKCTRL1, 11 },
+       { CLKID_AHB_LED,        "led",          "ahb_div",
+               HW_AHBCLKCTRL1, 12 },
+       { CLKID_AHB_DAC0,       "dac0",         "ahb_div",
+               HW_AHBCLKCTRL1, 13 },
+       { CLKID_AHB_LCD,        "lcd",          "ahb_div",
+               HW_AHBCLKCTRL1, 14 },
+       { CLKID_AHB_I2S1,       "i2s1",         "ahb_div",
+               HW_AHBCLKCTRL1, 15 },
+       { CLKID_AHB_MAC1,       "mac1",         "ahb_div",
+               HW_AHBCLKCTRL1, 16 },
+};
+
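+/* The first two parents of each mux (the reference clock taken from DT and
+ * the "pll" clock) are only known at runtime; they are filled in by
+ * asm9260_acc_init() before the muxes are registered. */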
+static const char __initdata *main_mux_p[] =   { NULL, NULL };
+static const char __initdata *i2s0_mux_p[] =   { NULL, NULL, "i2s0m_div"};
+static const char __initdata *i2s1_mux_p[] =   { NULL, NULL, "i2s1m_div"};
+static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
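+/* register values for the mux inputs; the third input of the three-parent
+ * muxes is selected with value 3, not 2 */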
+static u32 three_mux_table[] = {0, 1, 3};
+
+static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
+       { 1, three_mux_table, "main_mux",       main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
+       { 1, three_mux_table, "uart_mux",       main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
+       { 1, three_mux_table, "wdt_mux",        main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
+       { 3, three_mux_table, "i2s0_mux",       i2s0_mux_p,
+               ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
+       { 3, three_mux_table, "i2s1_mux",       i2s1_mux_p,
+               ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
+       { 3, three_mux_table, "clkout_mux",     clkout_mux_p,
+               ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
+};
+
+static void __init asm9260_acc_init(struct device_node *np)
+{
+       struct clk *clk;
+       const char *ref_clk, *pll_clk = "pll";
+       u32 rate;
+       int n;
+       u32 accuracy = 0;
+
+       base = of_io_request_and_map(np, 0, np->name);
+       if (IS_ERR(base))
+               panic("%s: unable to map resource", np->name);
+
+       /* register pll */
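+       /* the low 16 bits of SYSPLLCTRL give the PLL output rate in MHz */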
+       rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
+
+       ref_clk = of_clk_get_parent_name(np, 0);
+       accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
+       clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
+                       ref_clk, 0, rate, accuracy);
+
+       if (IS_ERR(clk))
+               panic("%s: can't register REFCLK. Check DT!", np->name);
+
+       for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
+               const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
+
+               mc->parent_names[0] = ref_clk;
+               mc->parent_names[1] = pll_clk;
+               clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
+                               mc->num_parents, mc->flags, base + mc->offset,
+                               0, mc->mask, 0, mc->table, &asm9260_clk_lock);
+       }
+
+       /* clock mux gate cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
+               const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
+
+               clk = clk_register_gate(NULL, gd->name,
+                       gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
+                       base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
+       }
+
+       /* clock div cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
+               const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
+
+               clks[dc->idx] = clk_register_divider(NULL, dc->name,
+                               dc->parent_name, CLK_SET_RATE_PARENT,
+                               base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
+                               &asm9260_clk_lock);
+       }
+
+       /* clock ahb gate cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
+               const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
+
+               clks[gd->idx] = clk_register_gate(NULL, gd->name,
+                               gd->parent_name, gd->flags, base + gd->reg,
+                               gd->bit_idx, 0, &asm9260_clk_lock);
+       }
+
+       /* check for errors on leaf clocks */
+       for (n = 0; n < MAX_CLKS; n++) {
+               if (!IS_ERR(clks[n]))
+                       continue;
+
+               pr_err("%s: Unable to register leaf clock %d\n",
+                               np->full_name, n);
+               goto fail;
+       }
+
+       /* register clk-provider */
+       clk_data.clks = clks;
+       clk_data.clk_num = MAX_CLKS;
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       return;
+fail:
+       iounmap(base);
+}
+CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
+               asm9260_acc_init);
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
new file mode 100644 (file)
index 0000000..c386ad2
--- /dev/null
@@ -0,0 +1,700 @@
+/*
+ * TI CDCE706 programmable 3-PLL clock synthesizer driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CDCE706_CLKIN_CLOCK            10
+#define CDCE706_CLKIN_SOURCE           11
+#define CDCE706_PLL_M_LOW(pll)         (1 + 3 * (pll))
+#define CDCE706_PLL_N_LOW(pll)         (2 + 3 * (pll))
+#define CDCE706_PLL_HI(pll)            (3 + 3 * (pll))
+#define CDCE706_PLL_MUX                        3
+#define CDCE706_PLL_FVCO               6
+#define CDCE706_DIVIDER(div)           (13 + (div))
+#define CDCE706_CLKOUT(out)            (19 + (out))
+
+#define CDCE706_CLKIN_CLOCK_MASK       0x10
+#define CDCE706_CLKIN_SOURCE_SHIFT     6
+#define CDCE706_CLKIN_SOURCE_MASK      0xc0
+#define CDCE706_CLKIN_SOURCE_LVCMOS    0x40
+
+#define CDCE706_PLL_MUX_MASK(pll)      (0x80 >> (pll))
+#define CDCE706_PLL_LOW_M_MASK         0xff
+#define CDCE706_PLL_LOW_N_MASK         0xff
+#define CDCE706_PLL_HI_M_MASK          0x1
+#define CDCE706_PLL_HI_N_MASK          0x1e
+#define CDCE706_PLL_HI_N_SHIFT         1
+#define CDCE706_PLL_M_MAX              0x1ff
+#define CDCE706_PLL_N_MAX              0xfff
+#define CDCE706_PLL_FVCO_MASK(pll)     (0x80 >> (pll))
+#define CDCE706_PLL_FREQ_MIN            80000000
+#define CDCE706_PLL_FREQ_MAX           300000000
+#define CDCE706_PLL_FREQ_HI            180000000
+
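+/* PLL selection for the six dividers is packed into registers 9..12:
+ * dividers 0 and 1 each get their own register (field at bit 5), while
+ * dividers 2/3 share register 11 and 4/5 share register 12 in 3-bit
+ * fields at bits 0 and 3. */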
+#define CDCE706_DIVIDER_PLL(div)       (9 + (div) - ((div) > 2) - ((div) > 4))
+#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 5 : 3 * ((div) & 1))
+#define CDCE706_DIVIDER_PLL_MASK(div)  (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
+#define CDCE706_DIVIDER_DIVIDER_MASK   0x7f
+#define CDCE706_DIVIDER_DIVIDER_MAX    0x7f
+
+#define CDCE706_CLKOUT_DIVIDER_MASK    0x7
+#define CDCE706_CLKOUT_ENABLE_MASK     0x8
+
+static struct regmap_config cdce706_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
+#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
+
+struct cdce706_hw_data {
+       struct cdce706_dev_data *dev_data;
+       unsigned idx;
+       unsigned parent;
+       struct clk *clk;
+       struct clk_hw hw;
+       unsigned div;
+       unsigned mul;
+       unsigned mux;
+};
+
+struct cdce706_dev_data {
+       struct i2c_client *client;
+       struct regmap *regmap;
+       struct clk_onecell_data onecell;
+       struct clk *clks[6];
+       struct clk *clkin_clk[2];
+       const char *clkin_name[2];
+       struct cdce706_hw_data clkin[1];
+       struct cdce706_hw_data pll[3];
+       struct cdce706_hw_data divider[6];
+       struct cdce706_hw_data clkout[6];
+};
+
+static const char * const cdce706_source_name[] = {
+       "clk_in0", "clk_in1",
+};
+
+static const char *cdce706_clkin_name[] = {
+       "clk_in",
+};
+
+static const char * const cdce706_pll_name[] = {
+       "pll1", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_parent_name[] = {
+       "clk_in", "pll1", "pll2", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_name[] = {
+       "p0", "p1", "p2", "p3", "p4", "p5",
+};
+
+static const char * const cdce706_clkout_name[] = {
+       "clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
+};
+
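+/* All register accesses set bit 7 of the command code, which selects byte
+ * (rather than block) transfers on the CDCE706 I2C interface. */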
+static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
+                           unsigned *val)
+{
+       int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error reading reg %u", reg);
+       return rc;
+}
+
+static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
+                            unsigned val)
+{
+       int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error writing reg %u", reg);
+       return rc;
+}
+
+static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
+                             unsigned mask, unsigned val)
+{
+       int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error updating reg %u", reg);
+       return rc;
+}
+
+static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       hwd->parent = index;
+       return 0;
+}
+
+static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+static const struct clk_ops cdce706_clkin_ops = {
+       .set_parent = cdce706_clkin_set_parent,
+       .get_parent = cdce706_clkin_get_parent,
+};
+
+static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mux: %d, mul: %u, div: %u\n",
+               __func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
+
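+       /* with the mux bit set the PLL path is bypassed and only the
+        * divider applies */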
+       if (!hwd->mux) {
+               if (hwd->div && hwd->mul) {
+                       u64 res = (u64)parent_rate * hwd->mul;
+
+                       do_div(res, hwd->div);
+                       return res;
+               }
+       } else {
+               if (hwd->div)
+                       return parent_rate / hwd->div;
+       }
+       return 0;
+}
+
+static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long *parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       unsigned long mul, div;
+       u64 res;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, rate: %lu, parent_rate: %lu\n",
+               __func__, rate, *parent_rate);
+
+       rational_best_approximation(rate, *parent_rate,
+                                   CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
+                                   &mul, &div);
+       hwd->mul = mul;
+       hwd->div = div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mul: %lu, div: %lu\n",
+               __func__, hwd->idx, mul, div);
+
+       res = (u64)*parent_rate * hwd->mul;
+       do_div(res, hwd->div);
+       return res;
+}
+
+static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       unsigned long mul = hwd->mul, div = hwd->div;
+       int err;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mul: %lu, div: %lu\n",
+               __func__, hwd->idx, mul, div);
+
+       err = cdce706_reg_update(hwd->dev_data,
+                                CDCE706_PLL_HI(hwd->idx),
+                                CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
+                                ((div >> 8) & CDCE706_PLL_HI_M_MASK) |
+                                ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
+                                 CDCE706_PLL_HI_N_MASK));
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_write(hwd->dev_data,
+                               CDCE706_PLL_M_LOW(hwd->idx),
+                               div & CDCE706_PLL_LOW_M_MASK);
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_write(hwd->dev_data,
+                               CDCE706_PLL_N_LOW(hwd->idx),
+                               mul & CDCE706_PLL_LOW_N_MASK);
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_update(hwd->dev_data,
+                                CDCE706_PLL_FVCO,
+                                CDCE706_PLL_FVCO_MASK(hwd->idx),
+                                rate > CDCE706_PLL_FREQ_HI ?
+                                CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
+       return err;
+}
+
+static const struct clk_ops cdce706_pll_ops = {
+       .recalc_rate = cdce706_pll_recalc_rate,
+       .round_rate = cdce706_pll_round_rate,
+       .set_rate = cdce706_pll_set_rate,
+};
+
+static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       if (hwd->parent == index)
+               return 0;
+       hwd->parent = index;
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_DIVIDER_PLL(hwd->idx),
+                                 CDCE706_DIVIDER_PLL_MASK(hwd->idx),
+                                 index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
+}
+
+static u8 cdce706_divider_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
+                                                unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %u\n",
+               __func__, hwd->idx, hwd->div);
+       if (hwd->div)
+               return parent_rate / hwd->div;
+       return 0;
+}
+
+static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long *parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       struct cdce706_dev_data *cdce = hwd->dev_data;
+       unsigned long mul, div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, rate: %lu, parent_rate: %lu\n",
+               __func__, rate, *parent_rate);
+
+       rational_best_approximation(rate, *parent_rate,
+                                   1, CDCE706_DIVIDER_DIVIDER_MAX,
+                                   &mul, &div);
+       if (!mul)
+               div = CDCE706_DIVIDER_DIVIDER_MAX;
+
+       if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+               unsigned long best_diff = rate;
+               unsigned long best_div = 0;
+               struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
+               unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
+
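+               /*
+                * CLK_SET_RATE_PARENT: scan candidate PLL rates between
+                * FREQ_MIN and FREQ_MAX for the divider value that gives
+                * the output rate closest to the request.
+                */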
+               for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
+                    div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
+                       unsigned long n, m;
+                       unsigned long diff;
+                       unsigned long div_rate;
+                       u64 div_rate64;
+
+                       if (rate * div < CDCE706_PLL_FREQ_MIN)
+                               continue;
+
+                       rational_best_approximation(rate * div, gp_rate,
+                                                   CDCE706_PLL_N_MAX,
+                                                   CDCE706_PLL_M_MAX,
+                                                   &n, &m);
+                       div_rate64 = (u64)gp_rate * n;
+                       do_div(div_rate64, m);
+                       do_div(div_rate64, div);
+                       div_rate = div_rate64;
+                       diff = max(div_rate, rate) - min(div_rate, rate);
+
+                       if (diff < best_diff) {
+                               best_diff = diff;
+                               best_div = div;
+                               dev_dbg(&hwd->dev_data->client->dev,
+                                       "%s, %lu * %lu / %lu / %lu = %lu\n",
+                                       __func__, gp_rate, n, m, div, div_rate);
+                       }
+               }
+
+               div = best_div;
+
+               dev_dbg(&hwd->dev_data->client->dev,
+                       "%s, altering parent rate: %lu -> %lu\n",
+                       __func__, *parent_rate, rate * div);
+               *parent_rate = rate * div;
+       }
+       hwd->div = div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %lu\n",
+               __func__, hwd->idx, div);
+
+       return *parent_rate / div;
+}
+
+static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %u\n",
+               __func__, hwd->idx, hwd->div);
+
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_DIVIDER(hwd->idx),
+                                 CDCE706_DIVIDER_DIVIDER_MASK,
+                                 hwd->div);
+}
+
+static const struct clk_ops cdce706_divider_ops = {
+       .set_parent = cdce706_divider_set_parent,
+       .get_parent = cdce706_divider_get_parent,
+       .recalc_rate = cdce706_divider_recalc_rate,
+       .round_rate = cdce706_divider_round_rate,
+       .set_rate = cdce706_divider_set_rate,
+};
+
+static int cdce706_clkout_prepare(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+                                 CDCE706_CLKOUT_ENABLE_MASK,
+                                 CDCE706_CLKOUT_ENABLE_MASK);
+}
+
+static void cdce706_clkout_unprepare(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+                          CDCE706_CLKOUT_ENABLE_MASK, 0);
+}
+
+static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       if (hwd->parent == index)
+               return 0;
+       hwd->parent = index;
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_CLKOUT(hwd->idx),
+                                 CDCE706_CLKOUT_DIVIDER_MASK, index);
+}
+
+static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       return parent_rate;
+}
+
+static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+                                     unsigned long *parent_rate)
+{
+       *parent_rate = rate;
+       return rate;
+}
+
+static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       return 0;
+}
+
+static const struct clk_ops cdce706_clkout_ops = {
+       .prepare = cdce706_clkout_prepare,
+       .unprepare = cdce706_clkout_unprepare,
+       .set_parent = cdce706_clkout_set_parent,
+       .get_parent = cdce706_clkout_get_parent,
+       .recalc_rate = cdce706_clkout_recalc_rate,
+       .round_rate = cdce706_clkout_round_rate,
+       .set_rate = cdce706_clkout_set_rate,
+};
+
+static int cdce706_register_hw(struct cdce706_dev_data *cdce,
+                              struct cdce706_hw_data *hw, unsigned num_hw,
+                              const char * const *clk_names,
+                              struct clk_init_data *init)
+{
+       unsigned i;
+
+       for (i = 0; i < num_hw; ++i, ++hw) {
+               init->name = clk_names[i];
+               hw->dev_data = cdce;
+               hw->idx = i;
+               hw->hw.init = init;
+               hw->clk = devm_clk_register(&cdce->client->dev,
+                                           &hw->hw);
+               if (IS_ERR(hw->clk)) {
+                       dev_err(&cdce->client->dev, "Failed to register %s\n",
+                               clk_names[i]);
+                       return PTR_ERR(hw->clk);
+               }
+       }
+       return 0;
+}
+
+static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_clkin_ops,
+               .parent_names = cdce->clkin_name,
+               .num_parents = ARRAY_SIZE(cdce->clkin_name),
+       };
+       unsigned i;
+       int ret;
+       unsigned clock, source;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
+               struct clk *parent = devm_clk_get(&cdce->client->dev,
+                                                 cdce706_source_name[i]);
+
+               if (IS_ERR(parent)) {
+                       cdce->clkin_name[i] = cdce706_source_name[i];
+               } else {
+                       cdce->clkin_name[i] = __clk_get_name(parent);
+                       cdce->clkin_clk[i] = parent;
+               }
+       }
+
+       ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
+       if (ret < 0)
+               return ret;
+       if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
+           CDCE706_CLKIN_SOURCE_LVCMOS) {
+               ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
+               if (ret < 0)
+                       return ret;
+               cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->clkin,
+                                 ARRAY_SIZE(cdce->clkin),
+                                 cdce706_clkin_name, &init);
+       return ret;
+}
+
+static int cdce706_register_plls(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_pll_ops,
+               .parent_names = cdce706_clkin_name,
+               .num_parents = ARRAY_SIZE(cdce706_clkin_name),
+       };
+       unsigned i;
+       int ret;
+       unsigned mux;
+
+       ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
+               unsigned m, n, v;
+
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
+               if (ret < 0)
+                       return ret;
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
+               if (ret < 0)
+                       return ret;
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
+               if (ret < 0)
+                       return ret;
+               cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
+               cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
+                                       (8 - CDCE706_PLL_HI_N_SHIFT));
+               cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
+                       cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->pll,
+                                 ARRAY_SIZE(cdce->pll),
+                                 cdce706_pll_name, &init);
+       return ret;
+}
+
+static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_divider_ops,
+               .parent_names = cdce706_divider_parent_name,
+               .num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
+               .flags = CLK_SET_RATE_PARENT,
+       };
+       unsigned i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
+               unsigned val;
+
+               ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->divider[i].parent =
+                       (val & CDCE706_DIVIDER_PLL_MASK(i)) >>
+                       CDCE706_DIVIDER_PLL_SHIFT(i);
+
+               ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, parent: %u, div: %u\n", __func__, i,
+                       cdce->divider[i].parent, cdce->divider[i].div);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->divider,
+                                 ARRAY_SIZE(cdce->divider),
+                                 cdce706_divider_name, &init);
+       return ret;
+}
+
+static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_clkout_ops,
+               .parent_names = cdce706_divider_name,
+               .num_parents = ARRAY_SIZE(cdce706_divider_name),
+               .flags = CLK_SET_RATE_PARENT,
+       };
+       unsigned i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
+               unsigned val;
+
+               ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, parent: %u\n", __func__, i,
+                       cdce->clkout[i].parent);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->clkout,
+                                 ARRAY_SIZE(cdce->clkout),
+                                 cdce706_clkout_name, &init);
+       for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
+               cdce->clks[i] = cdce->clkout[i].clk;
+
+       return ret;
+}
+
+static int cdce706_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+       struct cdce706_dev_data *cdce;
+       int ret;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -EIO;
+
+       cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
+       if (!cdce)
+               return -ENOMEM;
+
+       cdce->client = client;
+       cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
+       if (IS_ERR(cdce->regmap)) {
+               dev_err(&client->dev, "Failed to initialize regmap\n");
+               return -EINVAL;
+       }
+
+       i2c_set_clientdata(client, cdce);
+
+       ret = cdce706_register_clkin(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_plls(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_dividers(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_clkouts(cdce);
+       if (ret < 0)
+               return ret;
+       cdce->onecell.clks = cdce->clks;
+       cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
+       ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+                                 &cdce->onecell);
+
+       return ret;
+}
+
+static int cdce706_remove(struct i2c_client *client)
+{
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cdce706_dt_match[] = {
+       { .compatible = "ti,cdce706" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, cdce706_dt_match);
+#endif
+
+static const struct i2c_device_id cdce706_id[] = {
+       { "cdce706", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, cdce706_id);
+
+static struct i2c_driver cdce706_i2c_driver = {
+       .driver = {
+               .name   = "cdce706",
+               .of_match_table = of_match_ptr(cdce706_dt_match),
+       },
+       .probe          = cdce706_probe,
+       .remove         = cdce706_remove,
+       .id_table       = cdce706_id,
+};
+module_i2c_driver(cdce706_i2c_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver");
+MODULE_LICENSE("GPL");
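
For orientation, a consumer of one of the six "clk_out" clocks registered by
this driver would go through the ordinary clk consumer API. The sketch below
is illustrative only: the device, its devicetree wiring and the 24 MHz target
rate are assumptions, not part of this patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative consumer: get the clock referenced by this device's DT node
 * (a phandle to the cdce706 node plus an output index 0..5) and run it at
 * an assumed 24 MHz. */
static int example_enable_output(struct device *dev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_set_rate(clk, 24000000);
        if (ret)
                return ret;

        return clk_prepare_enable(clk);
}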
index 4386697236a78dc23aea66d0c4792873d558f71e..956b7e54fa1c5f4f3583ac642c3f3f2ae255fee2 100644 (file)
@@ -27,7 +27,7 @@ static u8 clk_composite_get_parent(struct clk_hw *hw)
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *mux_hw = composite->mux_hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->get_parent(mux_hw);
 }
@@ -38,7 +38,7 @@ static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *mux_hw = composite->mux_hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->set_parent(mux_hw, index);
 }
@@ -50,12 +50,14 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->recalc_rate(rate_hw, parent_rate);
 }
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_p)
 {
@@ -72,8 +74,10 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
        int i;
 
        if (rate_hw && rate_ops && rate_ops->determine_rate) {
-               rate_hw->clk = hw->clk;
-               return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+               __clk_hw_set_clk(rate_hw, hw);
+               return rate_ops->determine_rate(rate_hw, rate, min_rate,
+                                               max_rate,
+                                               best_parent_rate,
                                                best_parent_p);
        } else if (rate_hw && rate_ops && rate_ops->round_rate &&
                   mux_hw && mux_ops && mux_ops->set_parent) {
@@ -116,8 +120,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 
                return best_rate;
        } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
-               mux_hw->clk = hw->clk;
-               return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+               __clk_hw_set_clk(mux_hw, hw);
+               return mux_ops->determine_rate(mux_hw, rate, min_rate,
+                                              max_rate, best_parent_rate,
                                               best_parent_p);
        } else {
                pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
@@ -132,7 +137,7 @@ static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->round_rate(rate_hw, rate, prate);
 }
@@ -144,7 +149,7 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->set_rate(rate_hw, rate, parent_rate);
 }
@@ -155,7 +160,7 @@ static int clk_composite_is_enabled(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->is_enabled(gate_hw);
 }
@@ -166,7 +171,7 @@ static int clk_composite_enable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->enable(gate_hw);
 }
@@ -177,7 +182,7 @@ static void clk_composite_disable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        gate_ops->disable(gate_hw);
 }
index c0a842b335c520c6c28f08308a1b62a743038dd3..25006a8bb8e6d5af8d145472fc76e2ef45281f09 100644 (file)
@@ -30,7 +30,7 @@
 
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
-#define div_mask(d)    ((1 << ((d)->width)) - 1)
+#define div_mask(width)        ((1 << (width)) - 1)
 
 static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
 {
@@ -54,15 +54,16 @@ static unsigned int _get_table_mindiv(const struct clk_div_table *table)
        return mindiv;
 }
 
-static unsigned int _get_maxdiv(struct clk_divider *divider)
+static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
+                               unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
-               return div_mask(divider);
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-               return 1 << div_mask(divider);
-       if (divider->table)
-               return _get_table_maxdiv(divider->table);
-       return div_mask(divider) + 1;
+       if (flags & CLK_DIVIDER_ONE_BASED)
+               return div_mask(width);
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << div_mask(width);
+       if (table)
+               return _get_table_maxdiv(table);
+       return div_mask(width) + 1;
 }
 
 static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -76,14 +77,15 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+static unsigned int _get_div(const struct clk_div_table *table,
+                            unsigned int val, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+       if (flags & CLK_DIVIDER_ONE_BASED)
                return val;
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return 1 << val;
-       if (divider->table)
-               return _get_table_div(divider->table, val);
+       if (table)
+               return _get_table_div(table, val);
        return val + 1;
 }
 
@@ -98,29 +100,28 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+static unsigned int _get_val(const struct clk_div_table *table,
+                            unsigned int div, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+       if (flags & CLK_DIVIDER_ONE_BASED)
                return div;
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return __ffs(div);
-       if (divider->table)
-               return  _get_table_val(divider->table, div);
+       if (table)
+               return  _get_table_val(table, div);
        return div - 1;
 }
 
-static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+                                 unsigned int val,
+                                 const struct clk_div_table *table,
+                                 unsigned long flags)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
-       unsigned int div, val;
-
-       val = clk_readl(divider->reg) >> divider->shift;
-       val &= div_mask(divider);
+       unsigned int div;
 
-       div = _get_div(divider, val);
+       div = _get_div(table, val, flags);
        if (!div) {
-               WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+               WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
                        "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
                        __clk_get_name(hw->clk));
                return parent_rate;
@@ -128,12 +129,20 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
 
        return DIV_ROUND_UP(parent_rate, div);
 }
+EXPORT_SYMBOL_GPL(divider_recalc_rate);
 
-/*
- * The reverse of DIV_ROUND_UP: The maximum number which
- * divided by m is r
- */
-#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int val;
+
+       val = clk_readl(divider->reg) >> divider->shift;
+       val &= div_mask(divider->width);
+
+       return divider_recalc_rate(hw, parent_rate, val, divider->table,
+                                  divider->flags);
+}
 
 static bool _is_valid_table_div(const struct clk_div_table *table,
                                                         unsigned int div)
@@ -146,12 +155,13 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
        return false;
 }
 
-static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
+                         unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return is_power_of_2(div);
-       if (divider->table)
-               return _is_valid_table_div(divider->table, div);
+       if (table)
+               return _is_valid_table_div(table, div);
        return true;
 }
 
@@ -191,71 +201,81 @@ static int _round_down_table(const struct clk_div_table *table, int div)
        return down;
 }
 
-static int _div_round_up(struct clk_divider *divider,
-               unsigned long parent_rate, unsigned long rate)
+static int _div_round_up(const struct clk_div_table *table,
+                        unsigned long parent_rate, unsigned long rate,
+                        unsigned long flags)
 {
        int div = DIV_ROUND_UP(parent_rate, rate);
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                div = __roundup_pow_of_two(div);
-       if (divider->table)
-               div = _round_up_table(divider->table, div);
+       if (table)
+               div = _round_up_table(table, div);
 
        return div;
 }
 
-static int _div_round_closest(struct clk_divider *divider,
-               unsigned long parent_rate, unsigned long rate)
+static int _div_round_closest(const struct clk_div_table *table,
+                             unsigned long parent_rate, unsigned long rate,
+                             unsigned long flags)
 {
-       int up, down, div;
-
-       up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
-
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) {
-               up = __roundup_pow_of_two(div);
-               down = __rounddown_pow_of_two(div);
-       } else if (divider->table) {
-               up = _round_up_table(divider->table, div);
-               down = _round_down_table(divider->table, div);
+       int up, down;
+       unsigned long up_rate, down_rate;
+
+       up = DIV_ROUND_UP(parent_rate, rate);
+       down = parent_rate / rate;
+
+       if (flags & CLK_DIVIDER_POWER_OF_TWO) {
+               up = __roundup_pow_of_two(up);
+               down = __rounddown_pow_of_two(down);
+       } else if (table) {
+               up = _round_up_table(table, up);
+               down = _round_down_table(table, down);
        }
 
-       return (up - div) <= (div - down) ? up : down;
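+       /* pick by comparing the achievable rates, not the divisor distance */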
+       up_rate = DIV_ROUND_UP(parent_rate, up);
+       down_rate = DIV_ROUND_UP(parent_rate, down);
+
+       return (rate - up_rate) <= (down_rate - rate) ? up : down;
 }
 
-static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
-               unsigned long rate)
+static int _div_round(const struct clk_div_table *table,
+                     unsigned long parent_rate, unsigned long rate,
+                     unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
-               return _div_round_closest(divider, parent_rate, rate);
+       if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+               return _div_round_closest(table, parent_rate, rate, flags);
 
-       return _div_round_up(divider, parent_rate, rate);
+       return _div_round_up(table, parent_rate, rate, flags);
 }
 
-static bool _is_best_div(struct clk_divider *divider,
-               unsigned long rate, unsigned long now, unsigned long best)
+static bool _is_best_div(unsigned long rate, unsigned long now,
+                        unsigned long best, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+       if (flags & CLK_DIVIDER_ROUND_CLOSEST)
                return abs(rate - now) < abs(rate - best);
 
        return now <= rate && now > best;
 }
 
-static int _next_div(struct clk_divider *divider, int div)
+static int _next_div(const struct clk_div_table *table, int div,
+                    unsigned long flags)
 {
        div++;
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return __roundup_pow_of_two(div);
-       if (divider->table)
-               return _round_up_table(divider->table, div);
+       if (table)
+               return _round_up_table(table, div);
 
        return div;
 }
 
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
-               unsigned long *best_parent_rate)
+                              unsigned long *best_parent_rate,
+                              const struct clk_div_table *table, u8 width,
+                              unsigned long flags)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
        int i, bestdiv = 0;
        unsigned long parent_rate, best = 0, now, maxdiv;
        unsigned long parent_rate_saved = *best_parent_rate;
@@ -263,19 +283,11 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (!rate)
                rate = 1;
 
-       /* if read only, just return current value */
-       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
-               bestdiv = readl(divider->reg) >> divider->shift;
-               bestdiv &= div_mask(divider);
-               bestdiv = _get_div(divider, bestdiv);
-               return bestdiv;
-       }
-
-       maxdiv = _get_maxdiv(divider);
+       maxdiv = _get_maxdiv(table, width, flags);
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
                parent_rate = *best_parent_rate;
-               bestdiv = _div_round(divider, parent_rate, rate);
+               bestdiv = _div_round(table, parent_rate, rate, flags);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
                return bestdiv;
@@ -287,8 +299,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
         */
        maxdiv = min(ULONG_MAX / rate, maxdiv);
 
-       for (i = 1; i <= maxdiv; i = _next_div(divider, i)) {
-               if (!_is_valid_div(divider, i))
+       for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
+               if (!_is_valid_div(table, i, flags))
                        continue;
                if (rate * i == parent_rate_saved) {
                        /*
@@ -300,9 +312,9 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                        return i;
                }
                parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
-                               MULT_ROUND_UP(rate, i));
+                                              rate * i);
                now = DIV_ROUND_UP(parent_rate, i);
-               if (_is_best_div(divider, rate, now, best)) {
+               if (_is_best_div(rate, now, best, flags)) {
                        bestdiv = i;
                        best = now;
                        *best_parent_rate = parent_rate;
@@ -310,48 +322,79 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        }
 
        if (!bestdiv) {
-               bestdiv = _get_maxdiv(divider);
+               bestdiv = _get_maxdiv(table, width, flags);
                *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
        }
 
        return bestdiv;
 }
 
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long *prate)
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long *prate, const struct clk_div_table *table,
+                       u8 width, unsigned long flags)
 {
        int div;
-       div = clk_divider_bestdiv(hw, rate, prate);
+
+       div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
 
        return DIV_ROUND_UP(*prate, div);
 }
+EXPORT_SYMBOL_GPL(divider_round_rate);
 
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long parent_rate)
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long *prate)
 {
        struct clk_divider *divider = to_clk_divider(hw);
+       int bestdiv;
+
+       /* if read only, just return current value */
+       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+               bestdiv = readl(divider->reg) >> divider->shift;
+               bestdiv &= div_mask(divider->width);
+               bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+               return DIV_ROUND_UP(*prate, bestdiv);
+       }
+
+       return divider_round_rate(hw, rate, prate, divider->table,
+                                 divider->width, divider->flags);
+}
+
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+                   const struct clk_div_table *table, u8 width,
+                   unsigned long flags)
+{
        unsigned int div, value;
-       unsigned long flags = 0;
-       u32 val;
 
        div = DIV_ROUND_UP(parent_rate, rate);
 
-       if (!_is_valid_div(divider, div))
+       if (!_is_valid_div(table, div, flags))
                return -EINVAL;
 
-       value = _get_val(divider, div);
+       value = _get_val(table, div, flags);
 
-       if (value > div_mask(divider))
-               value = div_mask(divider);
+       return min_t(unsigned int, value, div_mask(width));
+}
+EXPORT_SYMBOL_GPL(divider_get_val);
+
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int value;
+       unsigned long flags = 0;
+       u32 val;
+
+       value = divider_get_val(rate, parent_rate, divider->table,
+                               divider->width, divider->flags);
 
        if (divider->lock)
                spin_lock_irqsave(divider->lock, flags);
 
        if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
-               val = div_mask(divider) << (divider->shift + 16);
+               val = div_mask(divider->width) << (divider->shift + 16);
        } else {
                val = clk_readl(divider->reg);
-               val &= ~(div_mask(divider) << divider->shift);
+               val &= ~(div_mask(divider->width) << divider->shift);
        }
        val |= value << divider->shift;
        clk_writel(val, divider->reg);
@@ -463,3 +506,19 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                        width, clk_divider_flags, table, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
+
+void clk_unregister_divider(struct clk *clk)
+{
+       struct clk_divider *div;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       div = to_clk_divider(hw);
+
+       clk_unregister(clk);
+       kfree(div);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_divider);
index 51fd87fb7ba691e8a52e40ddc1ee4524ecd3e781..3f0e4200cb5d4ca4a680c78479ac86ed116766a5 100644 (file)
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        struct clk_init_data init;
 
        if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
-               if (bit_idx > 16) {
+               if (bit_idx > 15) {
                        pr_err("gate bit exceeds LOWORD field\n");
                        return ERR_PTR(-EINVAL);
                }
@@ -162,3 +162,19 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        return clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_gate);
+
+void clk_unregister_gate(struct clk *clk)
+{
+       struct clk_gate *gate;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       gate = to_clk_gate(hw);
+
+       clk_unregister(clk);
+       kfree(gate);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_gate);
index 6e1ecf94bf58daa279cb47e42065da9a9db3c581..69a094c3783d8eb2a2c0d3624f3a641f97a5d484 100644 (file)
@@ -177,3 +177,19 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
                                      NULL, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_mux);
+
+void clk_unregister_mux(struct clk *clk)
+{
+       struct clk_mux *mux;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       mux = to_clk_mux(hw);
+
+       clk_unregister(clk);
+       kfree(mux);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_mux);
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
deleted file mode 100644 (file)
index 0a47d6f..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * clock driver for Freescale PowerPC corenet SoCs.
- */
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-struct cmux_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-       u32 flags;
-};
-
-#define PLL_KILL                       BIT(31)
-#define        CLKSEL_SHIFT            27
-#define CLKSEL_ADJUST          BIT(0)
-#define to_cmux_clk(p)         container_of(p, struct cmux_clk, hw)
-
-static unsigned int clocks_per_pll;
-
-static int cmux_set_parent(struct clk_hw *hw, u8 idx)
-{
-       struct cmux_clk *clk = to_cmux_clk(hw);
-       u32 clksel;
-
-       clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll;
-       if (clk->flags & CLKSEL_ADJUST)
-               clksel += 8;
-       clksel = (clksel & 0xf) << CLKSEL_SHIFT;
-       iowrite32be(clksel, clk->reg);
-
-       return 0;
-}
-
-static u8 cmux_get_parent(struct clk_hw *hw)
-{
-       struct cmux_clk *clk = to_cmux_clk(hw);
-       u32 clksel;
-
-       clksel = ioread32be(clk->reg);
-       clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
-       if (clk->flags & CLKSEL_ADJUST)
-               clksel -= 8;
-       clksel = (clksel >> 2) * clocks_per_pll + clksel % 4;
-
-       return clksel;
-}
-
-const struct clk_ops cmux_ops = {
-       .get_parent = cmux_get_parent,
-       .set_parent = cmux_set_parent,
-};
-
-static void __init core_mux_init(struct device_node *np)
-{
-       struct clk *clk;
-       struct clk_init_data init;
-       struct cmux_clk *cmux_clk;
-       struct device_node *node;
-       int rc, count, i;
-       u32     offset;
-       const char *clk_name;
-       const char **parent_names;
-
-       rc = of_property_read_u32(np, "reg", &offset);
-       if (rc) {
-               pr_err("%s: could not get reg property\n", np->name);
-               return;
-       }
-
-       /* get the input clock source count */
-       count = of_property_count_strings(np, "clock-names");
-       if (count < 0) {
-               pr_err("%s: get clock count error\n", np->name);
-               return;
-       }
-       parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL);
-       if (!parent_names) {
-               pr_err("%s: could not allocate parent_names\n", __func__);
-               return;
-       }
-
-       for (i = 0; i < count; i++)
-               parent_names[i] = of_clk_get_parent_name(np, i);
-
-       cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL);
-       if (!cmux_clk) {
-               pr_err("%s: could not allocate cmux_clk\n", __func__);
-               goto err_name;
-       }
-       cmux_clk->reg = of_iomap(np, 0);
-       if (!cmux_clk->reg) {
-               pr_err("%s: could not map register\n", __func__);
-               goto err_clk;
-       }
-
-       node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
-       if (node && (offset >= 0x80))
-               cmux_clk->flags = CLKSEL_ADJUST;
-
-       rc = of_property_read_string_index(np, "clock-output-names",
-                       0, &clk_name);
-       if (rc) {
-               pr_err("%s: read clock names error\n", np->name);
-               goto err_clk;
-       }
-
-       init.name = clk_name;
-       init.ops = &cmux_ops;
-       init.parent_names = parent_names;
-       init.num_parents = count;
-       init.flags = 0;
-       cmux_clk->hw.init = &init;
-
-       clk = clk_register(NULL, &cmux_clk->hw);
-       if (IS_ERR(clk)) {
-               pr_err("%s: could not register clock\n", clk_name);
-               goto err_clk;
-       }
-
-       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
-       if (rc) {
-               pr_err("Could not register clock provider for node:%s\n",
-                        np->name);
-               goto err_clk;
-       }
-       goto err_name;
-
-err_clk:
-       kfree(cmux_clk);
-err_name:
-       /* free *_names because they are reallocated when registered */
-       kfree(parent_names);
-}
-
-static void __init core_pll_init(struct device_node *np)
-{
-       u32 mult;
-       int i, rc, count;
-       const char *clk_name, *parent_name;
-       struct clk_onecell_data *onecell_data;
-       struct clk      **subclks;
-       void __iomem *base;
-
-       base = of_iomap(np, 0);
-       if (!base) {
-               pr_err("clk-ppc: iomap error\n");
-               return;
-       }
-
-       /* get the multiple of PLL */
-       mult = ioread32be(base);
-
-       /* check if this PLL is disabled */
-       if (mult & PLL_KILL) {
-               pr_debug("PLL:%s is disabled\n", np->name);
-               goto err_map;
-       }
-       mult = (mult >> 1) & 0x3f;
-
-       parent_name = of_clk_get_parent_name(np, 0);
-       if (!parent_name) {
-               pr_err("PLL: %s must have a parent\n", np->name);
-               goto err_map;
-       }
-
-       count = of_property_count_strings(np, "clock-output-names");
-       if (count < 0 || count > 4) {
-               pr_err("%s: clock is not supported\n", np->name);
-               goto err_map;
-       }
-
-       /* output clock number per PLL */
-       clocks_per_pll = count;
-
-       subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
-       if (!subclks) {
-               pr_err("%s: could not allocate subclks\n", __func__);
-               goto err_map;
-       }
-
-       onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
-       if (!onecell_data) {
-               pr_err("%s: could not allocate onecell_data\n", __func__);
-               goto err_clks;
-       }
-
-       for (i = 0; i < count; i++) {
-               rc = of_property_read_string_index(np, "clock-output-names",
-                               i, &clk_name);
-               if (rc) {
-                       pr_err("%s: could not get clock names\n", np->name);
-                       goto err_cell;
-               }
-
-               /*
-                * when count == 4, there are 4 output clocks:
-                * /1, /2, /3, /4 respectively
-                * when count < 4, there are at least 2 output clocks:
-                * /1, /2, (/4, if count == 3) respectively.
-                */
-               if (count == 4)
-                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-                                       parent_name, 0, mult, 1 + i);
-               else
-
-                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-                                       parent_name, 0, mult, 1 << i);
-
-               if (IS_ERR(subclks[i])) {
-                       pr_err("%s: could not register clock\n", clk_name);
-                       goto err_cell;
-               }
-       }
-
-       onecell_data->clks = subclks;
-       onecell_data->clk_num = count;
-
-       rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
-       if (rc) {
-               pr_err("Could not register clk provider for node:%s\n",
-                        np->name);
-               goto err_cell;
-       }
-
-       iounmap(base);
-       return;
-err_cell:
-       kfree(onecell_data);
-err_clks:
-       kfree(subclks);
-err_map:
-       iounmap(base);
-}
-
-static void __init sysclk_init(struct device_node *node)
-{
-       struct clk *clk;
-       const char *clk_name = node->name;
-       struct device_node *np = of_get_parent(node);
-       u32 rate;
-
-       if (!np) {
-               pr_err("ppc-clk: could not get parent node\n");
-               return;
-       }
-
-       if (of_property_read_u32(np, "clock-frequency", &rate)) {
-               of_node_put(node);
-               return;
-       }
-
-       of_property_read_string(np, "clock-output-names", &clk_name);
-
-       clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
-       if (!IS_ERR(clk))
-               of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
-static const struct of_device_id clk_match[] __initconst = {
-       { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
-       { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
-       { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
-       { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
-       { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
-       { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
-       {}
-};
-
-static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
-{
-       of_clk_init(clk_match);
-
-       return 0;
-}
-
-static const struct of_device_id ppc_clk_ids[] __initconst = {
-       { .compatible = "fsl,qoriq-clockgen-1.0", },
-       { .compatible = "fsl,qoriq-clockgen-2.0", },
-       {}
-};
-
-static struct platform_driver ppc_corenet_clk_driver = {
-       .driver = {
-               .name = "ppc_corenet_clock",
-               .of_match_table = ppc_clk_ids,
-       },
-       .probe = ppc_corenet_clk_probe,
-};
-
-static int __init ppc_corenet_clk_init(void)
-{
-       return platform_driver_register(&ppc_corenet_clk_driver);
-}
-subsys_initcall(ppc_corenet_clk_init);
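
The PowerPC corenet clock driver is not simply dropped: it is reworked into drivers/clk/clk-qoriq.c (added just below), where the clock types register themselves through CLK_OF_DECLARE() instead of a platform driver whose probe called of_clk_init(). The core-PLL divider scheme carries over unchanged: with four entries in clock-output-names the outputs are PLL/1, /2, /3 and /4; with fewer entries they are /1, /2 and, when three names are given, /4. A stand-alone check of the resulting rates, using made-up sysclk and multiplier values:

    /*
     * Quick stand-alone check (plain C, illustrative numbers only) of the
     * core_pll_init() divider scheme kept in the new clk-qoriq.c.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned long parent = 100000000UL;     /* 100 MHz sysclk, made up */
            unsigned long mult = 8;                 /* PLL ratio, made up      */
            int count, i;

            for (count = 2; count <= 4; count++) {
                    printf("count=%d:", count);
                    for (i = 0; i < count; i++) {
                            unsigned long div = (count == 4) ? 1 + i : 1 << i;
                            printf(" %lu", parent * mult / div);
                    }
                    printf("\n");
            }
            return 0;
    }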
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
new file mode 100644 (file)
index 0000000..cda90a9
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * clock driver for Freescale QorIQ SoCs.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+struct cmux_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+       unsigned int clk_per_pll;
+       u32 flags;
+};
+
+#define PLL_KILL                       BIT(31)
+#define        CLKSEL_SHIFT            27
+#define CLKSEL_ADJUST          BIT(0)
+#define to_cmux_clk(p)         container_of(p, struct cmux_clk, hw)
+
+static int cmux_set_parent(struct clk_hw *hw, u8 idx)
+{
+       struct cmux_clk *clk = to_cmux_clk(hw);
+       u32 clksel;
+
+       clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
+       if (clk->flags & CLKSEL_ADJUST)
+               clksel += 8;
+       clksel = (clksel & 0xf) << CLKSEL_SHIFT;
+       iowrite32be(clksel, clk->reg);
+
+       return 0;
+}
+
+static u8 cmux_get_parent(struct clk_hw *hw)
+{
+       struct cmux_clk *clk = to_cmux_clk(hw);
+       u32 clksel;
+
+       clksel = ioread32be(clk->reg);
+       clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
+       if (clk->flags & CLKSEL_ADJUST)
+               clksel -= 8;
+       clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
+
+       return clksel;
+}
+
+static const struct clk_ops cmux_ops = {
+       .get_parent = cmux_get_parent,
+       .set_parent = cmux_set_parent,
+};
+
+static void __init core_mux_init(struct device_node *np)
+{
+       struct clk *clk;
+       struct clk_init_data init;
+       struct cmux_clk *cmux_clk;
+       struct device_node *node;
+       int rc, count, i;
+       u32     offset;
+       const char *clk_name;
+       const char **parent_names;
+       struct of_phandle_args clkspec;
+
+       rc = of_property_read_u32(np, "reg", &offset);
+       if (rc) {
+               pr_err("%s: could not get reg property\n", np->name);
+               return;
+       }
+
+       /* get the input clock source count */
+       count = of_property_count_strings(np, "clock-names");
+       if (count < 0) {
+               pr_err("%s: get clock count error\n", np->name);
+               return;
+       }
+       parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
+       if (!parent_names)
+               return;
+
+       for (i = 0; i < count; i++)
+               parent_names[i] = of_clk_get_parent_name(np, i);
+
+       cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
+       if (!cmux_clk)
+               goto err_name;
+
+       cmux_clk->reg = of_iomap(np, 0);
+       if (!cmux_clk->reg) {
+               pr_err("%s: could not map register\n", __func__);
+               goto err_clk;
+       }
+
+       rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
+                                       &clkspec);
+       if (rc) {
+               pr_err("%s: parse clock node error\n", __func__);
+               goto err_clk;
+       }
+
+       cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
+                       "clock-output-names");
+       of_node_put(clkspec.np);
+
+       node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
+       if (node && (offset >= 0x80))
+               cmux_clk->flags = CLKSEL_ADJUST;
+
+       rc = of_property_read_string_index(np, "clock-output-names",
+                                          0, &clk_name);
+       if (rc) {
+               pr_err("%s: read clock names error\n", np->name);
+               goto err_clk;
+       }
+
+       init.name = clk_name;
+       init.ops = &cmux_ops;
+       init.parent_names = parent_names;
+       init.num_parents = count;
+       init.flags = 0;
+       cmux_clk->hw.init = &init;
+
+       clk = clk_register(NULL, &cmux_clk->hw);
+       if (IS_ERR(clk)) {
+               pr_err("%s: could not register clock\n", clk_name);
+               goto err_clk;
+       }
+
+       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       if (rc) {
+               pr_err("Could not register clock provider for node:%s\n",
+                      np->name);
+               goto err_clk;
+       }
+       goto err_name;
+
+err_clk:
+       kfree(cmux_clk);
+err_name:
+       /* free *_names because they are reallocated when registered */
+       kfree(parent_names);
+}
+
+static void __init core_pll_init(struct device_node *np)
+{
+       u32 mult;
+       int i, rc, count;
+       const char *clk_name, *parent_name;
+       struct clk_onecell_data *onecell_data;
+       struct clk      **subclks;
+       void __iomem *base;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("iomap error\n");
+               return;
+       }
+
+       /* get the multiple of PLL */
+       mult = ioread32be(base);
+
+       /* check if this PLL is disabled */
+       if (mult & PLL_KILL) {
+               pr_debug("PLL:%s is disabled\n", np->name);
+               goto err_map;
+       }
+       mult = (mult >> 1) & 0x3f;
+
+       parent_name = of_clk_get_parent_name(np, 0);
+       if (!parent_name) {
+               pr_err("PLL: %s must have a parent\n", np->name);
+               goto err_map;
+       }
+
+       count = of_property_count_strings(np, "clock-output-names");
+       if (count < 0 || count > 4) {
+               pr_err("%s: clock is not supported\n", np->name);
+               goto err_map;
+       }
+
+       subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+       if (!subclks)
+               goto err_map;
+
+       onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
+       if (!onecell_data)
+               goto err_clks;
+
+       for (i = 0; i < count; i++) {
+               rc = of_property_read_string_index(np, "clock-output-names",
+                                                  i, &clk_name);
+               if (rc) {
+                       pr_err("%s: could not get clock names\n", np->name);
+                       goto err_cell;
+               }
+
+               /*
+                * when count == 4, there are 4 output clocks:
+                * /1, /2, /3, /4 respectively
+                * when count < 4, there are at least 2 output clocks:
+                * /1, /2, (/4, if count == 3) respectively.
+                */
+               if (count == 4)
+                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                       parent_name, 0, mult, 1 + i);
+               else
+
+                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                       parent_name, 0, mult, 1 << i);
+
+               if (IS_ERR(subclks[i])) {
+                       pr_err("%s: could not register clock\n", clk_name);
+                       goto err_cell;
+               }
+       }
+
+       onecell_data->clks = subclks;
+       onecell_data->clk_num = count;
+
+       rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
+       if (rc) {
+               pr_err("Could not register clk provider for node:%s\n",
+                      np->name);
+               goto err_cell;
+       }
+
+       iounmap(base);
+       return;
+err_cell:
+       kfree(onecell_data);
+err_clks:
+       kfree(subclks);
+err_map:
+       iounmap(base);
+}
+
+static void __init sysclk_init(struct device_node *node)
+{
+       struct clk *clk;
+       const char *clk_name = node->name;
+       struct device_node *np = of_get_parent(node);
+       u32 rate;
+
+       if (!np) {
+               pr_err("could not get parent node\n");
+               return;
+       }
+
+       if (of_property_read_u32(np, "clock-frequency", &rate)) {
+               of_node_put(node);
+               return;
+       }
+
+       of_property_read_string(np, "clock-output-names", &clk_name);
+
+       clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+       if (!IS_ERR(clk))
+               of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static void __init pltfrm_pll_init(struct device_node *np)
+{
+       void __iomem *base;
+       uint32_t mult;
+       const char *parent_name, *clk_name;
+       int i, _errno;
+       struct clk_onecell_data *cod;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
+               return;
+       }
+
+       /* Get the multiple of PLL */
+       mult = ioread32be(base);
+
+       iounmap(base);
+
+       /* Check if this PLL is disabled */
+       if (mult & PLL_KILL) {
+               pr_debug("%s(): %s: Disabled\n", __func__, np->name);
+               return;
+       }
+       mult = (mult & GENMASK(6, 1)) >> 1;
+
+       parent_name = of_clk_get_parent_name(np, 0);
+       if (!parent_name) {
+               pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
+                      __func__, np->name);
+               return;
+       }
+
+       i = of_property_count_strings(np, "clock-output-names");
+       if (i < 0) {
+               pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
+                      __func__, np->name, i);
+               return;
+       }
+
+       cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
+       if (!cod)
+               return;
+       cod->clks = (struct clk **)(cod + 1);
+       cod->clk_num = i;
+
+       for (i = 0; i < cod->clk_num; i++) {
+               _errno = of_property_read_string_index(np, "clock-output-names",
+                                                      i, &clk_name);
+               if (_errno < 0) {
+                       pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
+                              __func__, np->name, _errno);
+                       goto return_clk_unregister;
+               }
+
+               cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                              parent_name, 0, mult, 1 + i);
+               if (IS_ERR(cod->clks[i])) {
+                       pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
+                              __func__, np->name,
+                              clk_name, PTR_ERR(cod->clks[i]));
+                       goto return_clk_unregister;
+               }
+       }
+
+       _errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
+       if (_errno < 0) {
+               pr_err("%s(): %s: of_clk_add_provider() = %d\n",
+                      __func__, np->name, _errno);
+               goto return_clk_unregister;
+       }
+
+       return;
+
+return_clk_unregister:
+       while (--i >= 0)
+               clk_unregister(cod->clks[i]);
+       kfree(cod);
+}
+
+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
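
Besides the platform-PLL support and the CLK_OF_DECLARE() registration, the rework replaces the old global clocks_per_pll with a per-mux clk_per_pll read from the parent PLL's clock-output-names, so each mux uses the output count of the PLL it is actually fed from. The CLKSEL value written by cmux_set_parent() packs the PLL number into bits [3:2] and the PLL output into bits [1:0] of a 4-bit field placed at bit 27, with an offset of 8 on p4080 cmux nodes at register offsets of 0x80 and above. A stand-alone round-trip of that packing, with example values:

    /*
     * Stand-alone round-trip of the CLKSEL packing used by
     * cmux_set_parent()/cmux_get_parent() above (plain C, example values).
     */
    #include <stdio.h>

    #define CLKSEL_SHIFT    27

    int main(void)
    {
            unsigned int clk_per_pll = 2;   /* outputs per PLL, from the DT  */
            unsigned int adjust = 0;        /* 8 on p4080 for offset >= 0x80 */
            unsigned int idx, clksel, reg, decoded;

            for (idx = 0; idx < 2 * clk_per_pll; idx++) {
                    clksel = ((idx / clk_per_pll) << 2) | (idx % clk_per_pll);
                    reg = ((clksel + adjust) & 0xf) << CLKSEL_SHIFT;

                    decoded = (reg >> CLKSEL_SHIFT) & 0xf;
                    decoded -= adjust;
                    decoded = (decoded >> 2) * clk_per_pll + decoded % 4;

                    printf("idx %u -> reg 0x%08x -> idx %u\n", idx, reg, decoded);
            }
            return 0;
    }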
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 642cf37124d3780024d1739bf9298aa788c336e2..237f23f68bfce18cdfee3306487ba924906125c4 100644 (file)
@@ -9,7 +9,7 @@
  * Standard functionality for the common clock API.  See Documentation/clk.txt
  */
 
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
 #include <linux/clk/clk-conf.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -37,6 +37,55 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+static long clk_core_get_accuracy(struct clk_core *clk);
+static unsigned long clk_core_get_rate(struct clk_core *clk);
+static int clk_core_get_phase(struct clk_core *clk);
+static bool clk_core_is_prepared(struct clk_core *clk);
+static bool clk_core_is_enabled(struct clk_core *clk);
+static struct clk_core *clk_core_lookup(const char *name);
+
+/***    private data structures    ***/
+
+struct clk_core {
+       const char              *name;
+       const struct clk_ops    *ops;
+       struct clk_hw           *hw;
+       struct module           *owner;
+       struct clk_core         *parent;
+       const char              **parent_names;
+       struct clk_core         **parents;
+       u8                      num_parents;
+       u8                      new_parent_index;
+       unsigned long           rate;
+       unsigned long           req_rate;
+       unsigned long           new_rate;
+       struct clk_core         *new_parent;
+       struct clk_core         *new_child;
+       unsigned long           flags;
+       unsigned int            enable_count;
+       unsigned int            prepare_count;
+       unsigned long           accuracy;
+       int                     phase;
+       struct hlist_head       children;
+       struct hlist_node       child_node;
+       struct hlist_node       debug_node;
+       struct hlist_head       clks;
+       unsigned int            notifier_count;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *dentry;
+#endif
+       struct kref             ref;
+};
+
+struct clk {
+       struct clk_core *core;
+       const char *dev_id;
+       const char *con_id;
+       unsigned long min_rate;
+       unsigned long max_rate;
+       struct hlist_node child_node;
+};
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
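
The clk.c changes in this merge split providers from consumers: the fields that used to live in the struct clk exposed by clk-private.h (the include goes away above) move into a clk.c-local struct clk_core, and struct clk shrinks to a thin per-consumer handle carrying dev_id/con_id plus a min_rate/max_rate constraint. The clk_core_* helpers declared here operate on the shared core; the public clk_* entry points become wrappers that dereference clk->core. A simplified stand-alone model, with invented types, of how the per-handle constraints combine into one rate window, mirroring clk_core_get_boundaries() further down in this file:

    /*
     * Simplified stand-alone model (plain C, invented types) of the split:
     * one shared core object, many per-consumer handles, each handle
     * contributing a min/max constraint.
     */
    #include <limits.h>
    #include <stdio.h>

    struct core {
            const char *name;
            unsigned long rate;
    };

    struct handle {                         /* per-consumer "struct clk" */
            struct core *core;
            unsigned long min_rate;
            unsigned long max_rate;
    };

    static void get_boundaries(const struct handle *users, int n,
                               unsigned long *min, unsigned long *max)
    {
            int i;

            *min = 0;
            *max = ULONG_MAX;
            for (i = 0; i < n; i++) {
                    if (users[i].min_rate > *min)
                            *min = users[i].min_rate;
                    if (users[i].max_rate < *max)
                            *max = users[i].max_rate;
            }
    }

    int main(void)
    {
            struct core pll = { "pll0", 0 };
            struct handle users[] = {
                    { &pll, 100000000UL, ULONG_MAX },   /* needs >= 100 MHz     */
                    { &pll, 0,           400000000UL }, /* tolerates <= 400 MHz */
            };
            unsigned long min, max;

            get_boundaries(users, 2, &min, &max);
            printf("%s window: [%lu, %lu]\n", pll.name, min, max);
            return 0;
    }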
@@ -114,7 +163,8 @@ static struct hlist_head *orphan_list[] = {
        NULL,
 };
 
-static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+                                int level)
 {
        if (!c)
                return;
@@ -122,14 +172,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
                   level * 3 + 1, "",
                   30 - level * 3, c->name,
-                  c->enable_count, c->prepare_count, clk_get_rate(c),
-                  clk_get_accuracy(c), clk_get_phase(c));
+                  c->enable_count, c->prepare_count, clk_core_get_rate(c),
+                  clk_core_get_accuracy(c), clk_core_get_phase(c));
 }
 
-static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
                                     int level)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        if (!c)
                return;
@@ -142,7 +192,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
-       struct clk *c;
+       struct clk_core *c;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
        seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
@@ -172,7 +222,7 @@ static const struct file_operations clk_summary_fops = {
        .release        = single_release,
 };
 
-static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 {
        if (!c)
                return;
@@ -180,14 +230,14 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "\"%s\": { ", c->name);
        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
-       seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
-       seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
-       seq_printf(s, "\"phase\": %d", clk_get_phase(c));
+       seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
+       seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
+       seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
 }
 
-static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        if (!c)
                return;
@@ -204,7 +254,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 
 static int clk_dump(struct seq_file *s, void *data)
 {
-       struct clk *c;
+       struct clk_core *c;
        bool first_node = true;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
@@ -240,7 +290,7 @@ static const struct file_operations clk_dump_fops = {
        .release        = single_release,
 };
 
-static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
+static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
 {
        struct dentry *d;
        int ret = -ENOMEM;
@@ -315,7 +365,7 @@ out:
  * initialized.  Otherwise it bails out early since the debugfs clk tree
  * will be created lazily by clk_debug_init as part of a late_initcall.
  */
-static int clk_debug_register(struct clk *clk)
+static int clk_debug_register(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -340,16 +390,12 @@ unlock:
  * debugfs clk tree if clk->dentry points to debugfs created by
  * clk_debug_register in __clk_init.
  */
-static void clk_debug_unregister(struct clk *clk)
+static void clk_debug_unregister(struct clk_core *clk)
 {
        mutex_lock(&clk_debug_lock);
-       if (!clk->dentry)
-               goto out;
-
        hlist_del_init(&clk->debug_node);
        debugfs_remove_recursive(clk->dentry);
        clk->dentry = NULL;
-out:
        mutex_unlock(&clk_debug_lock);
 }
 
@@ -358,8 +404,9 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 {
        struct dentry *d = NULL;
 
-       if (hw->clk->dentry)
-               d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
+       if (hw->core->dentry)
+               d = debugfs_create_file(name, mode, hw->core->dentry, data,
+                                       fops);
 
        return d;
 }
@@ -379,7 +426,7 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
  */
 static int __init clk_debug_init(void)
 {
-       struct clk *clk;
+       struct clk_core *clk;
        struct dentry *d;
 
        rootdir = debugfs_create_dir("clk", NULL);
@@ -418,22 +465,20 @@ static int __init clk_debug_init(void)
 }
 late_initcall(clk_debug_init);
 #else
-static inline int clk_debug_register(struct clk *clk) { return 0; }
-static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+static inline int clk_debug_register(struct clk_core *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk_core *clk,
+                                     struct clk_core *new_parent)
 {
 }
-static inline void clk_debug_unregister(struct clk *clk)
+static inline void clk_debug_unregister(struct clk_core *clk)
 {
 }
 #endif
 
 /* caller must hold prepare_lock */
-static void clk_unprepare_unused_subtree(struct clk *clk)
+static void clk_unprepare_unused_subtree(struct clk_core *clk)
 {
-       struct clk *child;
-
-       if (!clk)
-               return;
+       struct clk_core *child;
 
        hlist_for_each_entry(child, &clk->children, child_node)
                clk_unprepare_unused_subtree(child);
@@ -444,7 +489,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
        if (clk->flags & CLK_IGNORE_UNUSED)
                return;
 
-       if (__clk_is_prepared(clk)) {
+       if (clk_core_is_prepared(clk)) {
                if (clk->ops->unprepare_unused)
                        clk->ops->unprepare_unused(clk->hw);
                else if (clk->ops->unprepare)
@@ -453,14 +498,11 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
 }
 
 /* caller must hold prepare_lock */
-static void clk_disable_unused_subtree(struct clk *clk)
+static void clk_disable_unused_subtree(struct clk_core *clk)
 {
-       struct clk *child;
+       struct clk_core *child;
        unsigned long flags;
 
-       if (!clk)
-               goto out;
-
        hlist_for_each_entry(child, &clk->children, child_node)
                clk_disable_unused_subtree(child);
 
@@ -477,7 +519,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         * sequence.  call .disable_unused if available, otherwise fall
         * back to .disable
         */
-       if (__clk_is_enabled(clk)) {
+       if (clk_core_is_enabled(clk)) {
                if (clk->ops->disable_unused)
                        clk->ops->disable_unused(clk->hw);
                else if (clk->ops->disable)
@@ -486,9 +528,6 @@ static void clk_disable_unused_subtree(struct clk *clk)
 
 unlock_out:
        clk_enable_unlock(flags);
-
-out:
-       return;
 }
 
 static bool clk_ignore_unused;
@@ -501,7 +540,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
 static int clk_disable_unused(void)
 {
-       struct clk *clk;
+       struct clk_core *clk;
 
        if (clk_ignore_unused) {
                pr_warn("clk: Not disabling unused clocks\n");
@@ -532,48 +571,65 @@ late_initcall_sync(clk_disable_unused);
 
 const char *__clk_get_name(struct clk *clk)
 {
-       return !clk ? NULL : clk->name;
+       return !clk ? NULL : clk->core->name;
 }
 EXPORT_SYMBOL_GPL(__clk_get_name);
 
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
-       return !clk ? NULL : clk->hw;
+       return !clk ? NULL : clk->core->hw;
 }
 EXPORT_SYMBOL_GPL(__clk_get_hw);
 
 u8 __clk_get_num_parents(struct clk *clk)
 {
-       return !clk ? 0 : clk->num_parents;
+       return !clk ? 0 : clk->core->num_parents;
 }
 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
 struct clk *__clk_get_parent(struct clk *clk)
 {
-       return !clk ? NULL : clk->parent;
+       if (!clk)
+               return NULL;
+
+       /* TODO: Create a per-user clk and change callers to call clk_put */
+       return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
 }
 EXPORT_SYMBOL_GPL(__clk_get_parent);
 
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
+                                                        u8 index)
 {
        if (!clk || index >= clk->num_parents)
                return NULL;
        else if (!clk->parents)
-               return __clk_lookup(clk->parent_names[index]);
+               return clk_core_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                return clk->parents[index] =
-                       __clk_lookup(clk->parent_names[index]);
+                       clk_core_lookup(clk->parent_names[index]);
        else
                return clk->parents[index];
 }
+
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+       struct clk_core *parent;
+
+       if (!clk)
+               return NULL;
+
+       parent = clk_core_get_parent_by_index(clk->core, index);
+
+       return !parent ? NULL : parent->hw->clk;
+}
 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
-       return !clk ? 0 : clk->enable_count;
+       return !clk ? 0 : clk->core->enable_count;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
 {
        unsigned long ret;
 
@@ -593,9 +649,17 @@ unsigned long __clk_get_rate(struct clk *clk)
 out:
        return ret;
 }
+
+unsigned long __clk_get_rate(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_rate_nolock(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-static unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk_core *clk)
 {
        if (!clk)
                return 0;
@@ -605,11 +669,11 @@ static unsigned long __clk_get_accuracy(struct clk *clk)
 
 unsigned long __clk_get_flags(struct clk *clk)
 {
-       return !clk ? 0 : clk->flags;
+       return !clk ? 0 : clk->core->flags;
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-bool __clk_is_prepared(struct clk *clk)
+static bool clk_core_is_prepared(struct clk_core *clk)
 {
        int ret;
 
@@ -630,7 +694,15 @@ out:
        return !!ret;
 }
 
-bool __clk_is_enabled(struct clk *clk)
+bool __clk_is_prepared(struct clk *clk)
+{
+       if (!clk)
+               return false;
+
+       return clk_core_is_prepared(clk->core);
+}
+
+static bool clk_core_is_enabled(struct clk_core *clk)
 {
        int ret;
 
@@ -650,12 +722,21 @@ bool __clk_is_enabled(struct clk *clk)
 out:
        return !!ret;
 }
+
+bool __clk_is_enabled(struct clk *clk)
+{
+       if (!clk)
+               return false;
+
+       return clk_core_is_enabled(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
-static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
+static struct clk_core *__clk_lookup_subtree(const char *name,
+                                            struct clk_core *clk)
 {
-       struct clk *child;
-       struct clk *ret;
+       struct clk_core *child;
+       struct clk_core *ret;
 
        if (!strcmp(clk->name, name))
                return clk;
@@ -669,10 +750,10 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
        return NULL;
 }
 
-struct clk *__clk_lookup(const char *name)
+static struct clk_core *clk_core_lookup(const char *name)
 {
-       struct clk *root_clk;
-       struct clk *ret;
+       struct clk_core *root_clk;
+       struct clk_core *ret;
 
        if (!name)
                return NULL;
@@ -694,42 +775,53 @@ struct clk *__clk_lookup(const char *name)
        return NULL;
 }
 
-/*
- * Helper for finding best parent to provide a given frequency. This can be used
- * directly as a determine_rate callback (e.g. for a mux), or from a more
- * complex clock that may combine a mux with other operations.
- */
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-                             unsigned long *best_parent_rate,
-                             struct clk_hw **best_parent_p)
+static bool mux_is_better_rate(unsigned long rate, unsigned long now,
+                          unsigned long best, unsigned long flags)
+{
+       if (flags & CLK_MUX_ROUND_CLOSEST)
+               return abs(now - rate) < abs(best - rate);
+
+       return now <= rate && now > best;
+}
+
+static long
+clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
+                            unsigned long min_rate,
+                            unsigned long max_rate,
+                            unsigned long *best_parent_rate,
+                            struct clk_hw **best_parent_p,
+                            unsigned long flags)
 {
-       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       struct clk_core *core = hw->core, *parent, *best_parent = NULL;
        int i, num_parents;
        unsigned long parent_rate, best = 0;
 
        /* if NO_REPARENT flag set, pass through to current parent */
-       if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
-               parent = clk->parent;
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       best = __clk_round_rate(parent, rate);
+       if (core->flags & CLK_SET_RATE_NO_REPARENT) {
+               parent = core->parent;
+               if (core->flags & CLK_SET_RATE_PARENT)
+                       best = __clk_determine_rate(parent ? parent->hw : NULL,
+                                                   rate, min_rate, max_rate);
                else if (parent)
-                       best = __clk_get_rate(parent);
+                       best = clk_core_get_rate_nolock(parent);
                else
-                       best = __clk_get_rate(clk);
+                       best = clk_core_get_rate_nolock(core);
                goto out;
        }
 
        /* find the parent that can provide the fastest rate <= rate */
-       num_parents = clk->num_parents;
+       num_parents = core->num_parents;
        for (i = 0; i < num_parents; i++) {
-               parent = clk_get_parent_by_index(clk, i);
+               parent = clk_core_get_parent_by_index(core, i);
                if (!parent)
                        continue;
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       parent_rate = __clk_round_rate(parent, rate);
+               if (core->flags & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_determine_rate(parent->hw, rate,
+                                                          min_rate,
+                                                          max_rate);
                else
-                       parent_rate = __clk_get_rate(parent);
-               if (parent_rate <= rate && parent_rate > best) {
+                       parent_rate = clk_core_get_rate_nolock(parent);
+               if (mux_is_better_rate(rate, parent_rate, best, flags)) {
                        best_parent = parent;
                        best = parent_rate;
                }
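
__clk_mux_determine_rate() and the new __clk_mux_determine_rate_closest() now share clk_mux_determine_rate_flags(); the only difference is the policy in mux_is_better_rate(): by default the best parent is the fastest one that does not exceed the requested rate, while CLK_MUX_ROUND_CLOSEST picks whichever parent rate is nearest, even if it overshoots. A stand-alone comparison of the two policies on made-up parent rates (signed arithmetic is used here purely to keep the illustration simple):

    /*
     * Stand-alone comparison (plain C, made-up parent rates) of the two
     * policies selected by mux_is_better_rate() above.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long pick(const unsigned long *parents, int n,
                              unsigned long rate, int closest)
    {
            unsigned long best = 0;
            int i, better;

            for (i = 0; i < n; i++) {
                    if (closest)
                            better = labs((long)parents[i] - (long)rate) <
                                     labs((long)best - (long)rate);
                    else
                            better = parents[i] <= rate && parents[i] > best;
                    if (better)
                            best = parents[i];
            }
            return best;
    }

    int main(void)
    {
            unsigned long parents[] = { 100, 150, 200 };    /* MHz, invented */
            unsigned long rate = 180;

            printf("default: %lu, closest: %lu\n",
                   pick(parents, 3, rate, 0), pick(parents, 3, rate, 1));
            return 0;
    }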
@@ -742,11 +834,63 @@ out:
 
        return best;
 }
+
+struct clk *__clk_lookup(const char *name)
+{
+       struct clk_core *core = clk_core_lookup(name);
+
+       return !core ? NULL : core->hw->clk;
+}
+
+static void clk_core_get_boundaries(struct clk_core *clk,
+                                   unsigned long *min_rate,
+                                   unsigned long *max_rate)
+{
+       struct clk *clk_user;
+
+       *min_rate = 0;
+       *max_rate = ULONG_MAX;
+
+       hlist_for_each_entry(clk_user, &clk->clks, child_node)
+               *min_rate = max(*min_rate, clk_user->min_rate);
+
+       hlist_for_each_entry(clk_user, &clk->clks, child_node)
+               *max_rate = min(*max_rate, clk_user->max_rate);
+}
+
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p)
+{
+       return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                           best_parent_rate,
+                                           best_parent_p, 0);
+}
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p)
+{
+       return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                           best_parent_rate,
+                                           best_parent_p,
+                                           CLK_MUX_ROUND_CLOSEST);
+}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
+
 /***        clk api        ***/
 
-void __clk_unprepare(struct clk *clk)
+static void clk_core_unprepare(struct clk_core *clk)
 {
        if (!clk)
                return;
@@ -762,7 +906,7 @@ void __clk_unprepare(struct clk *clk)
        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);
 
-       __clk_unprepare(clk->parent);
+       clk_core_unprepare(clk->parent);
 }
 
 /**
@@ -782,12 +926,12 @@ void clk_unprepare(struct clk *clk)
                return;
 
        clk_prepare_lock();
-       __clk_unprepare(clk);
+       clk_core_unprepare(clk->core);
        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
-int __clk_prepare(struct clk *clk)
+static int clk_core_prepare(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -795,14 +939,14 @@ int __clk_prepare(struct clk *clk)
                return 0;
 
        if (clk->prepare_count == 0) {
-               ret = __clk_prepare(clk->parent);
+               ret = clk_core_prepare(clk->parent);
                if (ret)
                        return ret;
 
                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
-                               __clk_unprepare(clk->parent);
+                               clk_core_unprepare(clk->parent);
                                return ret;
                        }
                }
@@ -829,15 +973,18 @@ int clk_prepare(struct clk *clk)
 {
        int ret;
 
+       if (!clk)
+               return 0;
+
        clk_prepare_lock();
-       ret = __clk_prepare(clk);
+       ret = clk_core_prepare(clk->core);
        clk_prepare_unlock();
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
-static void __clk_disable(struct clk *clk)
+static void clk_core_disable(struct clk_core *clk)
 {
        if (!clk)
                return;
@@ -851,7 +998,15 @@ static void __clk_disable(struct clk *clk)
        if (clk->ops->disable)
                clk->ops->disable(clk->hw);
 
-       __clk_disable(clk->parent);
+       clk_core_disable(clk->parent);
+}
+
+static void __clk_disable(struct clk *clk)
+{
+       if (!clk)
+               return;
+
+       clk_core_disable(clk->core);
 }
 
 /**
@@ -879,7 +1034,7 @@ void clk_disable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
-static int __clk_enable(struct clk *clk)
+static int clk_core_enable(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -890,7 +1045,7 @@ static int __clk_enable(struct clk *clk)
                return -ESHUTDOWN;
 
        if (clk->enable_count == 0) {
-               ret = __clk_enable(clk->parent);
+               ret = clk_core_enable(clk->parent);
 
                if (ret)
                        return ret;
@@ -898,7 +1053,7 @@ static int __clk_enable(struct clk *clk)
                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
-                               __clk_disable(clk->parent);
+                               clk_core_disable(clk->parent);
                                return ret;
                        }
                }
@@ -908,6 +1063,14 @@ static int __clk_enable(struct clk *clk)
        return 0;
 }
 
+static int __clk_enable(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_enable(clk->core);
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -934,17 +1097,13 @@ int clk_enable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_enable);
 
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
+                                               unsigned long rate,
+                                               unsigned long min_rate,
+                                               unsigned long max_rate)
 {
        unsigned long parent_rate = 0;
-       struct clk *parent;
+       struct clk_core *parent;
        struct clk_hw *parent_hw;
 
        if (!clk)
@@ -956,15 +1115,59 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 
        if (clk->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
-               return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-                                               &parent_hw);
+               return clk->ops->determine_rate(clk->hw, rate,
+                                               min_rate, max_rate,
+                                               &parent_rate, &parent_hw);
        } else if (clk->ops->round_rate)
                return clk->ops->round_rate(clk->hw, rate, &parent_rate);
        else if (clk->flags & CLK_SET_RATE_PARENT)
-               return __clk_round_rate(clk->parent, rate);
+               return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
+                                                 max_rate);
        else
                return clk->rate;
 }
+
+/**
+ * __clk_determine_rate - get the closest rate actually supported by a clock
+ * @hw: determine the rate of this clock
+ * @rate: target rate
+ * @min_rate: returned rate must be greater than this rate
+ * @max_rate: returned rate must be less than this rate
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
+ * .determine_rate.
+ */
+unsigned long __clk_determine_rate(struct clk_hw *hw,
+                                  unsigned long rate,
+                                  unsigned long min_rate,
+                                  unsigned long max_rate)
+{
+       if (!hw)
+               return 0;
+
+       return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(__clk_determine_rate);
+
+/**
+ * __clk_round_rate - round the given rate for a clk
+ * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+{
+       unsigned long min_rate;
+       unsigned long max_rate;
+
+       if (!clk)
+               return 0;
+
+       clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
+
+       return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+}
 EXPORT_SYMBOL_GPL(__clk_round_rate);
 
 /**
@@ -980,6 +1183,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long ret;
 
+       if (!clk)
+               return 0;
+
        clk_prepare_lock();
        ret = __clk_round_rate(clk, rate);
        clk_prepare_unlock();
@@ -1002,22 +1208,21 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
  * a driver returns that.
  */
-static int __clk_notify(struct clk *clk, unsigned long msg,
+static int __clk_notify(struct clk_core *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
 {
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;
 
-       cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;
 
        list_for_each_entry(cn, &clk_notifier_list, node) {
-               if (cn->clk == clk) {
+               if (cn->clk->core == clk) {
+                       cnd.clk = cn->clk;
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
-                       break;
                }
        }
 
@@ -1035,10 +1240,10 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_accuracies(struct clk *clk)
+static void __clk_recalc_accuracies(struct clk_core *clk)
 {
        unsigned long parent_accuracy = 0;
-       struct clk *child;
+       struct clk_core *child;
 
        if (clk->parent)
                parent_accuracy = clk->parent->accuracy;
@@ -1053,6 +1258,20 @@ static void __clk_recalc_accuracies(struct clk *clk)
                __clk_recalc_accuracies(child);
 }
 
+static long clk_core_get_accuracy(struct clk_core *clk)
+{
+       unsigned long accuracy;
+
+       clk_prepare_lock();
+       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
+               __clk_recalc_accuracies(clk);
+
+       accuracy = __clk_get_accuracy(clk);
+       clk_prepare_unlock();
+
+       return accuracy;
+}
+
 /**
  * clk_get_accuracy - return the accuracy of clk
  * @clk: the clk whose accuracy is being returned
@@ -1064,20 +1283,15 @@ static void __clk_recalc_accuracies(struct clk *clk)
  */
 long clk_get_accuracy(struct clk *clk)
 {
-       unsigned long accuracy;
-
-       clk_prepare_lock();
-       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
-               __clk_recalc_accuracies(clk);
-
-       accuracy = __clk_get_accuracy(clk);
-       clk_prepare_unlock();
+       if (!clk)
+               return 0;
 
-       return accuracy;
+       return clk_core_get_accuracy(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_get_accuracy);
 
-static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
+static unsigned long clk_recalc(struct clk_core *clk,
+                               unsigned long parent_rate)
 {
        if (clk->ops->recalc_rate)
                return clk->ops->recalc_rate(clk->hw, parent_rate);
@@ -1098,11 +1312,11 @@ static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
 {
        unsigned long old_rate;
        unsigned long parent_rate = 0;
-       struct clk *child;
+       struct clk_core *child;
 
        old_rate = clk->rate;
 
@@ -1122,15 +1336,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
                __clk_recalc_rates(child, msg);
 }
 
-/**
- * clk_get_rate - return the rate of clk
- * @clk: the clk whose rate is being returned
- *
- * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
- */
-unsigned long clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate(struct clk_core *clk)
 {
        unsigned long rate;
 
@@ -1139,14 +1345,31 @@ unsigned long clk_get_rate(struct clk *clk)
        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);
 
-       rate = __clk_get_rate(clk);
+       rate = clk_core_get_rate_nolock(clk);
        clk_prepare_unlock();
 
        return rate;
 }
+
+/**
+ * clk_get_rate - return the rate of clk
+ * @clk: the clk whose rate is being returned
+ *
+ * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
+ * is set, which means a recalc_rate will be issued.
+ * If clk is NULL then returns 0.
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_rate(clk->core);
+}
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
-static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+static int clk_fetch_parent_index(struct clk_core *clk,
+                                 struct clk_core *parent)
 {
        int i;
 
@@ -1160,7 +1383,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
-        * them now to avoid future calls to __clk_lookup.
+        * them now to avoid future calls to clk_core_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents[i] == parent)
@@ -1170,7 +1393,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
                        continue;
 
                if (!strcmp(clk->parent_names[i], parent->name)) {
-                       clk->parents[i] = __clk_lookup(parent->name);
+                       clk->parents[i] = clk_core_lookup(parent->name);
                        return i;
                }
        }
@@ -1178,7 +1401,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
        return -EINVAL;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
 {
        hlist_del(&clk->child_node);
 
@@ -1195,10 +1418,11 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
        clk->parent = new_parent;
 }
 
-static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
+static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+                                          struct clk_core *parent)
 {
        unsigned long flags;
-       struct clk *old_parent = clk->parent;
+       struct clk_core *old_parent = clk->parent;
 
        /*
         * Migrate prepare state between parents and prevent race with
@@ -1218,9 +1442,9 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
         * See also: Comment for clk_set_parent() below.
         */
        if (clk->prepare_count) {
-               __clk_prepare(parent);
-               clk_enable(parent);
-               clk_enable(clk);
+               clk_core_prepare(parent);
+               clk_core_enable(parent);
+               clk_core_enable(clk);
        }
 
        /* update the clk tree topology */
@@ -1231,25 +1455,27 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
        return old_parent;
 }
 
-static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
-               struct clk *old_parent)
+static void __clk_set_parent_after(struct clk_core *core,
+                                  struct clk_core *parent,
+                                  struct clk_core *old_parent)
 {
        /*
         * Finish the migration of prepare state and undo the changes done
         * for preventing a race with clk_enable().
         */
-       if (clk->prepare_count) {
-               clk_disable(clk);
-               clk_disable(old_parent);
-               __clk_unprepare(old_parent);
+       if (core->prepare_count) {
+               clk_core_disable(core);
+               clk_core_disable(old_parent);
+               clk_core_unprepare(old_parent);
        }
 }
 
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+                           u8 p_index)
 {
        unsigned long flags;
        int ret = 0;
-       struct clk *old_parent;
+       struct clk_core *old_parent;
 
        old_parent = __clk_set_parent_before(clk, parent);
 
@@ -1263,9 +1489,9 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
                clk_enable_unlock(flags);
 
                if (clk->prepare_count) {
-                       clk_disable(clk);
-                       clk_disable(parent);
-                       __clk_unprepare(parent);
+                       clk_core_disable(clk);
+                       clk_core_disable(parent);
+                       clk_core_unprepare(parent);
                }
                return ret;
        }
@@ -1291,9 +1517,10 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
  *
  * Caller must hold prepare_lock.
  */
-static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
+static int __clk_speculate_rates(struct clk_core *clk,
+                                unsigned long parent_rate)
 {
-       struct clk *child;
+       struct clk_core *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;
 
@@ -1319,10 +1546,10 @@ out:
        return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
-                            struct clk *new_parent, u8 p_index)
+static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
+                            struct clk_core *new_parent, u8 p_index)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        clk->new_rate = new_rate;
        clk->new_parent = new_parent;
@@ -1342,13 +1569,16 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
  * calculate the new rates returning the topmost clock that has to be
  * changed.
  */
-static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
+static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
+                                          unsigned long rate)
 {
-       struct clk *top = clk;
-       struct clk *old_parent, *parent;
+       struct clk_core *top = clk;
+       struct clk_core *old_parent, *parent;
        struct clk_hw *parent_hw;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;
+       unsigned long min_rate;
+       unsigned long max_rate;
        int p_index = 0;
 
        /* sanity */
@@ -1360,16 +1590,22 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
        if (parent)
                best_parent_rate = parent->rate;
 
+       clk_core_get_boundaries(clk, &min_rate, &max_rate);
+
        /* find the closest rate and parent clk/rate */
        if (clk->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
                new_rate = clk->ops->determine_rate(clk->hw, rate,
+                                                   min_rate,
+                                                   max_rate,
                                                    &best_parent_rate,
                                                    &parent_hw);
-               parent = parent_hw ? parent_hw->clk : NULL;
+               parent = parent_hw ? parent_hw->core : NULL;
        } else if (clk->ops->round_rate) {
                new_rate = clk->ops->round_rate(clk->hw, rate,
                                                &best_parent_rate);
+               if (new_rate < min_rate || new_rate > max_rate)
+                       return NULL;
        } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
                /* pass-through clock without adjustable parent */
                clk->new_rate = clk->rate;
@@ -1390,7 +1626,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
        }
 
        /* try finding the new parent index */
-       if (parent) {
+       if (parent && clk->num_parents > 1) {
                p_index = clk_fetch_parent_index(clk, parent);
                if (p_index < 0) {
                        pr_debug("%s: clk %s can not be parent of clk %s\n",
@@ -1414,9 +1650,10 @@ out:
  * so that in case of an error we can walk down the whole tree again and
  * abort the change.
  */
-static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
+static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
+                                                 unsigned long event)
 {
-       struct clk *child, *tmp_clk, *fail_clk = NULL;
+       struct clk_core *child, *tmp_clk, *fail_clk = NULL;
        int ret = NOTIFY_DONE;
 
        if (clk->rate == clk->new_rate)
@@ -1451,14 +1688,14 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk *clk)
+static void clk_change_rate(struct clk_core *clk)
 {
-       struct clk *child;
+       struct clk_core *child;
        struct hlist_node *tmp;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
-       struct clk *old_parent;
+       struct clk_core *old_parent;
 
        old_rate = clk->rate;
 
@@ -1506,6 +1743,45 @@ static void clk_change_rate(struct clk *clk)
                clk_change_rate(clk->new_child);
 }
 
+static int clk_core_set_rate_nolock(struct clk_core *clk,
+                                   unsigned long req_rate)
+{
+       struct clk_core *top, *fail_clk;
+       unsigned long rate = req_rate;
+       int ret = 0;
+
+       if (!clk)
+               return 0;
+
+       /* bail early if nothing to do */
+       if (rate == clk_core_get_rate_nolock(clk))
+               return 0;
+
+       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
+               return -EBUSY;
+
+       /* calculate new rates and get the topmost changed clock */
+       top = clk_calc_new_rates(clk, rate);
+       if (!top)
+               return -EINVAL;
+
+       /* notify that we are about to change rates */
+       fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+       if (fail_clk) {
+               pr_debug("%s: failed to set %s rate\n", __func__,
+                               fail_clk->name);
+               clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+               return -EBUSY;
+       }
+
+       /* change the rates */
+       clk_change_rate(top);
+
+       clk->req_rate = req_rate;
+
+       return ret;
+}
+
 /**
  * clk_set_rate - specify a new rate for clk
  * @clk: the clk whose rate is being changed
@@ -1529,8 +1805,7 @@ static void clk_change_rate(struct clk *clk)
  */
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-       struct clk *top, *fail_clk;
-       int ret = 0;
+       int ret;
 
        if (!clk)
                return 0;
@@ -1538,41 +1813,81 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        /* prevent racing with updates to the clock topology */
        clk_prepare_lock();
 
-       /* bail early if nothing to do */
-       if (rate == clk_get_rate(clk))
-               goto out;
+       ret = clk_core_set_rate_nolock(clk->core, rate);
 
-       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
-               ret = -EBUSY;
-               goto out;
-       }
+       clk_prepare_unlock();
 
-       /* calculate new rates and get the topmost changed clock */
-       top = clk_calc_new_rates(clk, rate);
-       if (!top) {
-               ret = -EINVAL;
-               goto out;
-       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
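The consumer entry point stays clk_set_rate(); the checks that used to live here now run in clk_core_set_rate_nolock() under the prepare lock. A minimal consumer-side sketch, assuming a hypothetical probe() that has already matched a platform device (the "baud" con_id and the 48 MHz target are illustrative, not taken from this series):

        struct clk *clk;
        int ret;

        clk = devm_clk_get(&pdev->dev, "baud");        /* hypothetical consumer */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* may walk up the tree if the clock has CLK_SET_RATE_PARENT */
        ret = clk_set_rate(clk, 48000000);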
 
-       /* notify that we are about to change rates */
-       fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
-       if (fail_clk) {
-               pr_debug("%s: failed to set %s rate\n", __func__,
-                               fail_clk->name);
-               clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-               ret = -EBUSY;
-               goto out;
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+       int ret = 0;
+
+       if (!clk)
+               return 0;
+
+       if (min > max) {
+               pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
+                      __func__, clk->core->name, clk->dev_id, clk->con_id,
+                      min, max);
+               return -EINVAL;
        }
 
-       /* change the rates */
-       clk_change_rate(top);
+       clk_prepare_lock();
+
+       if (min != clk->min_rate || max != clk->max_rate) {
+               clk->min_rate = min;
+               clk->max_rate = max;
+               ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+       }
 
-out:
        clk_prepare_unlock();
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_rate);
+EXPORT_SYMBOL_GPL(clk_set_rate_range);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+       if (!clk)
+               return 0;
+
+       return clk_set_rate_range(clk, rate, clk->max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_min_rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+       if (!clk)
+               return 0;
+
+       return clk_set_rate_range(clk, clk->min_rate, rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_max_rate);
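The three helpers above record bounds in the per-user struct clk; clk_core_set_rate_nolock() is then re-run against the core's req_rate, and clk_calc_new_rates() aggregates every user's bounds through clk_core_get_boundaries(). A hedged usage sketch (cpu_clk and the numeric bounds are invented for illustration):

        /* this consumer only tolerates 200 MHz - 1.2 GHz */
        ret = clk_set_rate_range(cpu_clk, 200000000, 1200000000);
        if (ret)
                return ret;

        /* later, tighten a single bound without touching the other */
        ret = clk_set_min_rate(cpu_clk, 400000000);
        ret = clk_set_max_rate(cpu_clk, 800000000);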
 
 /**
  * clk_get_parent - return the parent of a clk
@@ -1599,11 +1914,11 @@ EXPORT_SYMBOL_GPL(clk_get_parent);
  *
  * For single-parent clocks without .get_parent, first check to see if the
  * .parents array exists, and if so use it to avoid an expensive tree
- * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
+ * traversal.  If .parents does not exist then walk the tree.
  */
-static struct clk *__clk_init_parent(struct clk *clk)
+static struct clk_core *__clk_init_parent(struct clk_core *clk)
 {
-       struct clk *ret = NULL;
+       struct clk_core *ret = NULL;
        u8 index;
 
        /* handle the trivial cases */
@@ -1613,7 +1928,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
-                       clk->parent = __clk_lookup(clk->parent_names[0]);
+                       clk->parent = clk_core_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }
@@ -1627,8 +1942,8 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
-        * unnecessary and expensive calls to __clk_lookup.  We don't set
-        * clk->parent here; that is done by the calling function
+        * unnecessary and expensive lookups.  We don't set clk->parent here;
+        * that is done by the calling function.
         */
 
        index = clk->ops->get_parent(clk->hw);
@@ -1638,13 +1953,14 @@ static struct clk *__clk_init_parent(struct clk *clk)
                        kcalloc(clk->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
 
-       ret = clk_get_parent_by_index(clk, index);
+       ret = clk_core_get_parent_by_index(clk, index);
 
 out:
        return ret;
 }
 
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_core_reparent(struct clk_core *clk,
+                                 struct clk_core *new_parent)
 {
        clk_reparent(clk, new_parent);
        __clk_recalc_accuracies(clk);
@@ -1652,23 +1968,40 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
 }
 
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
  *
- * Re-parent clk to use parent as its new input source.  If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
- *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
  *
- * Returns 0 on success, -EERROR otherwise.
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+       struct clk_core *core, *parent_core;
+       unsigned int i;
+
+       /* NULL clocks should be nops, so return success if either is NULL. */
+       if (!clk || !parent)
+               return true;
+
+       core = clk->core;
+       parent_core = parent->core;
+
+       /* Optimize for the case where the parent is already the parent. */
+       if (core->parent == parent_core)
+               return true;
+
+       for (i = 0; i < core->num_parents; i++)
+               if (strcmp(core->parent_names[i], parent_core->name) == 0)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(clk_has_parent);
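clk_has_parent() only compares the candidate's name against the mux's parent_names table, so it neither touches hardware nor changes any state. An illustrative guard before a reparent attempt (uart_clk and pll_clk are hypothetical handles obtained with clk_get()):

        if (clk_has_parent(uart_clk, pll_clk))
                ret = clk_set_parent(uart_clk, pll_clk);
        else
                dev_warn(dev, "pll cannot feed the uart clock\n");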
+
+static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
 {
        int ret = 0;
        int p_index = 0;
@@ -1728,6 +2061,31 @@ out:
 
        return ret;
 }
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source.  If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (Eg: the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+}
 EXPORT_SYMBOL_GPL(clk_set_parent);
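For muxes flagged with CLK_SET_PARENT_GATE, the reparent is only accepted while the clock is unprepared, so a consumer that cannot tolerate the transparent enable described in the comment above would bracket the call itself (mux_clk and slow_parent are illustrative):

        clk_disable_unprepare(mux_clk);

        ret = clk_set_parent(mux_clk, slow_parent);
        if (ret)
                dev_err(dev, "reparent failed: %d\n", ret);

        ret = clk_prepare_enable(mux_clk);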
 
 /**
@@ -1764,13 +2122,13 @@ int clk_set_phase(struct clk *clk, int degrees)
 
        clk_prepare_lock();
 
-       if (!clk->ops->set_phase)
+       if (!clk->core->ops->set_phase)
                goto out_unlock;
 
-       ret = clk->ops->set_phase(clk->hw, degrees);
+       ret = clk->core->ops->set_phase(clk->core->hw, degrees);
 
        if (!ret)
-               clk->phase = degrees;
+               clk->core->phase = degrees;
 
 out_unlock:
        clk_prepare_unlock();
@@ -1778,15 +2136,9 @@ out_unlock:
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(clk_set_phase);
 
-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+static int clk_core_get_phase(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -1800,28 +2152,74 @@ int clk_get_phase(struct clk *clk)
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(clk_get_phase);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * -EERROR.
+ */
+int clk_get_phase(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_phase(clk->core);
+}
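clk_set_phase() and clk_get_phase() now reach the hardware through clk->core but keep their consumer semantics; a typical use is shifting a bit or sample clock (sclk and the 90 degree value are illustrative):

        ret = clk_set_phase(sclk, 90);
        if (ret)
                dev_err(dev, "cannot shift sclk by 90 degrees: %d\n", ret);
        else
                dev_dbg(dev, "sclk phase is now %d degrees\n",
                        clk_get_phase(sclk));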
+
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+       /* trivial case: identical struct clk's or both NULL */
+       if (p == q)
+               return true;
+
+       /* true if clk->core pointers match. Avoid derefing garbage */
+       if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
+               if (p->core == q->core)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(clk_is_match);
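With per-user handles, two struct clk pointers that refer to the same hardware clock are no longer pointer-equal, which is exactly the comparison clk_is_match() replaces. A hedged example (sample_clk and ref_clk are hypothetical consumer handles):

        struct clk *parent = clk_get_parent(sample_clk);

        if (clk_is_match(parent, ref_clk))
                return 0;       /* already fed by the reference clock */

        return clk_set_parent(sample_clk, ref_clk);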
 
 /**
  * __clk_init - initialize the data structures in a struct clk
  * @dev:       device initializing this clk, placeholder for now
  * @clk:       clk being initialized
  *
- * Initializes the lists in struct clk, queries the hardware for the
+ * Initializes the lists in struct clk_core, queries the hardware for the
  * parent and rate and sets them both.
  */
-int __clk_init(struct device *dev, struct clk *clk)
+static int __clk_init(struct device *dev, struct clk *clk_user)
 {
        int i, ret = 0;
-       struct clk *orphan;
+       struct clk_core *orphan;
        struct hlist_node *tmp2;
+       struct clk_core *clk;
+       unsigned long rate;
 
-       if (!clk)
+       if (!clk_user)
                return -EINVAL;
 
+       clk = clk_user->core;
+
        clk_prepare_lock();
 
        /* check to see if a clock with this name is already registered */
-       if (__clk_lookup(clk->name)) {
+       if (clk_core_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
@@ -1873,7 +2271,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
                /*
-                * __clk_lookup returns NULL for parents that have not been
+                * clk_core_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
@@ -1881,7 +2279,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
-                                       __clk_lookup(clk->parent_names[i]);
+                                       clk_core_lookup(clk->parent_names[i]);
        }
 
        clk->parent = __clk_init_parent(clk);
@@ -1936,12 +2334,13 @@ int __clk_init(struct device *dev, struct clk *clk)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
-               clk->rate = clk->ops->recalc_rate(clk->hw,
-                               __clk_get_rate(clk->parent));
+               rate = clk->ops->recalc_rate(clk->hw,
+                               clk_core_get_rate_nolock(clk->parent));
        else if (clk->parent)
-               clk->rate = clk->parent->rate;
+               rate = clk->parent->rate;
        else
-               clk->rate = 0;
+               rate = 0;
+       clk->rate = clk->req_rate = rate;
 
        /*
         * walk the list of orphan clocks and reparent any that are children of
@@ -1951,13 +2350,13 @@ int __clk_init(struct device *dev, struct clk *clk)
                if (orphan->num_parents && orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
                        if (!strcmp(clk->name, orphan->parent_names[i]))
-                               __clk_reparent(orphan, clk);
+                               clk_core_reparent(orphan, clk);
                        continue;
                }
 
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
-                               __clk_reparent(orphan, clk);
+                               clk_core_reparent(orphan, clk);
                                break;
                        }
         }
@@ -1983,47 +2382,39 @@ out:
        return ret;
 }
 
-/**
- * __clk_register - register a clock and return a cookie.
- *
- * Same as clk_register, except that the .clk field inside hw shall point to a
- * preallocated (generally statically allocated) struct clk. None of the fields
- * of the struct clk need to be initialized.
- *
- * The data pointed to by .init and .clk field shall NOT be marked as init
- * data.
- *
- * __clk_register is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized.  It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements its operations.  Returns 0
- * on success, otherwise an error code.
- */
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                            const char *con_id)
 {
-       int ret;
        struct clk *clk;
 
-       clk = hw->clk;
-       clk->name = hw->init->name;
-       clk->ops = hw->init->ops;
-       clk->hw = hw;
-       clk->flags = hw->init->flags;
-       clk->parent_names = hw->init->parent_names;
-       clk->num_parents = hw->init->num_parents;
-       if (dev && dev->driver)
-               clk->owner = dev->driver->owner;
-       else
-               clk->owner = NULL;
+       /* This is to allow this function to be chained to others */
+       if (!hw || IS_ERR(hw))
+               return (struct clk *) hw;
 
-       ret = __clk_init(dev, clk);
-       if (ret)
-               return ERR_PTR(ret);
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk)
+               return ERR_PTR(-ENOMEM);
+
+       clk->core = hw->core;
+       clk->dev_id = dev_id;
+       clk->con_id = con_id;
+       clk->max_rate = ULONG_MAX;
+
+       clk_prepare_lock();
+       hlist_add_head(&clk->child_node, &hw->core->clks);
+       clk_prepare_unlock();
 
        return clk;
 }
-EXPORT_SYMBOL_GPL(__clk_register);
+
+void __clk_free_clk(struct clk *clk)
+{
+       clk_prepare_lock();
+       hlist_del(&clk->child_node);
+       clk_prepare_unlock();
+
+       kfree(clk);
+}
 
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
@@ -2039,7 +2430,7 @@ EXPORT_SYMBOL_GPL(__clk_register);
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
-       struct clk *clk;
+       struct clk_core *clk;
 
        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
@@ -2060,7 +2451,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
-       hw->clk = clk;
+       hw->core = clk;
 
        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
@@ -2084,9 +2475,21 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
                }
        }
 
-       ret = __clk_init(dev, clk);
+       INIT_HLIST_HEAD(&clk->clks);
+
+       hw->clk = __clk_create_clk(hw, NULL, NULL);
+       if (IS_ERR(hw->clk)) {
+               pr_err("%s: could not allocate per-user clk\n", __func__);
+               ret = PTR_ERR(hw->clk);
+               goto fail_parent_names_copy;
+       }
+
+       ret = __clk_init(dev, hw->clk);
        if (!ret)
-               return clk;
+               return hw->clk;
+
+       __clk_free_clk(hw->clk);
+       hw->clk = NULL;
 
 fail_parent_names_copy:
        while (--i >= 0)
@@ -2107,7 +2510,7 @@ EXPORT_SYMBOL_GPL(clk_register);
  */
 static void __clk_release(struct kref *ref)
 {
-       struct clk *clk = container_of(ref, struct clk, ref);
+       struct clk_core *clk = container_of(ref, struct clk_core, ref);
        int i = clk->num_parents;
 
        kfree(clk->parents);
@@ -2165,12 +2568,13 @@ void clk_unregister(struct clk *clk)
        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
                return;
 
-       clk_debug_unregister(clk);
+       clk_debug_unregister(clk->core);
 
        clk_prepare_lock();
 
-       if (clk->ops == &clk_nodrv_ops) {
-               pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+       if (clk->core->ops == &clk_nodrv_ops) {
+               pr_err("%s: unregistered clock: %s\n", __func__,
+                      clk->core->name);
                return;
        }
        /*
@@ -2178,24 +2582,25 @@ void clk_unregister(struct clk *clk)
         * a reference to this clock.
         */
        flags = clk_enable_lock();
-       clk->ops = &clk_nodrv_ops;
+       clk->core->ops = &clk_nodrv_ops;
        clk_enable_unlock(flags);
 
-       if (!hlist_empty(&clk->children)) {
-               struct clk *child;
+       if (!hlist_empty(&clk->core->children)) {
+               struct clk_core *child;
                struct hlist_node *t;
 
                /* Reparent all children to the orphan list. */
-               hlist_for_each_entry_safe(child, t, &clk->children, child_node)
-                       clk_set_parent(child, NULL);
+               hlist_for_each_entry_safe(child, t, &clk->core->children,
+                                         child_node)
+                       clk_core_set_parent(child, NULL);
        }
 
-       hlist_del_init(&clk->child_node);
+       hlist_del_init(&clk->core->child_node);
 
-       if (clk->prepare_count)
+       if (clk->core->prepare_count)
                pr_warn("%s: unregistering prepared clock: %s\n",
-                                       __func__, clk->name);
-       kref_put(&clk->ref, __clk_release);
+                                       __func__, clk->core->name);
+       kref_put(&clk->core->ref, __clk_release);
 
        clk_prepare_unlock();
 }
@@ -2263,11 +2668,13 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-       if (clk) {
-               if (!try_module_get(clk->owner))
+       struct clk_core *core = !clk ? NULL : clk->core;
+
+       if (core) {
+               if (!try_module_get(core->owner))
                        return 0;
 
-               kref_get(&clk->ref);
+               kref_get(&core->ref);
        }
        return 1;
 }
@@ -2280,11 +2687,20 @@ void __clk_put(struct clk *clk)
                return;
 
        clk_prepare_lock();
-       owner = clk->owner;
-       kref_put(&clk->ref, __clk_release);
+
+       hlist_del(&clk->child_node);
+       if (clk->min_rate > clk->core->req_rate ||
+           clk->max_rate < clk->core->req_rate)
+               clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+       owner = clk->core->owner;
+       kref_put(&clk->core->ref, __clk_release);
+
        clk_prepare_unlock();
 
        module_put(owner);
+
+       kfree(clk);
 }
 
 /***        clk rate change notifiers        ***/
@@ -2339,7 +2755,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 
        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
-       clk->notifier_count++;
+       clk->core->notifier_count++;
 
 out:
        clk_prepare_unlock();
@@ -2376,7 +2792,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-               clk->notifier_count--;
+               clk->core->notifier_count--;
 
                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
@@ -2506,7 +2922,8 @@ void of_clk_del_provider(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+                                      const char *dev_id, const char *con_id)
 {
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-EPROBE_DEFER);
@@ -2515,8 +2932,17 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
-               if (!IS_ERR(clk))
+               if (!IS_ERR(clk)) {
+                       clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
+                                              con_id);
+
+                       if (!IS_ERR(clk) && !__clk_get(clk)) {
+                               __clk_free_clk(clk);
+                               clk = ERR_PTR(-ENOENT);
+                       }
+
                        break;
+               }
        }
 
        return clk;
@@ -2527,7 +2953,7 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
        struct clk *clk;
 
        mutex_lock(&of_clk_mutex);
-       clk = __of_clk_get_from_provider(clkspec);
+       clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
        mutex_unlock(&of_clk_mutex);
 
        return clk;
index c798138f023f6ae6a38c252c0946135a82814eba..ba845408cc3e8515ef8c9457f686727b57027185 100644 (file)
@@ -9,9 +9,31 @@
  * published by the Free Software Foundation.
  */
 
+struct clk_hw;
+
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+                                      const char *dev_id, const char *con_id);
 void of_clk_lock(void);
 void of_clk_unlock(void);
 #endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                            const char *con_id);
+void __clk_free_clk(struct clk *clk);
+#else
+/* All these casts to avoid ifdefs in clkdev... */
+static inline struct clk *
+__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+{
+       return (struct clk *)hw;
+}
+static inline void __clk_free_clk(struct clk *clk) { }
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+       return (struct clk_hw *)clk;
+}
+
+#endif
index da4bda8b7fc7e99d24598b041978d5c19bfa52f6..043fd3633373982f0408de1618370c6d63d5887b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/of.h>
 
 #include "clk.h"
@@ -28,6 +29,20 @@ static DEFINE_MUTEX(clocks_mutex);
 
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 
+static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
+                                        const char *dev_id, const char *con_id)
+{
+       struct clk *clk;
+
+       if (!clkspec)
+               return ERR_PTR(-EINVAL);
+
+       of_clk_lock();
+       clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
+       of_clk_unlock();
+       return clk;
+}
+
 /**
  * of_clk_get_by_clkspec() - Lookup a clock from a clock provider
  * @clkspec: pointer to a clock specifier data structure
@@ -38,22 +53,11 @@ static DEFINE_MUTEX(clocks_mutex);
  */
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
 {
-       struct clk *clk;
-
-       if (!clkspec)
-               return ERR_PTR(-EINVAL);
-
-       of_clk_lock();
-       clk = __of_clk_get_from_provider(clkspec);
-
-       if (!IS_ERR(clk) && !__clk_get(clk))
-               clk = ERR_PTR(-ENOENT);
-
-       of_clk_unlock();
-       return clk;
+       return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
 }
 
-struct clk *of_clk_get(struct device_node *np, int index)
+static struct clk *__of_clk_get(struct device_node *np, int index,
+                              const char *dev_id, const char *con_id)
 {
        struct of_phandle_args clkspec;
        struct clk *clk;
@@ -67,22 +71,21 @@ struct clk *of_clk_get(struct device_node *np, int index)
        if (rc)
                return ERR_PTR(rc);
 
-       clk = of_clk_get_by_clkspec(&clkspec);
+       clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
        of_node_put(clkspec.np);
+
        return clk;
 }
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+       return __of_clk_get(np, index, np->full_name, NULL);
+}
 EXPORT_SYMBOL(of_clk_get);
 
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+                                       const char *dev_id,
+                                       const char *name)
 {
        struct clk *clk = ERR_PTR(-ENOENT);
 
@@ -97,10 +100,10 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
                 */
                if (name)
                        index = of_property_match_string(np, "clock-names", name);
-               clk = of_clk_get(np, index);
-               if (!IS_ERR(clk))
+               clk = __of_clk_get(np, index, dev_id, name);
+               if (!IS_ERR(clk)) {
                        break;
-               else if (name && index >= 0) {
+               } else if (name && index >= 0) {
                        if (PTR_ERR(clk) != -EPROBE_DEFER)
                                pr_err("ERROR: could not get clock %s:%s(%i)\n",
                                        np->full_name, name ? name : "", index);
@@ -119,7 +122,33 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
 
        return clk;
 }
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+       if (!np)
+               return ERR_PTR(-ENOENT);
+
+       return __of_clk_get_by_name(np, np->full_name, name);
+}
 EXPORT_SYMBOL(of_clk_get_by_name);
+
+#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
+
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+                                       const char *dev_id,
+                                       const char *name)
+{
+       return ERR_PTR(-ENOENT);
+}
 #endif
 
 /*
@@ -168,14 +197,28 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
        struct clk_lookup *cl;
+       struct clk *clk = NULL;
 
        mutex_lock(&clocks_mutex);
+
        cl = clk_find(dev_id, con_id);
-       if (cl && !__clk_get(cl->clk))
+       if (!cl)
+               goto out;
+
+       clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
+       if (IS_ERR(clk))
+               goto out;
+
+       if (!__clk_get(clk)) {
+               __clk_free_clk(clk);
                cl = NULL;
+               goto out;
+       }
+
+out:
        mutex_unlock(&clocks_mutex);
 
-       return cl ? cl->clk : ERR_PTR(-ENOENT);
+       return cl ? clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
@@ -185,10 +228,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
        struct clk *clk;
 
        if (dev) {
-               clk = of_clk_get_by_name(dev->of_node, con_id);
-               if (!IS_ERR(clk))
-                       return clk;
-               if (PTR_ERR(clk) == -EPROBE_DEFER)
+               clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
+               if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                        return clk;
        }
 
@@ -331,6 +372,7 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
 
        return 0;
 }
+EXPORT_SYMBOL(clk_register_clkdev);
 
 /**
  * clk_register_clkdevs - register a set of clk_lookup for a struct clk
index 007144f81f50b63301f211103dd200f7f21a4ec4..2e4f6d432bebeb21b6c543494764320407b9a488 100644 (file)
@@ -295,6 +295,8 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
 }
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p)
 {
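Every .determine_rate implementation gains the min_rate/max_rate parameters visible in these hunks. A hypothetical pass-through provider callback honouring the new bounds could look like the sketch below (foo_* names are invented; only the signature is taken from this series):

#include <linux/kernel.h>
#include <linux/clk-provider.h>

static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long min_rate,
                               unsigned long max_rate,
                               unsigned long *best_parent_rate,
                               struct clk_hw **best_parent_hw)
{
        /* clamp the request into the aggregated per-user range */
        rate = clamp(rate, min_rate, max_rate);

        /* pass-through: ask the parent for the same rate */
        *best_parent_rate = rate;

        return rate;
}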
index 48fa53c7ce5e0dbf4cfb7f5eb30abe0104015ef8..de6a873175d2b833f64d95f8e2bf5729da6fd1b4 100644 (file)
@@ -202,6 +202,8 @@ error:
 }
 
 static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
index 38e9153446059a1fb0c0728c841517cf99747f40..38e37bf6b821d8c6207c73179c36f98977933578 100644 (file)
@@ -1,3 +1,4 @@
 obj-y                          += clk-pxa.o
 obj-$(CONFIG_PXA25x)           += clk-pxa25x.o
 obj-$(CONFIG_PXA27x)           += clk-pxa27x.o
+obj-$(CONFIG_PXA3xx)           += clk-pxa3xx.o
index 4e834753ab094500677811702c37ad005503929b..29cee9e8d4d91bf34312207b7016e2f05c7d2746 100644 (file)
@@ -46,7 +46,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw,
                fix = &pclk->lp;
        else
                fix = &pclk->hp;
-       fix->hw.clk = hw->clk;
+       __clk_hw_set_clk(&fix->hw, hw);
        return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
 }
 
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
new file mode 100644 (file)
index 0000000..39f891b
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Marvell PXA3xxx family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <mach/smemc.h>
+#include <mach/pxa3xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+       PXA_CORE_60Mhz = 0,
+       PXA_CORE_RUN,
+       PXA_CORE_TURBO,
+};
+
+enum {
+       PXA_BUS_60Mhz = 0,
+       PXA_BUS_HSS,
+};
+
+/* crystal frequency to HSIO bus frequency multiplier (HSS) */
+static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
+
+/* crystal frequency to static memory controller multiplier (SMCFS) */
+static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
+static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
+
+static const char * const get_freq_khz[] = {
+       "core", "ring_osc_60mhz", "run", "cpll", "system_bus"
+};
+
+/*
+ * Get the clock frequency as reflected by ACSR and the turbo flag.
+ * We assume these values have been applied via a fcs.
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa3xx_get_clk_frequency_khz(int info)
+{
+       struct clk *clk;
+       unsigned long clks[5];
+       int i;
+
+       for (i = 0; i < 5; i++) {
+               clk = clk_get(NULL, get_freq_khz[i]);
+               if (IS_ERR(clk)) {
+                       clks[i] = 0;
+               } else {
+                       clks[i] = clk_get_rate(clk);
+                       clk_put(clk);
+               }
+       }
+       if (info) {
+               pr_info("RO Mode clock: %ld.%02ldMHz\n",
+                       clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+               pr_info("Run Mode clock: %ld.%02ldMHz\n",
+                       clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+               pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+                       clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+               pr_info("System bus clock: %ld.%02ldMHz\n",
+                       clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+       }
+       return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long ac97_div, rate;
+
+       ac97_div = AC97_DIV;
+
+       /* This may lose precision for some rates but won't for the
+        * standard 24.576MHz.
+        */
+       rate = parent_rate / 2;
+       rate /= ((ac97_div >> 12) & 0x7fff);
+       rate *= (ac97_div & 0xfff);
+
+       return rate;
+}
+PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
+
+static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+
+       return (parent_rate / 48)  * smcfs_mult[(acsr >> 23) & 0x7] /
+               df_clkdiv[(memclkcfg >> 16) & 0x3];
+}
+PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
+
+static bool pxa3xx_is_ring_osc_forced(void)
+{
+       unsigned long acsr = ACSR;
+
+       return acsr & ACCR_D0CS;
+}
+
+PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" };
+PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" };
+PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" };
+PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
+PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
+PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
+
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
+                   div_hp, bit, is_lp, flags)                          \
+       PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
+                mult_hp, div_hp, is_lp,  CKEN_AB(bit),                 \
+                (CKEN_ ## bit % 32), flags)
+#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp,         \
+                        mult_hp, div_hp, delay)                        \
+       PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp,       \
+                   div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0)
+#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents)                        \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      CKEN_AB(bit), (CKEN_ ## bit % 32), 0)
+
+static struct desc_clk_cken pxa3xx_clocks[] __initdata = {
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5),
+       PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0),
+
+       PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
+                         pxa3xx_32Khz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents),
+
+       PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4,
+                   1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED),
+};
+
+static struct desc_clk_cken pxa300_310_clocks[] __initdata = {
+
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+       PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa320_clocks[] __initdata = {
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa93x_clocks[] __initdata = {
+
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+       PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
+                                           unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int hss = (acsr >> 14) & 0x3;
+
+       if (pxa3xx_is_ring_osc_forced())
+               return parent_rate;
+       return parent_rate / 48 * hss_mult[hss];
+}
+
+static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw)
+{
+       if (pxa3xx_is_ring_osc_forced())
+               return PXA_BUS_60Mhz;
+       else
+               return PXA_BUS_HSS;
+}
+
+PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus");
+
+static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       return parent_rate;
+}
+
+static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw)
+{
+       unsigned long xclkcfg;
+       unsigned int t;
+
+       if (pxa3xx_is_ring_osc_forced())
+               return PXA_CORE_60Mhz;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       if (t)
+               return PXA_CORE_TURBO;
+       return PXA_CORE_RUN;
+}
+PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
+
+static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+       unsigned int t, xclkcfg;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       return t ? (parent_rate / xn) * 2 : parent_rate;
+}
+PARENTS(clk_pxa3xx_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa3xx_run, "run");
+
+static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+       unsigned int xl = acsr & ACCR_XL_MASK;
+       unsigned int t, xclkcfg;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       pr_info("RJK: parent_rate=%lu, xl=%u, xn=%u\n", parent_rate, xl, xn);
+       return t ? parent_rate * xl * xn : parent_rate * xl;
+}
+PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" };
+RATE_RO_OPS(clk_pxa3xx_cpll, "cpll");
+
+static void __init pxa3xx_register_core(void)
+{
+       clk_register_clk_pxa3xx_cpll();
+       clk_register_clk_pxa3xx_run();
+
+       clkdev_pxa_register(CLK_CORE, "core", NULL,
+                           clk_register_clk_pxa3xx_core());
+}
+
+static void __init pxa3xx_register_plls(void)
+{
+       clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               13 * MHz);
+       clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               32768);
+       clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               120 * MHz);
+       clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+       clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
+       clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
+                                 0, 1, 2);
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+       { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+       const char *con_id;
+       const char *dev_id;
+       const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+       DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"),
+       DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+       DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+       DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"),
+};
+
+static void __init pxa3xx_dummy_clocks_init(void)
+{
+       struct clk *clk;
+       struct dummy_clk *d;
+       const char *name;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+               d = &dummy_clks[i];
+               name = d->dev_id ? d->dev_id : d->con_id;
+               clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+               clk_register_clkdev(clk, d->con_id, d->dev_id);
+       }
+}
+
+static void __init pxa3xx_base_clocks_init(void)
+{
+       pxa3xx_register_plls();
+       pxa3xx_register_core();
+       clk_register_clk_pxa3xx_system_bus();
+       clk_register_clk_pxa3xx_ac97();
+       clk_register_clk_pxa3xx_smemc();
+       clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
+                         (void __iomem *)&OSCC, 11, 0, NULL);
+}
+
+int __init pxa3xx_clocks_init(void)
+{
+       int ret;
+
+       pxa3xx_base_clocks_init();
+       pxa3xx_dummy_clocks_init();
+       ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+       if (ret)
+               return ret;
+       if (cpu_is_pxa320())
+               return clk_pxa_cken_init(pxa320_clocks,
+                                        ARRAY_SIZE(pxa320_clocks));
+       if (cpu_is_pxa300() || cpu_is_pxa310())
+               return clk_pxa_cken_init(pxa300_310_clocks,
+                                        ARRAY_SIZE(pxa300_310_clocks));
+       return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+}
+
+static void __init pxa3xx_dt_clocks_init(struct device_node *np)
+{
+       pxa3xx_clocks_init();
+       clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
index 1107351ed34682e7cd8fa04ffcd61c0e4bc146f9..0d7ab52b7ab0076fec09340410c09bca0efe3b4f 100644 (file)
@@ -29,6 +29,15 @@ config IPQ_GCC_806X
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, etc.
 
+config IPQ_LCC_806X
+       tristate "IPQ806x LPASS Clock Controller"
+       select IPQ_GCC_806X
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the LPASS clock controller on ipq806x devices.
+         Say Y if you want to use audio devices such as i2s, pcm,
+         S/PDIF, etc.
+
 config MSM_GCC_8660
        tristate "MSM8660 Global Clock Controller"
        depends on COMMON_CLK_QCOM
@@ -45,6 +54,15 @@ config MSM_GCC_8960
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, SATA, PCIe, etc.
 
+config MSM_LCC_8960
+       tristate "APQ8064/MSM8960 LPASS Clock Controller"
+       select MSM_GCC_8960
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the LPASS clock controller on apq8064/msm8960 devices.
+         Say Y if you want to use audio devices such as i2s, pcm,
+         SLIMBus, etc.
+
 config MSM_MMCC_8960
        tristate "MSM8960 Multimedia Clock Controller"
        select MSM_GCC_8960
index 783cfb24faa41cbbac81e676d5ed129733529aac..61782646959534bec2b19d445762abbec7fb1591 100644 (file)
@@ -6,13 +6,17 @@ clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o
 
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
index 60873a7f45d94b3687bb342b86dd7bbfb9db67df..b4325f65a1bf6f225811936c9a00927391c62787 100644 (file)
@@ -141,6 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                      unsigned long min_rate, unsigned long max_rate,
                       unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_pll *pll = to_clk_pll(hw);
index 0b93972c8807f11ef5e27bf6eb9f14e85b9f1055..0039bd7d3965370108bba619abdf40009495e693 100644 (file)
@@ -368,6 +368,7 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
                const struct freq_tbl *f, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p_hw)
 {
        unsigned long clk_flags;
@@ -397,22 +398,27 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
 
-       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+                       max_rate, p_rate, p);
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
-       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+                       max_rate, p_rate, p);
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
index 08b8b3729f539ee769f15d1f474e8c3718d1e640..742acfa18d63798c19c25884ef2b50d508965858 100644 (file)
@@ -208,6 +208,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -361,6 +362,8 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -412,6 +415,7 @@ const struct clk_ops clk_edp_pixel_ops = {
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
+                        unsigned long min_rate, unsigned long max_rate,
                         unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -476,6 +480,8 @@ static const struct frac_entry frac_table_pixel[] = {
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
new file mode 100644 (file)
index 0000000..5348491
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-divider.h"
+
+static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
+{
+       return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
+}
+
+static long div_round_rate(struct clk_hw *hw, unsigned long rate,
+                          unsigned long *prate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+
+       return divider_round_rate(hw, rate, prate, NULL, divider->width,
+                                 CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int div_set_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long parent_rate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+       struct clk_regmap *clkr = &divider->clkr;
+       u32 div;
+
+       div = divider_get_val(rate, parent_rate, NULL, divider->width,
+                             CLK_DIVIDER_ROUND_CLOSEST);
+
+       return regmap_update_bits(clkr->regmap, divider->reg,
+                                 (BIT(divider->width) - 1) << divider->shift,
+                                 div << divider->shift);
+}
+
+static unsigned long div_recalc_rate(struct clk_hw *hw,
+                                    unsigned long parent_rate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+       struct clk_regmap *clkr = &divider->clkr;
+       u32 div;
+
+       regmap_read(clkr->regmap, divider->reg, &div);
+       div >>= divider->shift;
+       div &= BIT(divider->width) - 1;
+
+       return divider_recalc_rate(hw, parent_rate, div, NULL,
+                                  CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+const struct clk_ops clk_regmap_div_ops = {
+       .round_rate = div_round_rate,
+       .set_rate = div_set_rate,
+       .recalc_rate = div_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
new file mode 100644 (file)
index 0000000..fc4492e
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
+#define __QCOM_CLK_REGMAP_DIVIDER_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_div {
+       u32                     reg;
+       u32                     shift;
+       u32                     width;
+       struct clk_regmap       clkr;
+};
+
+extern const struct clk_ops clk_regmap_div_ops;
+
+#endif
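
For orientation on the helper just added: a clock-controller driver declares a clk_regmap_div statically and registers it through the usual qcom_cc tables, which is exactly what the LCC drivers later in this series do. A minimal, hypothetical declaration (register offset, field position and parent name are illustrative only, not taken from any real controller) might look like:

#include <linux/clk-provider.h>

#include "clk-regmap-divider.h"

/* Hypothetical 4-bit divider field at bits [13:10] of register 0x48. */
static struct clk_regmap_div example_div_clk = {
	.reg = 0x48,
	.shift = 10,
	.width = 4,
	.clkr = {
		.hw.init = &(struct clk_init_data){
			.name = "example_div_clk",
			.parent_names = (const char *[]){ "example_src" },
			.num_parents = 1,
			.ops = &clk_regmap_div_ops,
		},
	},
};

div_round_rate() and div_set_rate() defer to the generic divider_round_rate()/divider_get_val() helpers with CLK_DIVIDER_ROUND_CLOSEST, so the closest achievable rate is selected rather than the closest rate that does not exceed the request.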
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
new file mode 100644 (file)
index 0000000..cae3071
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-mux.h"
+
+static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw)
+{
+       return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr);
+}
+
+static u8 mux_get_parent(struct clk_hw *hw)
+{
+       struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+       struct clk_regmap *clkr = to_clk_regmap(hw);
+       unsigned int mask = GENMASK(mux->width - 1, 0);
+       unsigned int val;
+
+       regmap_read(clkr->regmap, mux->reg, &val);
+
+       val >>= mux->shift;
+       val &= mask;
+
+       return val;
+}
+
+static int mux_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+       struct clk_regmap *clkr = to_clk_regmap(hw);
+       unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+       unsigned int val;
+
+       val = index;
+       val <<= mux->shift;
+
+       return regmap_update_bits(clkr->regmap, mux->reg, mask, val);
+}
+
+const struct clk_ops clk_regmap_mux_closest_ops = {
+       .get_parent = mux_get_parent,
+       .set_parent = mux_set_parent,
+       .determine_rate = __clk_mux_determine_rate_closest,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h
new file mode 100644 (file)
index 0000000..5cec761
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_H__
+#define __QCOM_CLK_REGMAP_MUX_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_mux {
+       u32                     reg;
+       u32                     shift;
+       u32                     width;
+       struct clk_regmap       clkr;
+};
+
+extern const struct clk_ops clk_regmap_mux_closest_ops;
+
+#endif
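
A matching, hypothetical clk_regmap_mux declaration, again with illustrative names and a 1-bit select field at bit 14, pairs with the divider sketch above:

#include <linux/clk-provider.h>

#include "clk-regmap-mux.h"

/* Hypothetical 1-bit parent-select field at bit 14 of register 0x48. */
static struct clk_regmap_mux example_mux_clk = {
	.reg = 0x48,
	.shift = 14,
	.width = 1,
	.clkr = {
		.hw.init = &(struct clk_init_data){
			.name = "example_mux_clk",
			.parent_names = (const char *[]){
				"example_div_clk",
				"example_ext_clk",
			},
			.num_parents = 2,
			.ops = &clk_regmap_mux_closest_ops,
			.flags = CLK_SET_RATE_PARENT,
		},
	},
};

For this field, mux_set_parent() computes its update mask as GENMASK(width + shift - 1, shift), here GENMASK(14, 14), i.e. BIT(14), and writes the parent index shifted into place; rate selection is delegated to the core's __clk_mux_determine_rate_closest().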
index afed5eb0691e101c246275b1b6a80d44de9d1eed..cbdc31dea7f4311d091b938959179b3dd97e0fbe 100644 (file)
@@ -75,6 +75,17 @@ static struct clk_pll pll3 = {
        },
 };
 
+static struct clk_regmap pll4_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(4),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll4_vote",
+               .parent_names = (const char *[]){ "pll4" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
 static struct clk_pll pll8 = {
        .l_reg = 0x3144,
        .m_reg = 0x3148,
@@ -2163,6 +2174,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL0] = &pll0.clkr,
        [PLL0_VOTE] = &pll0_vote,
        [PLL3] = &pll3.clkr,
+       [PLL4_VOTE] = &pll4_vote,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
index b0b562b9ce0e0cc16949f588045b121cfc7d8046..e60feffc10a151dd77d291c71a81e8cb73234804 100644 (file)
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = {
        },
 };
 
+static struct clk_regmap pll4_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(4),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll4_vote",
+               .parent_names = (const char *[]){ "pll4" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
 static struct clk_pll pll8 = {
        .l_reg = 0x3144,
        .m_reg = 0x3148,
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = {
 
 static struct clk_regmap *gcc_msm8960_clks[] = {
        [PLL3] = &pll3.clkr,
+       [PLL4_VOTE] = &pll4_vote,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
 
 static struct clk_regmap *gcc_apq8064_clks[] = {
        [PLL3] = &pll3.clkr,
+       [PLL4_VOTE] = &pll4_vote,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
new file mode 100644 (file)
index 0000000..c9ff27b
--- /dev/null
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+       .l_reg = 0x4,
+       .m_reg = 0x8,
+       .n_reg = 0xc,
+       .config_reg = 0x14,
+       .mode_reg = 0x0,
+       .status_reg = 0x18,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll4",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static const struct pll_config pll4_config = {
+       .l = 0xf,
+       .m = 0x91,
+       .n = 0xc7,
+       .vco_val = 0x0,
+       .vco_mask = BIT(17) | BIT(16),
+       .pre_div_val = 0x0,
+       .pre_div_mask = BIT(19),
+       .post_div_val = 0x0,
+       .post_div_mask = BIT(21) | BIT(20),
+       .mn_ena_mask = BIT(22),
+       .main_output_mask = BIT(23),
+};
+
+#define P_PXO  0
+#define P_PLL4 1
+
+static const u8 lcc_pxo_pll4_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL4]        = 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+       "pxo",
+       "pll4_vote",
+};
+
+static struct freq_tbl clk_tbl_aif_mi2s[] = {
+       {  1024000, P_PLL4, 4,  1,  96 },
+       {  1411200, P_PLL4, 4,  2, 139 },
+       {  1536000, P_PLL4, 4,  1,  64 },
+       {  2048000, P_PLL4, 4,  1,  48 },
+       {  2116800, P_PLL4, 4,  2,  93 },
+       {  2304000, P_PLL4, 4,  2,  85 },
+       {  2822400, P_PLL4, 4,  6, 209 },
+       {  3072000, P_PLL4, 4,  1,  32 },
+       {  3175200, P_PLL4, 4,  1,  31 },
+       {  4096000, P_PLL4, 4,  1,  24 },
+       {  4233600, P_PLL4, 4,  9, 209 },
+       {  4608000, P_PLL4, 4,  3,  64 },
+       {  5644800, P_PLL4, 4, 12, 209 },
+       {  6144000, P_PLL4, 4,  1,  16 },
+       {  6350400, P_PLL4, 4,  2,  31 },
+       {  8192000, P_PLL4, 4,  1,  12 },
+       {  8467200, P_PLL4, 4, 18, 209 },
+       {  9216000, P_PLL4, 4,  3,  32 },
+       { 11289600, P_PLL4, 4, 24, 209 },
+       { 12288000, P_PLL4, 4,  1,   8 },
+       { 12700800, P_PLL4, 4, 27, 209 },
+       { 13824000, P_PLL4, 4,  9,  64 },
+       { 16384000, P_PLL4, 4,  1,   6 },
+       { 16934400, P_PLL4, 4, 41, 238 },
+       { 18432000, P_PLL4, 4,  3,  16 },
+       { 22579200, P_PLL4, 2, 24, 209 },
+       { 24576000, P_PLL4, 4,  1,   4 },
+       { 27648000, P_PLL4, 4,  9,  32 },
+       { 33868800, P_PLL4, 4, 41, 119 },
+       { 36864000, P_PLL4, 4,  3,   8 },
+       { 45158400, P_PLL4, 1, 24, 209 },
+       { 49152000, P_PLL4, 4,  1,   2 },
+       { 50803200, P_PLL4, 1, 27, 209 },
+       { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+       .ns_reg = 0x48,
+       .md_reg = 0x4c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_mi2s,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_mi2s_parents[] = {
+       "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+       .reg = 0x48,
+       .shift = 10,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_div_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_div_clk",
+                       .parent_names = (const char *[]){ "mi2s_div_clk" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+       .reg = 0x48,
+       .shift = 14,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_clk",
+                       .parent_names = (const char *[]){
+                               "mi2s_bit_div_clk",
+                               "mi2s_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_pcm[] = {
+       {   64000, P_PLL4, 4, 1, 1536 },
+       {  128000, P_PLL4, 4, 1,  768 },
+       {  256000, P_PLL4, 4, 1,  384 },
+       {  512000, P_PLL4, 4, 1,  192 },
+       { 1024000, P_PLL4, 4, 1,   96 },
+       { 2048000, P_PLL4, 4, 1,   48 },
+       { },
+};
+
+static struct clk_rcg pcm_src = {
+       .ns_reg = 0x54,
+       .md_reg = 0x58,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_pcm,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcm_clk_out = {
+       .halt_reg = 0x5c,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk_out",
+                       .parent_names = (const char *[]){ "pcm_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux pcm_clk = {
+       .reg = 0x54,
+       .shift = 10,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk",
+                       .parent_names = (const char *[]){
+                               "pcm_clk_out",
+                               "pcm_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_aif_osr[] = {
+       {  22050, P_PLL4, 1, 147, 20480 },
+       {  32000, P_PLL4, 1,   1,    96 },
+       {  44100, P_PLL4, 1, 147, 10240 },
+       {  48000, P_PLL4, 1,   1,    64 },
+       {  88200, P_PLL4, 1, 147,  5120 },
+       {  96000, P_PLL4, 1,   1,    32 },
+       { 176400, P_PLL4, 1, 147,  2560 },
+       { 192000, P_PLL4, 1,   1,    16 },
+       { },
+};
+
+static struct clk_rcg spdif_src = {
+       .ns_reg = 0xcc,
+       .md_reg = 0xd0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "spdif_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_spdif_parents[] = {
+       "spdif_src",
+};
+
+static struct clk_branch spdif_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "spdif_clk",
+                       .parent_names = lcc_spdif_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_ahbix[] = {
+       { 131072, P_PLL4, 1, 1, 3 },
+       { },
+};
+
+static struct clk_rcg ahbix_clk = {
+       .ns_reg = 0x38,
+       .md_reg = 0x3c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_ahbix,
+       .clkr = {
+               .enable_reg = 0x38,
+               .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
+               .hw.init = &(struct clk_init_data){
+                       .name = "ahbix",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_regmap *lcc_ipq806x_clks[] = {
+       [PLL4] = &pll4.clkr,
+       [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+       [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+       [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+       [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+       [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+       [PCM_SRC] = &pcm_src.clkr,
+       [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+       [PCM_CLK] = &pcm_clk.clkr,
+       [SPDIF_SRC] = &spdif_src.clkr,
+       [SPDIF_CLK] = &spdif_clk.clkr,
+       [AHBIX_CLK] = &ahbix_clk.clkr,
+};
+
+static const struct regmap_config lcc_ipq806x_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xfc,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc lcc_ipq806x_desc = {
+       .config = &lcc_ipq806x_regmap_config,
+       .clks = lcc_ipq806x_clks,
+       .num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
+};
+
+static const struct of_device_id lcc_ipq806x_match_table[] = {
+       { .compatible = "qcom,lcc-ipq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table);
+
+static int lcc_ipq806x_probe(struct platform_device *pdev)
+{
+       u32 val;
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* Configure the rate of PLL4 if the bootloader hasn't already */
+       regmap_read(regmap, 0x0, &val);
+       if (!val)
+               clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+
+       return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
+}
+
+static int lcc_ipq806x_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver lcc_ipq806x_driver = {
+       .probe          = lcc_ipq806x_probe,
+       .remove         = lcc_ipq806x_remove,
+       .driver         = {
+               .name   = "lcc-ipq806x",
+               .of_match_table = lcc_ipq806x_match_table,
+       },
+};
+module_platform_driver(lcc_ipq806x_driver);
+
+MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-ipq806x");
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
new file mode 100644 (file)
index 0000000..e2c8632
--- /dev/null
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-msm8960.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+       .l_reg = 0x4,
+       .m_reg = 0x8,
+       .n_reg = 0xc,
+       .config_reg = 0x14,
+       .mode_reg = 0x0,
+       .status_reg = 0x18,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll4",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+#define P_PXO  0
+#define P_PLL4 1
+
+static const u8 lcc_pxo_pll4_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL4]        = 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+       "pxo",
+       "pll4_vote",
+};
+
+static struct freq_tbl clk_tbl_aif_osr_492[] = {
+       {   512000, P_PLL4, 4, 1, 240 },
+       {   768000, P_PLL4, 4, 1, 160 },
+       {  1024000, P_PLL4, 4, 1, 120 },
+       {  1536000, P_PLL4, 4, 1,  80 },
+       {  2048000, P_PLL4, 4, 1,  60 },
+       {  3072000, P_PLL4, 4, 1,  40 },
+       {  4096000, P_PLL4, 4, 1,  30 },
+       {  6144000, P_PLL4, 4, 1,  20 },
+       {  8192000, P_PLL4, 4, 1,  15 },
+       { 12288000, P_PLL4, 4, 1,  10 },
+       { 24576000, P_PLL4, 4, 1,   5 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct freq_tbl clk_tbl_aif_osr_393[] = {
+       {   512000, P_PLL4, 4, 1, 192 },
+       {   768000, P_PLL4, 4, 1, 128 },
+       {  1024000, P_PLL4, 4, 1,  96 },
+       {  1536000, P_PLL4, 4, 1,  64 },
+       {  2048000, P_PLL4, 4, 1,  48 },
+       {  3072000, P_PLL4, 4, 1,  32 },
+       {  4096000, P_PLL4, 4, 1,  24 },
+       {  6144000, P_PLL4, 4, 1,  16 },
+       {  8192000, P_PLL4, 4, 1,  12 },
+       { 12288000, P_PLL4, 4, 1,   8 },
+       { 24576000, P_PLL4, 4, 1,   4 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+       .ns_reg = 0x48,
+       .md_reg = 0x4c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr_393,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_mi2s_parents[] = {
+       "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+       .reg = 0x48,
+       .shift = 10,
+       .width = 4,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_div_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_div_clk",
+                       .parent_names = (const char *[]){ "mi2s_div_clk" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+       .reg = 0x48,
+       .shift = 14,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_clk",
+                       .parent_names = (const char *[]){
+                               "mi2s_bit_div_clk",
+                               "mi2s_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr)                  \
+static struct clk_rcg prefix##_osr_src = {                     \
+       .ns_reg = _ns,                                          \
+       .md_reg = _md,                                          \
+       .mn = {                                                 \
+               .mnctr_en_bit = 8,                              \
+               .mnctr_reset_bit = 7,                           \
+               .mnctr_mode_shift = 5,                          \
+               .n_val_shift = 24,                              \
+               .m_val_shift = 8,                               \
+               .width = 8,                                     \
+       },                                                      \
+       .p = {                                                  \
+               .pre_div_shift = 3,                             \
+               .pre_div_width = 2,                             \
+       },                                                      \
+       .s = {                                                  \
+               .src_sel_shift = 0,                             \
+               .parent_map = lcc_pxo_pll4_map,                 \
+       },                                                      \
+       .freq_tbl = clk_tbl_aif_osr_393,                        \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(9),                          \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_osr_src",             \
+                       .parent_names = lcc_pxo_pll4,           \
+                       .num_parents = 2,                       \
+                       .ops = &clk_rcg_ops,                    \
+                       .flags = CLK_SET_RATE_GATE,             \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static const char *lcc_##prefix##_parents[] = {                        \
+       #prefix "_osr_src",                                     \
+};                                                             \
+                                                               \
+static struct clk_branch prefix##_osr_clk = {                  \
+       .halt_reg = hr,                                         \
+       .halt_bit = 1,                                          \
+       .halt_check = BRANCH_HALT_ENABLE,                       \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(21),                         \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_osr_clk",             \
+                       .parent_names = lcc_##prefix##_parents, \
+                       .num_parents = 1,                       \
+                       .ops = &clk_branch_ops,                 \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_regmap_div prefix##_div_clk = {              \
+       .reg = _ns,                                             \
+       .shift = 10,                                            \
+       .width = 8,                                             \
+       .clkr = {                                               \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_div_clk",             \
+                       .parent_names = lcc_##prefix##_parents, \
+                       .num_parents = 1,                       \
+                       .ops = &clk_regmap_div_ops,             \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_branch prefix##_bit_div_clk = {              \
+       .halt_reg = hr,                                         \
+       .halt_bit = 0,                                          \
+       .halt_check = BRANCH_HALT_ENABLE,                       \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(19),                         \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_bit_div_clk",         \
+                       .parent_names = (const char *[]){       \
+                               #prefix "_div_clk"              \
+                       },                                      \
+                       .num_parents = 1,                       \
+                       .ops = &clk_branch_ops,                 \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_regmap_mux prefix##_bit_clk = {              \
+       .reg = _ns,                                             \
+       .shift = 18,                                            \
+       .width = 1,                                             \
+       .clkr = {                                               \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_bit_clk",             \
+                       .parent_names = (const char *[]){       \
+                               #prefix "_bit_div_clk",         \
+                               #prefix "_codec_clk",           \
+                       },                                      \
+                       .num_parents = 2,                       \
+                       .ops = &clk_regmap_mux_closest_ops,     \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+}
+
+CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
+CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
+CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
+CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
+
+static struct freq_tbl clk_tbl_pcm_492[] = {
+       {   256000, P_PLL4, 4, 1, 480 },
+       {   512000, P_PLL4, 4, 1, 240 },
+       {   768000, P_PLL4, 4, 1, 160 },
+       {  1024000, P_PLL4, 4, 1, 120 },
+       {  1536000, P_PLL4, 4, 1,  80 },
+       {  2048000, P_PLL4, 4, 1,  60 },
+       {  3072000, P_PLL4, 4, 1,  40 },
+       {  4096000, P_PLL4, 4, 1,  30 },
+       {  6144000, P_PLL4, 4, 1,  20 },
+       {  8192000, P_PLL4, 4, 1,  15 },
+       { 12288000, P_PLL4, 4, 1,  10 },
+       { 24576000, P_PLL4, 4, 1,   5 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct freq_tbl clk_tbl_pcm_393[] = {
+       {   256000, P_PLL4, 4, 1, 384 },
+       {   512000, P_PLL4, 4, 1, 192 },
+       {   768000, P_PLL4, 4, 1, 128 },
+       {  1024000, P_PLL4, 4, 1,  96 },
+       {  1536000, P_PLL4, 4, 1,  64 },
+       {  2048000, P_PLL4, 4, 1,  48 },
+       {  3072000, P_PLL4, 4, 1,  32 },
+       {  4096000, P_PLL4, 4, 1,  24 },
+       {  6144000, P_PLL4, 4, 1,  16 },
+       {  8192000, P_PLL4, 4, 1,  12 },
+       { 12288000, P_PLL4, 4, 1,   8 },
+       { 24576000, P_PLL4, 4, 1,   4 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct clk_rcg pcm_src = {
+       .ns_reg = 0x54,
+       .md_reg = 0x58,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_pcm_393,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcm_clk_out = {
+       .halt_reg = 0x5c,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk_out",
+                       .parent_names = (const char *[]){ "pcm_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux pcm_clk = {
+       .reg = 0x54,
+       .shift = 10,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk",
+                       .parent_names = (const char *[]){
+                               "pcm_clk_out",
+                               "pcm_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg slimbus_src = {
+       .ns_reg = 0xcc,
+       .md_reg = 0xd0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr_393,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "slimbus_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_slimbus_parents[] = {
+       "slimbus_src",
+};
+
+static struct clk_branch audio_slimbus_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "audio_slimbus_clk",
+                       .parent_names = lcc_slimbus_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sps_slimbus_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sps_slimbus_clk",
+                       .parent_names = lcc_slimbus_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap *lcc_msm8960_clks[] = {
+       [PLL4] = &pll4.clkr,
+       [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+       [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+       [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+       [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+       [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+       [PCM_SRC] = &pcm_src.clkr,
+       [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+       [PCM_CLK] = &pcm_clk.clkr,
+       [SLIMBUS_SRC] = &slimbus_src.clkr,
+       [AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
+       [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
+       [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
+       [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
+       [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
+       [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
+       [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
+       [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
+       [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
+       [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
+       [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
+       [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
+       [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
+       [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
+       [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
+       [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
+       [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
+       [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
+       [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
+       [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
+       [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
+       [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
+};
+
+static const struct regmap_config lcc_msm8960_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xfc,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc lcc_msm8960_desc = {
+       .config = &lcc_msm8960_regmap_config,
+       .clks = lcc_msm8960_clks,
+       .num_clks = ARRAY_SIZE(lcc_msm8960_clks),
+};
+
+static const struct of_device_id lcc_msm8960_match_table[] = {
+       { .compatible = "qcom,lcc-msm8960" },
+       { .compatible = "qcom,lcc-apq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table);
+
+static int lcc_msm8960_probe(struct platform_device *pdev)
+{
+       u32 val;
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &lcc_msm8960_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* Use the correct frequency plan depending on speed of PLL4 */
+       regmap_read(regmap, 0x4, &val);
+       if (val == 0x12) {
+               slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
+               mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               pcm_src.freq_tbl = clk_tbl_pcm_492;
+       }
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+
+       return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
+}
+
+static int lcc_msm8960_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver lcc_msm8960_driver = {
+       .probe          = lcc_msm8960_probe,
+       .remove         = lcc_msm8960_remove,
+       .driver         = {
+               .name   = "lcc-msm8960",
+               .of_match_table = lcc_msm8960_match_table,
+       },
+};
+module_platform_driver(lcc_msm8960_driver);
+
+MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-msm8960");
index cbcddcc02475233f53b2d150f8395a99c3b4ba4d..05d7a0bc059907872ff1719d0c401a0494082199 100644 (file)
@@ -535,44 +535,44 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
                        RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 8, GFLAGS),
-       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(17), 0,
                        RK3288_CLKGATE_CON(1), 9, GFLAGS),
-       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
        MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
        COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 10, GFLAGS),
-       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(18), 0,
                        RK3288_CLKGATE_CON(1), 11, GFLAGS),
-       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 12, GFLAGS),
-       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(19), 0,
                        RK3288_CLKGATE_CON(1), 13, GFLAGS),
-       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 14, GFLAGS),
-       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(20), 0,
                        RK3288_CLKGATE_CON(1), 15, GFLAGS),
-       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(2), 12, GFLAGS),
-       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(7), 0,
                        RK3288_CLKGATE_CON(2), 13, GFLAGS),
-       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
 
        COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
@@ -598,7 +598,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(0, "jtag", "ext_jtag", 0,
                        RK3288_CLKGATE_CON(4), 14, GFLAGS),
 
-       COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+       COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
                        RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
                        RK3288_CLKGATE_CON(5), 14, GFLAGS),
        COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
@@ -704,8 +704,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 
        GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
        GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
-       GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
-       GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+       GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+       GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
        GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
 
        /* sclk_gpu gates */
@@ -805,6 +805,20 @@ static int rk3288_clk_suspend(void)
                rk3288_saved_cru_regs[i] =
                                readl_relaxed(rk3288_cru_base + reg_id);
        }
+
+       /*
+        * Switch PLLs other than DPLL (for SDRAM) to slow mode to
+        * avoid crashes on resume. The Mask ROM on the system will
+        * put APLL, CPLL, and GPLL into slow mode at resume time
+        * anyway (which is why we restore them), but we might not
+        * even make it to the Mask ROM if this isn't done at suspend
+        * time.
+        *
+        * NOTE: only APLL truly matters here, but we'll do them all.
+        */
+
+       writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
+
        return 0;
 }
 
@@ -866,6 +880,14 @@ static void __init rk3288_clk_init(struct device_node *np)
                pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
                        __func__, PTR_ERR(clk));
 
+       /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
+       clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+                       __func__, PTR_ERR(clk));
+       else
+               rockchip_clk_add_lookup(clk, PCLK_WDT);
+
        rockchip_clk_register_plls(rk3288_pll_clks,
                                   ARRAY_SIZE(rk3288_pll_clks),
                                   RK3288_GRF_SOC_STATUS1);
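
The CLK_SET_RATE_PARENT additions to the rk3288 UART branches above let a rate request on sclk_uartN propagate through the mux into the fractional divider instead of stopping at the mux. A hedged sketch of what that enables for a serial consumer (the "baudclk" lookup name and the target rate are assumptions used for illustration):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: request a UART base clock that needs the fractional divider. */
static int example_set_uart_baseclk(struct device *dev)
{
	struct clk *baud = devm_clk_get(dev, "baudclk");

	if (IS_ERR(baud))
		return PTR_ERR(baud);

	/*
	 * With CLK_SET_RATE_PARENT on sclk_uart0 and uart0_frac, the core
	 * may re-parent to the fractional divider and program it to hit,
	 * or at least approach, the requested rate.
	 */
	return clk_set_rate(baud, 48000000);
}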
index f2c2ccce49bb1ad00f7502e9d4e4236fc6bcbe54..454b02ae486a86917f31614c2c4b016841deefaa 100644 (file)
@@ -82,6 +82,26 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
        {},
 };
 
+static void exynos_audss_clk_teardown(void)
+{
+       int i;
+
+       for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_mux(clk_table[i]);
+       }
+
+       for (; i < EXYNOS_SRP_CLK; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_divider(clk_table[i]);
+       }
+
+       for (; i < clk_data.clk_num; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_gate(clk_table[i]);
+       }
+}
+
 /* register exynos_audss clocks */
 static int exynos_audss_clk_probe(struct platform_device *pdev)
 {
@@ -219,10 +239,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
        return 0;
 
 unregister:
-       for (i = 0; i < clk_data.clk_num; i++) {
-               if (!IS_ERR(clk_table[i]))
-                       clk_unregister(clk_table[i]);
-       }
+       exynos_audss_clk_teardown();
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
@@ -232,18 +249,13 @@ unregister:
 
 static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
-       int i;
-
 #ifdef CONFIG_PM_SLEEP
        unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
 #endif
 
        of_clk_del_provider(pdev->dev.of_node);
 
-       for (i = 0; i < clk_data.clk_num; i++) {
-               if (!IS_ERR(clk_table[i]))
-                       clk_unregister(clk_table[i]);
-       }
+       exynos_audss_clk_teardown();
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
index 6e6cca3920829b5a7dc0685a071389a2d201d780..cc4c348d8a24038007375bcf53ba45a96bcd9ac9 100644 (file)
 #define PWR_CTRL1_USE_CORE1_WFI                        (1 << 1)
 #define PWR_CTRL1_USE_CORE0_WFI                        (1 << 0)
 
-/* list of PLLs to be registered */
-enum exynos3250_plls {
-       apll, mpll, vpll, upll,
-       nr_plls
-};
-
-/* list of PLLs in DMC block to be registered */
-enum exynos3250_dmc_plls {
-       bpll, epll,
-       nr_dmc_plls
-};
-
-static void __iomem *reg_base;
-static void __iomem *dmc_reg_base;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_clk_regs;
-
 static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        SRC_LEFTBUS,
        DIV_LEFTBUS,
@@ -195,43 +174,6 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        PWR_CTRL2,
 };
 
-static int exynos3250_clk_suspend(void)
-{
-       samsung_clk_save(reg_base, exynos3250_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_clk_regs));
-       return 0;
-}
-
-static void exynos3250_clk_resume(void)
-{
-       samsung_clk_restore(reg_base, exynos3250_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos3250_clk_syscore_ops = {
-       .suspend = exynos3250_clk_suspend,
-       .resume = exynos3250_clk_resume,
-};
-
-static void exynos3250_clk_sleep_init(void)
-{
-       exynos3250_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
-                                          ARRAY_SIZE(exynos3250_cmu_clk_regs));
-       if (!exynos3250_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               goto err;
-       }
-
-       register_syscore_ops(&exynos3250_clk_syscore_ops);
-       return;
-err:
-       kfree(exynos3250_clk_regs);
-}
-#else
-static inline void exynos3250_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clocks */
 PNAME(mout_vpllsrc_p)          = { "fin_pll", };
 
@@ -782,18 +724,18 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
        { /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
-       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-                       APLL_LOCK, APLL_CON0, NULL),
-       [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-                       MPLL_LOCK, MPLL_CON0, NULL),
-       [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
-                       VPLL_LOCK, VPLL_CON0, NULL),
-       [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
-                       UPLL_LOCK, UPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+                       MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
+                       VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates),
+       PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
+                       UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
 };
 
-static void __init exynos3_core_down_clock(void)
+static void __init exynos3_core_down_clock(void __iomem *reg_base)
 {
        unsigned int tmp;
 
@@ -814,38 +756,31 @@ static void __init exynos3_core_down_clock(void)
        __raw_writel(0x0, reg_base + PWR_CTRL2);
 }
 
+static struct samsung_cmu_info cmu_info __initdata = {
+       .pll_clks               = exynos3250_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos3250_plls),
+       .mux_clks               = mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(mux_clks),
+       .div_clks               = div_clks,
+       .nr_div_clks            = ARRAY_SIZE(div_clks),
+       .gate_clks              = gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(gate_clks),
+       .fixed_factor_clks      = fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(fixed_factor_clks),
+       .nr_clk_ids             = CLK_NR_CLKS,
+       .clk_regs               = exynos3250_cmu_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos3250_cmu_clk_regs),
+};
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
        struct samsung_clk_provider *ctx;
 
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+       ctx = samsung_cmu_register_one(np, &cmu_info);
        if (!ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
-                                         ARRAY_SIZE(fixed_factor_clks));
-
-       exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
-       exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
-       exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
-       exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
-
-       samsung_clk_register_pll(ctx, exynos3250_plls,
-                                       ARRAY_SIZE(exynos3250_plls), reg_base);
-
-       samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
-       samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
-       samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
-
-       exynos3_core_down_clock();
+               return;
 
-       exynos3250_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, ctx);
+       exynos3_core_down_clock(ctx->reg_base);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 
@@ -872,12 +807,6 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 #define EPLL_CON2              0x111c
 #define SRC_EPLL               0x1120
 
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
-
 static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
        BPLL_LOCK,
        BPLL_CON0,
@@ -899,43 +828,6 @@ static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
        SRC_EPLL,
 };
 
-static int exynos3250_dmc_clk_suspend(void)
-{
-       samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-       return 0;
-}
-
-static void exynos3250_dmc_clk_resume(void)
-{
-       samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
-       .suspend = exynos3250_dmc_clk_suspend,
-       .resume = exynos3250_dmc_clk_resume,
-};
-
-static void exynos3250_dmc_clk_sleep_init(void)
-{
-       exynos3250_dmc_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
-                                  ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-       if (!exynos3250_dmc_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               goto err;
-       }
-
-       register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
-       return;
-err:
-       kfree(exynos3250_dmc_clk_regs);
-}
-#else
-static inline void exynos3250_dmc_clk_sleep_init(void) { }
-#endif
-
 PNAME(mout_epll_p)     = { "fin_pll", "fout_epll", };
 PNAME(mout_bpll_p)     = { "fin_pll", "fout_bpll", };
 PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", };
@@ -977,43 +869,28 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = {
        DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
 };
 
-static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = {
-       [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
-                       BPLL_LOCK, BPLL_CON0, NULL),
-       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-                       EPLL_LOCK, EPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
+};
+
+static struct samsung_cmu_info dmc_cmu_info __initdata = {
+       .pll_clks               = exynos3250_dmc_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos3250_dmc_plls),
+       .mux_clks               = dmc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(dmc_mux_clks),
+       .div_clks               = dmc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(dmc_div_clks),
+       .nr_clk_ids             = NR_CLKS_DMC,
+       .clk_regs               = exynos3250_cmu_dmc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs),
 };
 
 static void __init exynos3250_cmu_dmc_init(struct device_node *np)
 {
-       struct samsung_clk_provider *ctx;
-
-       dmc_reg_base = of_iomap(np, 0);
-       if (!dmc_reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
-       if (!ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
-       exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
-
-       pr_err("CLK registering epll bpll: %d, %d, %d, %d\n",
-                       exynos3250_dmc_plls[bpll].rate_table[0].rate,
-                       exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
-                       exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
-                       exynos3250_dmc_plls[bpll].rate_table[0].sdiv
-             );
-       samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
-                               ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
-
-       samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
-       samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
-
-       exynos3250_dmc_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, ctx);
+       samsung_cmu_register_one(np, &dmc_cmu_info);
 }
 CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
                exynos3250_cmu_dmc_init);
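
Both sleep_init() copies removed above (leaf CMU and DMC) are subsumed by samsung_cmu_register_one(): passing .clk_regs/.nr_clk_regs in samsung_cmu_info lets the common code register a single syscore suspend/resume hook. A rough, single-CMU sketch of what that common path amounts to, reusing the names from the deleted code (the in-tree helper in clk.c is more general and keeps per-CMU register dumps, so treat this as an approximation):

static struct samsung_clk_reg_dump *reg_dump;
static unsigned int nr_reg_dump;
static void __iomem *dump_base;

static int samsung_clk_suspend(void)
{
	samsung_clk_save(dump_base, reg_dump, nr_reg_dump);
	return 0;
}

static void samsung_clk_resume(void)
{
	samsung_clk_restore(dump_base, reg_dump, nr_reg_dump);
}

static struct syscore_ops samsung_clk_syscore_ops = {
	.suspend = samsung_clk_suspend,
	.resume  = samsung_clk_resume,
};

static void samsung_clk_sleep_init(void __iomem *reg_base,
				   const unsigned long *rdump,
				   unsigned long nr_rdump)
{
	reg_dump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
	if (!reg_dump) {
		pr_warn("%s: failed to allocate sleep save data\n", __func__);
		return;
	}

	dump_base = reg_base;
	nr_reg_dump = nr_rdump;
	register_syscore_ops(&samsung_clk_syscore_ops);
}
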
index 88e8c6bbd77ff8ea6919bc6c1ef7bd3fdc1324ae..51462e85675f7f6ed04dc6d3f38891c400aea7e4 100644 (file)
@@ -703,12 +703,12 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
-       DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+       DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
        DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
        DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
                        CLKOUT_CMU_LEFTBUS, 8, 6),
 
-       DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+       DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
        DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
        DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
                        CLKOUT_CMU_RIGHTBUS, 8, 6),
@@ -781,10 +781,10 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
                        CLK_SET_RATE_PARENT, 0),
        DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
 
-       DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+       DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
        DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
        DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
-       DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+       DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
        DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
        DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
        DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
@@ -829,7 +829,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
        DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
                                                8, 3, CLK_GET_RATE_NOCACHE, 0),
        DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
-       DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+       DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
        DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
index 2123fc251e0f6960b7566759088914162b840c89..6c78b09c829f9148453f56d4f05981452f91487c 100644 (file)
 #define DIV_CPU0               0x14500
 #define DIV_CPU1               0x14504
 
-enum exynos4415_plls {
-       apll, epll, g3d_pll, isp_pll, disp_pll,
-       nr_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_ctx;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_clk_regs;
-
 static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
        SRC_LEFTBUS,
        DIV_LEFTBUS,
@@ -219,41 +206,6 @@ static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
        DIV_CPU1,
 };
 
-static int exynos4415_clk_suspend(void)
-{
-       samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
-
-       return 0;
-}
-
-static void exynos4415_clk_resume(void)
-{
-       samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos4415_clk_syscore_ops = {
-       .suspend = exynos4415_clk_suspend,
-       .resume = exynos4415_clk_resume,
-};
-
-static void exynos4415_clk_sleep_init(void)
-{
-       exynos4415_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
-                                       ARRAY_SIZE(exynos4415_cmu_clk_regs));
-       if (!exynos4415_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               return;
-       }
-
-       register_syscore_ops(&exynos4415_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_g3d_pllsrc_p)       = { "fin_pll", };
 
@@ -959,56 +911,40 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
        { /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
-       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-                       APLL_LOCK, APLL_CON0, NULL),
-       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-                       EPLL_LOCK, EPLL_CON0, NULL),
-       [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
-                       "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
-       [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
-                       ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
-       [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
-                       "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates),
+       PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc",
+               G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+               ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+               "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_info __initdata = {
+       .pll_clks               = exynos4415_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos4415_plls),
+       .mux_clks               = exynos4415_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(exynos4415_mux_clks),
+       .div_clks               = exynos4415_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(exynos4415_div_clks),
+       .gate_clks              = exynos4415_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(exynos4415_gate_clks),
+       .fixed_clks             = exynos4415_fixed_rate_clks,
+       .nr_fixed_clks          = ARRAY_SIZE(exynos4415_fixed_rate_clks),
+       .fixed_factor_clks      = exynos4415_fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(exynos4415_fixed_factor_clks),
+       .nr_clk_ids             = CLK_NR_CLKS,
+       .clk_regs               = exynos4415_cmu_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos4415_cmu_clk_regs),
 };
 
 static void __init exynos4415_cmu_init(struct device_node *np)
 {
-       void __iomem *reg_base;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-       if (!exynos4415_ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
-       exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
-
-       samsung_clk_register_fixed_factor(exynos4415_ctx,
-                               exynos4415_fixed_factor_clks,
-                               ARRAY_SIZE(exynos4415_fixed_factor_clks));
-       samsung_clk_register_fixed_rate(exynos4415_ctx,
-                               exynos4415_fixed_rate_clks,
-                               ARRAY_SIZE(exynos4415_fixed_rate_clks));
-
-       samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
-                               ARRAY_SIZE(exynos4415_plls), reg_base);
-       samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
-                               ARRAY_SIZE(exynos4415_mux_clks));
-       samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
-                               ARRAY_SIZE(exynos4415_div_clks));
-       samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
-                               ARRAY_SIZE(exynos4415_gate_clks));
-
-       exynos4415_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, exynos4415_ctx);
+       samsung_cmu_register_one(np, &cmu_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 
@@ -1027,16 +963,6 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 #define SRC_DMC                        0x300
 #define DIV_DMC1               0x504
 
-enum exynos4415_dmc_plls {
-       mpll, bpll,
-       nr_dmc_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_dmc_ctx;
-
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
-
 static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
        MPLL_LOCK,
        MPLL_CON0,
@@ -1050,42 +976,6 @@ static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
        DIV_DMC1,
 };
 
-static int exynos4415_dmc_clk_suspend(void)
-{
-       samsung_clk_save(exynos4415_dmc_ctx->reg_base,
-                               exynos4415_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-       return 0;
-}
-
-static void exynos4415_dmc_clk_resume(void)
-{
-       samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
-                               exynos4415_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
-       .suspend = exynos4415_dmc_clk_suspend,
-       .resume = exynos4415_dmc_clk_resume,
-};
-
-static void exynos4415_dmc_clk_sleep_init(void)
-{
-       exynos4415_dmc_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-       if (!exynos4415_dmc_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               return;
-       }
-
-       register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_dmc_clk_sleep_init(void) { }
-#endif /* CONFIG_PM_SLEEP */
-
 PNAME(mout_mpll_p)             = { "fin_pll", "fout_mpll", };
 PNAME(mout_bpll_p)             = { "fin_pll", "fout_bpll", };
 PNAME(mbpll_p)                 = { "mout_mpll", "mout_bpll", };
@@ -1107,38 +997,28 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
        DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
 };
 
-static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
-       [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
-               MPLL_LOCK, MPLL_CON0, NULL),
-       [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
-               BPLL_LOCK, BPLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
+       PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+               MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_dmc_info __initdata = {
+       .pll_clks               = exynos4415_dmc_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos4415_dmc_plls),
+       .mux_clks               = exynos4415_dmc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(exynos4415_dmc_mux_clks),
+       .div_clks               = exynos4415_dmc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(exynos4415_dmc_div_clks),
+       .nr_clk_ids             = NR_CLKS_DMC,
+       .clk_regs               = exynos4415_cmu_dmc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs),
 };
 
 static void __init exynos4415_cmu_dmc_init(struct device_node *np)
 {
-       void __iomem *reg_base;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
-       if (!exynos4415_dmc_ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
-       exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
-
-       samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
-                               ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
-       samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
-                               ARRAY_SIZE(exynos4415_dmc_mux_clks));
-       samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
-                               ARRAY_SIZE(exynos4415_dmc_div_clks));
-
-       exynos4415_dmc_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+       samsung_cmu_register_one(np, &cmu_dmc_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
                exynos4415_cmu_dmc_init);
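
With the rate tables now attached directly in the PLL() initialisers, each table row fully determines one output frequency. For the pll_35xx type the relation is FOUT = MDIV * FIN / (PDIV * 2^SDIV); a hedged, illustrative entry for a 24 MHz fin_pll (not taken from exynos4415_pll_rates, which is outside this hunk):

/* 200 * 24 MHz / (3 * 2^1) = 800 MHz; values are for illustration only */
PLL_35XX_RATE(800000000, 200, 3, 1),
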
index ea4483b8d62e8a89ea2ef1c1d37b1e720c5d9f68..03d36e847b78e067d16bdfe1758df10560c4a2b4 100644 (file)
@@ -34,6 +34,7 @@
 #define DIV_TOPC0              0x0600
 #define DIV_TOPC1              0x0604
 #define DIV_TOPC3              0x060C
+#define ENABLE_ACLK_TOPC1      0x0804
 
 static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
        FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
@@ -45,6 +46,7 @@ static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
 };
 
 /* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_aud_pll_ctrl_p)     = { "fin_pll", "fout_aud_pll" };
 PNAME(mout_bus0_pll_ctrl_p)    = { "fin_pll", "fout_bus0_pll" };
 PNAME(mout_bus1_pll_ctrl_p)    = { "fin_pll", "fout_bus1_pll" };
 PNAME(mout_cc_pll_ctrl_p)      = { "fin_pll", "fout_cc_pll" };
@@ -104,9 +106,11 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = {
 
        MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
                MUX_SEL_TOPC1, 16, 1),
+       MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
 
        MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
 
+       MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2),
        MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
 };
 
@@ -114,6 +118,8 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
        DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
                DIV_TOPC0, 4, 4),
 
+       DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532",
+               DIV_TOPC1, 20, 4),
        DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
                DIV_TOPC1, 24, 4),
 
@@ -125,6 +131,18 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
                DIV_TOPC3, 12, 3),
        DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
                DIV_TOPC3, 16, 3),
+       DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
+               DIV_TOPC3, 28, 3),
+};
+
+static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
+       PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+       {},
+};
+
+static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+       GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
+               ENABLE_ACLK_TOPC1, 20, 0, 0),
 };
 
 static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -136,8 +154,8 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = {
                BUS1_DPLL_CON0, NULL),
        PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
                MFC_PLL_CON0, NULL),
-       PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
-               AUD_PLL_CON0, NULL),
+       PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+               AUD_PLL_CON0, pll1460x_24mhz_tbl),
 };
 
 static struct samsung_cmu_info topc_cmu_info __initdata = {
@@ -147,6 +165,8 @@ static struct samsung_cmu_info topc_cmu_info __initdata = {
        .nr_mux_clks            = ARRAY_SIZE(topc_mux_clks),
        .div_clks               = topc_div_clks,
        .nr_div_clks            = ARRAY_SIZE(topc_div_clks),
+       .gate_clks              = topc_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(topc_gate_clks),
        .fixed_factor_clks      = topc_fixed_factor_clks,
        .nr_fixed_factor_clks   = ARRAY_SIZE(topc_fixed_factor_clks),
        .nr_clk_ids             = TOPC_NR_CLK,
@@ -166,9 +186,18 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
 #define MUX_SEL_TOP00                  0x0200
 #define MUX_SEL_TOP01                  0x0204
 #define MUX_SEL_TOP03                  0x020C
+#define MUX_SEL_TOP0_PERIC0            0x0230
+#define MUX_SEL_TOP0_PERIC1            0x0234
+#define MUX_SEL_TOP0_PERIC2            0x0238
 #define MUX_SEL_TOP0_PERIC3            0x023C
 #define DIV_TOP03                      0x060C
+#define DIV_TOP0_PERIC0                        0x0630
+#define DIV_TOP0_PERIC1                        0x0634
+#define DIV_TOP0_PERIC2                        0x0638
 #define DIV_TOP0_PERIC3                        0x063C
+#define ENABLE_SCLK_TOP0_PERIC0                0x0A30
+#define ENABLE_SCLK_TOP0_PERIC1                0x0A34
+#define ENABLE_SCLK_TOP0_PERIC2                0x0A38
 #define ENABLE_SCLK_TOP0_PERIC3                0x0A3C
 
 /* List of parent clocks for Muxes in CMU_TOP0 */
@@ -176,6 +205,7 @@ PNAME(mout_bus0_pll_p)      = { "fin_pll", "dout_sclk_bus0_pll" };
 PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
 PNAME(mout_cc_pll_p)   = { "fin_pll", "dout_sclk_cc_pll" };
 PNAME(mout_mfc_pll_p)  = { "fin_pll", "dout_sclk_mfc_pll" };
+PNAME(mout_aud_pll_p)  = { "fin_pll", "dout_sclk_aud_pll" };
 
 PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
        "ffac_top0_bus0_pll_div2"};
@@ -189,18 +219,34 @@ PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
 PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
        "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
        "mout_top0_half_mfc_pll"};
+PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
+       "ioclk_audiocdclk1", "ioclk_spdif_extclk",
+       "mout_top0_aud_pll", "mout_top0_half_bus0_pll",
+       "mout_top0_half_bus1_pll"};
+PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
+       "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
 
 static unsigned long top0_clk_regs[] __initdata = {
        MUX_SEL_TOP00,
        MUX_SEL_TOP01,
        MUX_SEL_TOP03,
+       MUX_SEL_TOP0_PERIC0,
+       MUX_SEL_TOP0_PERIC1,
+       MUX_SEL_TOP0_PERIC2,
        MUX_SEL_TOP0_PERIC3,
        DIV_TOP03,
+       DIV_TOP0_PERIC0,
+       DIV_TOP0_PERIC1,
+       DIV_TOP0_PERIC2,
        DIV_TOP0_PERIC3,
+       ENABLE_SCLK_TOP0_PERIC0,
+       ENABLE_SCLK_TOP0_PERIC1,
+       ENABLE_SCLK_TOP0_PERIC2,
        ENABLE_SCLK_TOP0_PERIC3,
 };
 
 static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+       MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
        MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
        MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
        MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
@@ -218,10 +264,20 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
        MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
 
+       MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3),
+       MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2),
+       MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2),
+
+       MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2),
+       MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2),
+
+       MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2),
+       MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2),
        MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
        MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
        MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
        MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
+       MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
 };
 
 static struct samsung_div_clock top0_div_clks[] __initdata = {
@@ -230,13 +286,40 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
        DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
                DIV_TOP03, 20, 6),
 
+       DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4),
+       DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12),
+       DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10),
+
+       DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12),
+       DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12),
+
+       DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12),
+       DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12),
+
        DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
        DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
        DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
        DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
+       DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
 };
 
 static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+       GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
+               ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
+               ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1",
+               ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1",
+               ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0",
+               ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3",
+               ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2",
+               ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0),
        GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
                ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
        GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
@@ -245,6 +328,8 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
                ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
        GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
                ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
+       GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4",
+               ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
@@ -343,6 +428,8 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
 
        MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+       MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
+               MUX_SEL_TOP1_FSYS0, 28, 2),
 
        MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
        MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
@@ -356,6 +443,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
 
        DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
                DIV_TOP1_FSYS0, 24, 4),
+       DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
+               DIV_TOP1_FSYS0, 28, 4),
 
        DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
                DIV_TOP1_FSYS1, 24, 4),
@@ -366,6 +455,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
 static struct samsung_gate_clock top1_gate_clks[] __initdata = {
        GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
                ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
+               ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
 
        GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
                ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
@@ -514,6 +605,7 @@ static void __init exynos7_clk_peric0_init(struct device_node *np)
 /* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
 #define MUX_SEL_PERIC10                        0x0200
 #define MUX_SEL_PERIC11                        0x0204
+#define MUX_SEL_PERIC12                        0x0208
 #define ENABLE_PCLK_PERIC1             0x0900
 #define ENABLE_SCLK_PERIC10            0x0A00
 
@@ -525,10 +617,16 @@ PNAME(mout_aclk_peric1_66_p)      = { "fin_pll", "dout_aclk_peric1_66" };
 PNAME(mout_sclk_uart1_p)       = { "fin_pll", "sclk_uart1" };
 PNAME(mout_sclk_uart2_p)       = { "fin_pll", "sclk_uart2" };
 PNAME(mout_sclk_uart3_p)       = { "fin_pll", "sclk_uart3" };
+PNAME(mout_sclk_spi0_p)                = { "fin_pll", "sclk_spi0" };
+PNAME(mout_sclk_spi1_p)                = { "fin_pll", "sclk_spi1" };
+PNAME(mout_sclk_spi2_p)                = { "fin_pll", "sclk_spi2" };
+PNAME(mout_sclk_spi3_p)                = { "fin_pll", "sclk_spi3" };
+PNAME(mout_sclk_spi4_p)                = { "fin_pll", "sclk_spi4" };
 
 static unsigned long peric1_clk_regs[] __initdata = {
        MUX_SEL_PERIC10,
        MUX_SEL_PERIC11,
+       MUX_SEL_PERIC12,
        ENABLE_PCLK_PERIC1,
        ENABLE_SCLK_PERIC10,
 };
@@ -537,6 +635,16 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
                MUX_SEL_PERIC10, 0, 1),
 
+       MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
+               MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
+               MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
+               MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
+               MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
+               MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
        MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
                MUX_SEL_PERIC11, 20, 1),
        MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
@@ -562,6 +670,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
                ENABLE_PCLK_PERIC1, 10, 0, 0),
        GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
                ENABLE_PCLK_PERIC1, 11, 0, 0),
+       GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 12, 0, 0),
+       GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 13, 0, 0),
+       GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 14, 0, 0),
+       GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 15, 0, 0),
+       GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 16, 0, 0),
+       GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0),
+       GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 18, 0, 0),
+       GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 19, 0, 0),
 
        GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
                ENABLE_SCLK_PERIC10, 9, 0, 0),
@@ -569,6 +693,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
                ENABLE_SCLK_PERIC10, 10, 0, 0),
        GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
                ENABLE_SCLK_PERIC10, 11, 0, 0),
+       GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user",
+               ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user",
+               ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user",
+               ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user",
+               ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user",
+               ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1",
+               ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1",
+               ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif",
+               ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_cmu_info peric1_cmu_info __initdata = {
@@ -647,7 +787,12 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
 /* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
 #define MUX_SEL_FSYS00                 0x0200
 #define MUX_SEL_FSYS01                 0x0204
+#define MUX_SEL_FSYS02                 0x0208
+#define ENABLE_ACLK_FSYS00             0x0800
 #define ENABLE_ACLK_FSYS01             0x0804
+#define ENABLE_SCLK_FSYS01             0x0A04
+#define ENABLE_SCLK_FSYS02             0x0A08
+#define ENABLE_SCLK_FSYS04             0x0A10
 
 /*
  * List of parent clocks for Muxes in CMU_FSYS0
@@ -655,10 +800,29 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
 PNAME(mout_aclk_fsys0_200_p)   = { "fin_pll", "dout_aclk_fsys0_200" };
 PNAME(mout_sclk_mmc2_p)                = { "fin_pll", "sclk_mmc2" };
 
+PNAME(mout_sclk_usbdrd300_p)   = { "fin_pll", "sclk_usbdrd300" };
+PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p)   = { "fin_pll",
+                               "phyclk_usbdrd300_udrd30_phyclock" };
+PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p)        = { "fin_pll",
+                               "phyclk_usbdrd300_udrd30_pipe_pclk" };
+
+/* fixed rate clocks used in the FSYS0 block */
+static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
+       FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
+               CLK_IS_ROOT, 60000000),
+       FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
+               CLK_IS_ROOT, 125000000),
+};
+
 static unsigned long fsys0_clk_regs[] __initdata = {
        MUX_SEL_FSYS00,
        MUX_SEL_FSYS01,
+       MUX_SEL_FSYS02,
+       ENABLE_ACLK_FSYS00,
        ENABLE_ACLK_FSYS01,
+       ENABLE_SCLK_FSYS01,
+       ENABLE_SCLK_FSYS02,
+       ENABLE_SCLK_FSYS04,
 };
 
 static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
@@ -666,11 +830,49 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
                MUX_SEL_FSYS00, 24, 1),
 
        MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
+       MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
+               MUX_SEL_FSYS01, 28, 1),
+
+       MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
+               MUX_SEL_FSYS02, 24, 1),
+       MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+               mout_phyclk_usbdrd300_udrd30_phyclk_p,
+               MUX_SEL_FSYS02, 28, 1),
 };
 
 static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+       GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
+               "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS00, 19, 0, 0),
+       GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
+                       ENABLE_ACLK_FSYS00, 3, 0, 0),
+       GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
+                       ENABLE_ACLK_FSYS00, 4, 0, 0),
+
+       GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS01, 29, 0, 0),
        GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
                ENABLE_ACLK_FSYS01, 31, 0, 0),
+
+       GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk",
+               "mout_sclk_usbdrd300_user",
+               ENABLE_SCLK_FSYS01, 4, 0, 0),
+       GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll",
+               ENABLE_SCLK_FSYS01, 8, 0, 0),
+
+       GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER,
+               "phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               ENABLE_SCLK_FSYS02, 24, 0, 0),
+       GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER,
+               "phyclk_usbdrd300_udrd30_phyclk_user",
+               "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+               ENABLE_SCLK_FSYS02, 28, 0, 0),
+
+       GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy",
+               "fin_pll",
+               ENABLE_SCLK_FSYS04, 28, 0, 0),
 };
 
 static struct samsung_cmu_info fsys0_cmu_info __initdata = {
@@ -741,3 +943,205 @@ static void __init exynos7_clk_fsys1_init(struct device_node *np)
 
 CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
        exynos7_clk_fsys1_init);
+
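+/* Register Offset definitions for CMU_MSCL */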
+#define MUX_SEL_MSCL                   0x0200
+#define DIV_MSCL                       0x0600
+#define ENABLE_ACLK_MSCL               0x0800
+#define ENABLE_PCLK_MSCL               0x0900
+
+/* List of parent clocks for Muxes in CMU_MSCL */
+PNAME(mout_aclk_mscl_532_user_p)       = { "fin_pll", "aclk_mscl_532" };
+
+static unsigned long mscl_clk_regs[] __initdata = {
+       MUX_SEL_MSCL,
+       DIV_MSCL,
+       ENABLE_ACLK_MSCL,
+       ENABLE_PCLK_MSCL,
+};
+
+static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+       MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
+               mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
+};
+
+static struct samsung_div_clock mscl_div_clks[] __initdata = {
+       DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
+                       DIV_MSCL, 0, 3),
+};
+
+static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+       GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 31, 0, 0),
+       GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 30, 0, 0),
+       GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 29, 0, 0),
+       GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 28, 0, 0),
+       GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 27, 0, 0),
+       GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 26, 0, 0),
+       GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 25, 0, 0),
+       GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 24, 0, 0),
+       GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 23, 0, 0),
+       GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 22, 0, 0),
+       GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 21, 0, 0),
+       GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 20, 0, 0),
+       GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 19, 0, 0),
+       GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 18, 0, 0),
+       GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 17, 0, 0),
+       GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 16, 0, 0),
+       GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 15, 0, 0),
+       GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 14, 0, 0),
+
+       GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 31, 0, 0),
+       GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 30, 0, 0),
+       GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 29, 0, 0),
+       GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 28, 0, 0),
+       GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 27, 0, 0),
+       GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 26, 0, 0),
+       GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 25, 0, 0),
+       GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 24, 0, 0),
+       GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 23, 0, 0),
+       GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 22, 0, 0),
+       GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 21, 0, 0),
+       GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 20, 0, 0),
+};
+
+static struct samsung_cmu_info mscl_cmu_info __initdata = {
+       .mux_clks               = mscl_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(mscl_mux_clks),
+       .div_clks               = mscl_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(mscl_div_clks),
+       .gate_clks              = mscl_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(mscl_gate_clks),
+       .nr_clk_ids             = MSCL_NR_CLK,
+       .clk_regs               = mscl_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(mscl_clk_regs),
+};
+
+static void __init exynos7_clk_mscl_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &mscl_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
+               exynos7_clk_mscl_init);
+
+/* Register Offset definitions for CMU_AUD (0x114C0000) */
+#define        MUX_SEL_AUD                     0x0200
+#define        DIV_AUD0                        0x0600
+#define        DIV_AUD1                        0x0604
+#define        ENABLE_ACLK_AUD                 0x0800
+#define        ENABLE_PCLK_AUD                 0x0900
+#define        ENABLE_SCLK_AUD                 0x0A00
+
+/*
+ * List of parent clocks for Muxes in CMU_AUD
+ */
+PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
+PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
+
+static unsigned long aud_clk_regs[] __initdata = {
+       MUX_SEL_AUD,
+       DIV_AUD0,
+       DIV_AUD1,
+       ENABLE_ACLK_AUD,
+       ENABLE_PCLK_AUD,
+       ENABLE_SCLK_AUD,
+};
+
+static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+       MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
+       MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
+       MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
+};
+
+static struct samsung_div_clock aud_div_clks[] __initdata = {
+       DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
+       DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
+       DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
+
+       DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4),
+       DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8),
+       DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4),
+       DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5),
+       DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
+};
+
+static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+       GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
+                       ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
+                       ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0),
+       GATE(0, "sclk_slimbus", "dout_sclk_slimbus",
+                       ENABLE_SCLK_AUD, 30, 0, 0),
+
+       GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0),
+       GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0),
+       GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0),
+       GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0),
+       GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0),
+       GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0),
+       GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud",
+                       ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0),
+       GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud",
+                       ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0),
+       GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0),
+
+       GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0),
+       GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud",
+                        ENABLE_ACLK_AUD, 28, 0, 0),
+       GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
+};
+
+static struct samsung_cmu_info aud_cmu_info __initdata = {
+       .mux_clks               = aud_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(aud_mux_clks),
+       .div_clks               = aud_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(aud_div_clks),
+       .gate_clks              = aud_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(aud_gate_clks),
+       .nr_clk_ids             = AUD_NR_CLK,
+       .clk_regs               = aud_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(aud_clk_regs),
+};
+
+static void __init exynos7_clk_aud_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &aud_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud",
+               exynos7_clk_aud_init);
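
Nearly all of the new peripheral gates above (the SPI, I2S, PCM and SPDIF sclk gates) are registered with CLK_SET_RATE_PARENT, so a rate request on the leaf clock is forwarded up through the user mux to the divider in CMU_TOP0 instead of being clamped at the gate. A hedged consumer-side sketch (device handle, connection id and rate are made up for illustration):

	/* in some peripheral driver's probe(); names are hypothetical */
	struct clk *sclk = devm_clk_get(&pdev->dev, "spi_busclk0");

	if (!IS_ERR(sclk)) {
		clk_prepare_enable(sclk);
		/* propagates through sclk_spi0_user and sclk_spi0 up to
		 * dout_sclk_spi0 thanks to CLK_SET_RATE_PARENT */
		clk_set_rate(sclk, 100000000);
	}
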
index 4bda54095a16a0f3a1a670d4e949a4267f411041..9e1f88c04fd46dd583e1b2ccd90fc96c8cae06e8 100644 (file)
@@ -374,19 +374,24 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
  * Common function which registers plls, muxes, dividers and gates
  * for each CMU. It also add CMU register list to register cache.
  */
-void __init samsung_cmu_register_one(struct device_node *np,
+struct samsung_clk_provider * __init samsung_cmu_register_one(
+                       struct device_node *np,
                        struct samsung_cmu_info *cmu)
 {
        void __iomem *reg_base;
        struct samsung_clk_provider *ctx;
 
        reg_base = of_iomap(np, 0);
-       if (!reg_base)
+       if (!reg_base) {
                panic("%s: failed to map registers\n", __func__);
+               return NULL;
+       }
 
        ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
-       if (!ctx)
+       if (!ctx) {
                panic("%s: unable to alllocate ctx\n", __func__);
+               return ctx;
+       }
 
        if (cmu->pll_clks)
                samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
@@ -410,4 +415,6 @@ void __init samsung_cmu_register_one(struct device_node *np,
                        cmu->nr_clk_regs);
 
        samsung_clk_of_add_provider(np, ctx);
+
+       return ctx;
 }
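
Returning the provider lets an SoC driver do extra setup that needs the mapped registers without a second of_iomap(), which is how exynos3250_cmu_init() uses it in the hunk above. A minimal caller sketch (function names hypothetical):

static void __init foo_cmu_init(struct device_node *np)
{
	struct samsung_clk_provider *ctx;

	ctx = samsung_cmu_register_one(np, &foo_cmu_info);
	if (!ctx)
		return;

	/* SoC-specific tweaks that need the mapped register base */
	foo_post_init(ctx->reg_base);
}
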
index 8acabe1f32c4d4f13264eb26088b237778becf6b..e4c75383cea718c7fb563db70fbdd8452b1f9bd8 100644 (file)
@@ -392,7 +392,8 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
                        struct samsung_pll_clock *pll_list,
                        unsigned int nr_clk, void __iomem *base);
 
-extern void __init samsung_cmu_register_one(struct device_node *,
+extern struct samsung_clk_provider __init *samsung_cmu_register_one(
+                       struct device_node *,
                        struct samsung_cmu_info *);
 
 extern unsigned long _get_rate(const char *clk_name);
index f83980f2b9568ffa7df081bf5593518e04bcde0b..0689d7fb2666b1956728d85f9373b9cbd2c600b1 100644 (file)
@@ -1,9 +1,11 @@
 obj-$(CONFIG_ARCH_EMEV2)               += clk-emev2.o
 obj-$(CONFIG_ARCH_R7S72100)            += clk-rz.o
+obj-$(CONFIG_ARCH_R8A73A4)             += clk-r8a73a4.o
 obj-$(CONFIG_ARCH_R8A7740)             += clk-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7779)             += clk-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7791)             += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7793)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7794)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_SH73A0)              += clk-sh73a0.o
 obj-$(CONFIG_ARCH_SHMOBILE_MULTI)      += clk-div6.o
index 639241e31e03ec244907761ef430ed95bc53adde..036a692c72195db93760e40dc1fcb1928b1ecded 100644 (file)
@@ -54,12 +54,19 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
 static void cpg_div6_clock_disable(struct clk_hw *hw)
 {
        struct div6_clock *clock = to_div6_clock(hw);
+       u32 val;
 
-       /* DIV6 clocks require the divisor field to be non-zero when stopping
-        * the clock.
+       val = clk_readl(clock->reg);
+       val |= CPG_DIV6_CKSTP;
+       /*
+        * DIV6 clocks require the divisor field to be non-zero when stopping
+        * the clock. However, some clocks (e.g. ZB on sh73a0) fail to be
+        * re-enabled later if the divisor field is changed when stopping the
+        * clock
         */
-       clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
-                  clock->reg);
+       if (!(val & CPG_DIV6_DIV_MASK))
+               val |= CPG_DIV6_DIV_MASK;
+       clk_writel(val, clock->reg);
 }
 
 static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
@@ -83,6 +90,9 @@ static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
 {
        unsigned int div;
 
+       if (!rate)
+               rate = 1;
+
        div = DIV_ROUND_CLOSEST(parent_rate, rate);
        return clamp_t(unsigned int, div, 1, 64);
 }
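
The new guard keeps DIV_ROUND_CLOSEST() from dividing by zero when a caller asks for a 0 Hz rate, and the result is still clamped to the 1..64 range the DIV6 hardware supports. A quick worked example (parent rate chosen for illustration):

	/* parent_rate = 48 MHz, values illustrative */
	cpg_div6_clock_calc_div(0, 48000000);         /* rate forced to 1, div clamps to 64 */
	cpg_div6_clock_calc_div(12000000, 48000000);  /* 48 MHz / 12 MHz, div = 4 */
	cpg_div6_clock_calc_div(200000000, 48000000); /* rounds to 0, clamped up to 1 */
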
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
new file mode 100644 (file)
index 0000000..29b9a0b
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * r8a73a4 Core CPG Clocks
+ *
+ * Copyright (C) 2014  Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+struct r8a73a4_cpg {
+       struct clk_onecell_data data;
+       spinlock_t lock;
+       void __iomem *reg;
+};
+
+#define CPG_CKSCR      0xc0
+#define CPG_FRQCRA     0x00
+#define CPG_FRQCRB     0x04
+#define CPG_FRQCRC     0xe0
+#define CPG_PLL0CR     0xd8
+#define CPG_PLL1CR     0x28
+#define CPG_PLL2CR     0x2c
+#define CPG_PLL2HCR    0xe4
+#define CPG_PLL2SCR    0xf4
+
+#define CLK_ENABLE_ON_INIT BIT(0)
+
+struct div4_clk {
+       const char *name;
+       unsigned int reg;
+       unsigned int shift;
+};
+
+static struct div4_clk div4_clks[] = {
+       { "i",  CPG_FRQCRA, 20 },
+       { "m3", CPG_FRQCRA, 12 },
+       { "b",  CPG_FRQCRA,  8 },
+       { "m1", CPG_FRQCRA,  4 },
+       { "m2", CPG_FRQCRA,  0 },
+       { "zx", CPG_FRQCRB, 12 },
+       { "zs", CPG_FRQCRB,  8 },
+       { "hp", CPG_FRQCRB,  4 },
+       { NULL, 0, 0 },
+};
+
+static const struct clk_div_table div4_div_table[] = {
+       { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 },
+       { 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 },
+       { 12, 10 }, { 0, 0 }
+};
+
+static struct clk * __init
+r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
+                            const char *name)
+{
+       const struct clk_div_table *table = NULL;
+       const char *parent_name;
+       unsigned int shift, reg;
+       unsigned int mult = 1;
+       unsigned int div = 1;
+
+
+       if (!strcmp(name, "main")) {
+               u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
+
+               switch ((ckscr >> 28) & 3) {
+               case 0: /* extal1 */
+                       parent_name = of_clk_get_parent_name(np, 0);
+                       break;
+               case 1: /* extal1 / 2 */
+                       parent_name = of_clk_get_parent_name(np, 0);
+                       div = 2;
+                       break;
+               case 2: /* extal2 */
+                       parent_name = of_clk_get_parent_name(np, 1);
+                       break;
+               case 3: /* extal2 / 2 */
+                       parent_name = of_clk_get_parent_name(np, 1);
+                       div = 2;
+                       break;
+               }
+       } else if (!strcmp(name, "pll0")) {
+               /* PLL0/1 are configurable multiplier clocks. Register them as
+                * fixed factor clocks for now as there's no generic multiplier
+                * clock implementation and we currently have no need to change
+                * the multiplier value.
+                */
+               u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+
+               parent_name = "main";
+               mult = ((value >> 24) & 0x7f) + 1;
+               if (value & BIT(20))
+                       div = 2;
+       } else if (!strcmp(name, "pll1")) {
+               u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
+
+               parent_name = "main";
+               /* XXX: enable bit? */
+               mult = ((value >> 24) & 0x7f) + 1;
+               if (value & BIT(7))
+                       div = 2;
+       } else if (!strncmp(name, "pll2", 4)) {
+               u32 value, cr;
+
+               switch (name[4]) {
+               case 0:
+                       cr = CPG_PLL2CR;
+                       break;
+               case 's':
+                       cr = CPG_PLL2SCR;
+                       break;
+               case 'h':
+                       cr = CPG_PLL2HCR;
+                       break;
+               default:
+                       return ERR_PTR(-EINVAL);
+               }
+               value = clk_readl(cpg->reg + cr);
+               switch ((value >> 5) & 7) {
+               case 0:
+                       parent_name = "main";
+                       div = 2;
+                       break;
+               case 1:
+                       parent_name = "extal2";
+                       div = 2;
+                       break;
+               case 3:
+                       parent_name = "extal2";
+                       div = 4;
+                       break;
+               case 4:
+                       parent_name = "main";
+                       break;
+               case 5:
+                       parent_name = "extal2";
+                       break;
+               default:
+                       pr_warn("%s: unexpected parent of %s\n", __func__,
+                               name);
+                       return ERR_PTR(-EINVAL);
+               }
+               /* XXX: enable bit? */
+               mult = ((value >> 24) & 0x7f) + 1;
+       } else if (!strcmp(name, "z") || !strcmp(name, "z2")) {
+               u32 shift = 8;
+
+               parent_name = "pll0";
+               if (name[1] == '2') {
+                       div = 2;
+                       shift = 0;
+               }
+               div *= 32;
+               mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
+                      & 0x1f);
+       } else {
+               struct div4_clk *c;
+
+               for (c = div4_clks; c->name; c++) {
+                       if (!strcmp(name, c->name))
+                               break;
+               }
+               if (!c->name)
+                       return ERR_PTR(-EINVAL);
+
+               parent_name = "pll1";
+               table = div4_div_table;
+               reg = c->reg;
+               shift = c->shift;
+       }
+
+       if (!table) {
+               return clk_register_fixed_factor(NULL, name, parent_name, 0,
+                                                mult, div);
+       } else {
+               return clk_register_divider_table(NULL, name, parent_name, 0,
+                                                 cpg->reg + reg, shift, 4, 0,
+                                                 table, &cpg->lock);
+       }
+}
+
+static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
+{
+       struct r8a73a4_cpg *cpg;
+       struct clk **clks;
+       unsigned int i;
+       int num_clks;
+
+       num_clks = of_property_count_strings(np, "clock-output-names");
+       if (num_clks < 0) {
+               pr_err("%s: failed to count clocks\n", __func__);
+               return;
+       }
+
+       cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+       clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
+       if (cpg == NULL || clks == NULL) {
+               /* We're leaking memory on purpose, there's no point in cleaning
+                * up as the system won't boot anyway.
+                */
+               return;
+       }
+
+       spin_lock_init(&cpg->lock);
+
+       cpg->data.clks = clks;
+       cpg->data.clk_num = num_clks;
+
+       cpg->reg = of_iomap(np, 0);
+       if (WARN_ON(cpg->reg == NULL))
+               return;
+
+       for (i = 0; i < num_clks; ++i) {
+               const char *name;
+               struct clk *clk;
+
+               of_property_read_string_index(np, "clock-output-names", i,
+                                             &name);
+
+               clk = r8a73a4_cpg_register_clock(np, cpg, name);
+               if (IS_ERR(clk))
+                       pr_err("%s: failed to register %s %s clock (%ld)\n",
+                              __func__, np->name, name, PTR_ERR(clk));
+               else
+                       cpg->data.clks[i] = clk;
+       }
+
+       of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",
+              r8a73a4_cpg_clocks_init);
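
The "z" and "z2" branches in r8a73a4_cpg_register_clock() above fold the CPG_FRQCRC fraction field into a fixed-factor clock: the 5-bit field stores 32 minus the multiplier, so a field value of 0 leaves the CPU clock at the full PLL0 rate and larger values scale it down in 1/32 steps ("z2" additionally halves the result). A minimal standalone sketch of that arithmetic, using a made-up FRQCRC value and PLL0 rate purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pll0 = 1500000000UL;   /* assumed PLL0 rate: 1.5 GHz */
        unsigned int frqcrc = 0x00000800;    /* assumed FRQCRC: Z field (bits 12:8) = 8 */
        unsigned int mult = 0x20 - ((frqcrc >> 8) & 0x1f);   /* 32 - 8 = 24 */

        /* z = pll0 * (32 - field) / 32 -> 1.5 GHz * 24 / 32 = 1.125 GHz */
        printf("z = %lu Hz\n", pll0 / 32 * mult);
        return 0;
    }
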
index e996425d06a920728cd4f32448c19d08efb5a776..acfb6d7dbd6bc049fe39213d675d5a2602e65d9e 100644 (file)
@@ -33,6 +33,8 @@ struct rcar_gen2_cpg {
 #define CPG_FRQCRC                     0x000000e0
 #define CPG_FRQCRC_ZFC_MASK            (0x1f << 8)
 #define CPG_FRQCRC_ZFC_SHIFT           8
+#define CPG_ADSPCKCR                   0x0000025c
+#define CPG_RCANCKCR                   0x00000270
 
 /* -----------------------------------------------------------------------------
  * Z Clock
@@ -161,6 +163,88 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
        return clk;
 }
 
+static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
+                                                struct device_node *np)
+{
+       const char *parent_name = of_clk_get_parent_name(np, 1);
+       struct clk_fixed_factor *fixed;
+       struct clk_gate *gate;
+       struct clk *clk;
+
+       fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+       if (!fixed)
+               return ERR_PTR(-ENOMEM);
+
+       fixed->mult = 1;
+       fixed->div = 6;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               kfree(fixed);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gate->reg = cpg->reg + CPG_RCANCKCR;
+       gate->bit_idx = 8;
+       gate->flags = CLK_GATE_SET_TO_DISABLE;
+       gate->lock = &cpg->lock;
+
+       clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
+                                    &fixed->hw, &clk_fixed_factor_ops,
+                                    &gate->hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(fixed);
+       }
+
+       return clk;
+}
+
+/* ADSP divisors */
+static const struct clk_div_table cpg_adsp_div_table[] = {
+       {  1,  3 }, {  2,  4 }, {  3,  6 }, {  4,  8 },
+       {  5, 12 }, {  6, 16 }, {  7, 18 }, {  8, 24 },
+       { 10, 36 }, { 11, 48 }, {  0,  0 },
+};
+
+static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
+{
+       const char *parent_name = "pll1";
+       struct clk_divider *div;
+       struct clk_gate *gate;
+       struct clk *clk;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       div->reg = cpg->reg + CPG_ADSPCKCR;
+       div->width = 4;
+       div->table = cpg_adsp_div_table;
+       div->lock = &cpg->lock;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               kfree(div);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gate->reg = cpg->reg + CPG_ADSPCKCR;
+       gate->bit_idx = 8;
+       gate->flags = CLK_GATE_SET_TO_DISABLE;
+       gate->lock = &cpg->lock;
+
+       clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
+                                    &div->hw, &clk_divider_ops,
+                                    &gate->hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(div);
+       }
+
+       return clk;
+}
+
 /* -----------------------------------------------------------------------------
  * CPG Clock Data
  */
@@ -263,6 +347,10 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
                shift = 0;
        } else if (!strcmp(name, "z")) {
                return cpg_z_clk_register(cpg);
+       } else if (!strcmp(name, "rcan")) {
+               return cpg_rcan_clk_register(cpg, np);
+       } else if (!strcmp(name, "adsp")) {
+               return cpg_adsp_clk_register(cpg);
        } else {
                return ERR_PTR(-EINVAL);
        }
index 2282cef9f2ffb0d834b84be7b2b00c32190bde15..bf12a25eb3a22aab048f04d5280195d21bd7dd01 100644 (file)
@@ -37,8 +37,8 @@ static int flexgen_enable(struct clk_hw *hw)
        struct clk_hw *pgate_hw = &flexgen->pgate.hw;
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-       pgate_hw->clk = hw->clk;
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(pgate_hw, hw);
+       __clk_hw_set_clk(fgate_hw, hw);
 
        clk_gate_ops.enable(pgate_hw);
 
@@ -54,7 +54,7 @@ static void flexgen_disable(struct clk_hw *hw)
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
        /* disable only the final gate */
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(fgate_hw, hw);
 
        clk_gate_ops.disable(fgate_hw);
 
@@ -66,7 +66,7 @@ static int flexgen_is_enabled(struct clk_hw *hw)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(fgate_hw, hw);
 
        if (!clk_gate_ops.is_enabled(fgate_hw))
                return 0;
@@ -79,7 +79,7 @@ static u8 flexgen_get_parent(struct clk_hw *hw)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return clk_mux_ops.get_parent(mux_hw);
 }
@@ -89,7 +89,7 @@ static int flexgen_set_parent(struct clk_hw *hw, u8 index)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return clk_mux_ops.set_parent(mux_hw, index);
 }
@@ -124,8 +124,8 @@ unsigned long flexgen_recalc_rate(struct clk_hw *hw,
        struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
        unsigned long mid_rate;
 
-       pdiv_hw->clk = hw->clk;
-       fdiv_hw->clk = hw->clk;
+       __clk_hw_set_clk(pdiv_hw, hw);
+       __clk_hw_set_clk(fdiv_hw, hw);
 
        mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
 
@@ -138,16 +138,27 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
        struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
-       unsigned long primary_div = 0;
+       unsigned long div = 0;
        int ret = 0;
 
-       pdiv_hw->clk = hw->clk;
-       fdiv_hw->clk = hw->clk;
+       __clk_hw_set_clk(pdiv_hw, hw);
+       __clk_hw_set_clk(fdiv_hw, hw);
 
-       primary_div = clk_best_div(parent_rate, rate);
+       div = clk_best_div(parent_rate, rate);
 
-       clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
-       ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div);
+       /*
+       * pdiv is mainly targeted for low freq results, while fdiv
+       * should be used for div <= 64. The other way round can
+       * lead to 'duty cycle' issues.
+       */
+
+       if (div <= 64) {
+               clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
+               ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
+       } else {
+               clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+               ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
+       }
 
        return ret;
 }
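
The reworked flexgen_set_rate() above splits the overall ratio between the two dividers: ratios of 64 or less are programmed into the final divider with the pre-divider left at 1, and larger ratios go to the pre-divider instead, per the duty-cycle note in the comment. A rough illustration of that branch, assuming clk_best_div() behaves like plain rounding of parent_rate / rate (an assumption about the helper, not its actual implementation):

    #include <stdio.h>

    static unsigned long best_div(unsigned long parent, unsigned long rate)
    {
        return (parent + rate / 2) / rate;   /* stand-in for clk_best_div() */
    }

    int main(void)
    {
        unsigned long parent = 600000000UL;           /* assumed parent: 600 MHz */
        unsigned long targets[] = { 25000000UL, 1000000UL };

        for (int i = 0; i < 2; i++) {
            unsigned long div = best_div(parent, targets[i]);

            /* div <= 64: program fdiv, keep pdiv at 1; otherwise swap roles */
            printf("%lu Hz -> div %lu -> use %s\n", targets[i], div,
                   div <= 64 ? "fdiv" : "pdiv");
        }
        return 0;
    }
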
index 79dc40b5cc688fb1a028ca8407cd3feb929ca873..9a15ec344a85900ea68029c730c02ad01ede5335 100644 (file)
@@ -94,7 +94,7 @@ static int clkgena_divmux_enable(struct clk_hw *hw)
        unsigned long timeout;
        int ret = 0;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
        if (ret)
@@ -116,7 +116,7 @@ static void clkgena_divmux_disable(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
 }
@@ -126,7 +126,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
 }
@@ -136,7 +136,7 @@ u8 clkgena_divmux_get_parent(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
        if ((s8)genamux->muxsel < 0) {
@@ -174,7 +174,7 @@ unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.recalc_rate(div_hw, parent_rate);
 }
@@ -185,7 +185,7 @@ static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
 }
@@ -196,7 +196,7 @@ static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.round_rate(div_hw, rate, prate);
 }
index a66953c0f43094a4b96a816fb0d4ff37bfedf466..3a5292e3fcf8086f6418c8dceae6c59aab79b082 100644 (file)
@@ -8,6 +8,7 @@ obj-y += clk-a20-gmac.o
 obj-y += clk-mod0.o
 obj-y += clk-sun8i-mbus.o
 obj-y += clk-sun9i-core.o
+obj-y += clk-sun9i-mmc.o
 
 obj-$(CONFIG_MFD_SUN6I_PRCM) += \
        clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
index 62e08fb58554cbe8d4bf8f1c5a08f08bbd58ec68..8c20190a3e9f4e134824449c10a72e9e13b5b957 100644 (file)
@@ -80,6 +80,8 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_p)
 {
@@ -156,9 +158,10 @@ static const struct clk_ops clk_factors_ops = {
        .set_rate = clk_factors_set_rate,
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-                                          const struct factors_data *data,
-                                          spinlock_t *lock)
+struct clk *sunxi_factors_register(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock,
+                                  void __iomem *reg)
 {
        struct clk *clk;
        struct clk_factors *factors;
@@ -168,11 +171,8 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[FACTORS_MAX_PARENTS];
-       void __iomem *reg;
        int i = 0;
 
-       reg = of_iomap(node, 0);
-
        /* if we have a mux, we will have >1 parents */
        while (i < FACTORS_MAX_PARENTS &&
               (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
index 912238fde1324224863035da8a5836b5c276e8a0..171085ab5513e4f724017d05d6f91929262a4f78 100644 (file)
@@ -36,8 +36,9 @@ struct clk_factors {
        spinlock_t *lock;
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-                                          const struct factors_data *data,
-                                          spinlock_t *lock);
+struct clk *sunxi_factors_register(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock,
+                                  void __iomem *reg);
 
 #endif
index da0524eaee9406aff6c2d73f8b56b82c12360a16..ec8f5a1fca09f4240c433a1de2ea8207e6e369e5 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 
 #include "clk-factors.h"
 
@@ -67,7 +68,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
        .pwidth = 2,
 };
 
-static const struct factors_data sun4i_a10_mod0_data __initconst = {
+static const struct factors_data sun4i_a10_mod0_data = {
        .enable = 31,
        .mux = 24,
        .muxmask = BIT(1) | BIT(0),
@@ -79,15 +80,95 @@ static DEFINE_SPINLOCK(sun4i_a10_mod0_lock);
 
 static void __init sun4i_a10_mod0_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock);
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               /*
+                * This happens with mod0 clk nodes instantiated through
+                * mfd, as those do not have their resources assigned at
+                * CLK_OF_DECLARE time yet, so do not print an error.
+                */
+               return;
+       }
+
+       sunxi_factors_register(node, &sun4i_a10_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
 }
 CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
 
+static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *r;
+       void __iomem *reg;
+
+       if (!np)
+               return -ENODEV;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       reg = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
+
+       sunxi_factors_register(np, &sun4i_a10_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
+       return 0;
+}
+
+static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun4i-a10-mod0-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun4i_a10_mod0_clk_driver = {
+       .driver = {
+               .name = "sun4i-a10-mod0-clk",
+               .of_match_table = sun4i_a10_mod0_clk_dt_ids,
+       },
+       .probe = sun4i_a10_mod0_clk_probe,
+};
+module_platform_driver(sun4i_a10_mod0_clk_driver);
+
+static const struct factors_data sun9i_a80_mod0_data __initconst = {
+       .enable = 31,
+       .mux = 24,
+       .muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+       .table = &sun4i_a10_mod0_config,
+       .getter = sun4i_a10_get_mod0_factors,
+};
+
+static void __init sun9i_a80_mod0_setup(struct device_node *node)
+{
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for mod0-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
+}
+CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup);
+
 static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
 
 static void __init sun5i_a13_mbus_setup(struct device_node *node)
 {
-       struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock);
+       struct clk *mbus;
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for a13-mbus-clk\n");
+               return;
+       }
+
+       mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data,
+                                     &sun5i_a13_mbus_lock, reg);
 
        /* The MBUS clocks needs to be always enabled */
        __clk_get(mbus);
@@ -95,14 +176,10 @@ static void __init sun5i_a13_mbus_setup(struct device_node *node)
 }
 CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
 
-struct mmc_phase_data {
-       u8      offset;
-};
-
 struct mmc_phase {
        struct clk_hw           hw;
+       u8                      offset;
        void __iomem            *reg;
-       struct mmc_phase_data   *data;
        spinlock_t              *lock;
 };
 
@@ -118,7 +195,7 @@ static int mmc_get_phase(struct clk_hw *hw)
        u8 delay;
 
        value = readl(phase->reg);
-       delay = (value >> phase->data->offset) & 0x3;
+       delay = (value >> phase->offset) & 0x3;
 
        if (!delay)
                return 180;
@@ -206,8 +283,8 @@ static int mmc_set_phase(struct clk_hw *hw, int degrees)
 
        spin_lock_irqsave(phase->lock, flags);
        value = readl(phase->reg);
-       value &= ~GENMASK(phase->data->offset + 3, phase->data->offset);
-       value |= delay << phase->data->offset;
+       value &= ~GENMASK(phase->offset + 3, phase->offset);
+       value |= delay << phase->offset;
        writel(value, phase->reg);
        spin_unlock_irqrestore(phase->lock, flags);
 
@@ -219,66 +296,97 @@ static const struct clk_ops mmc_clk_ops = {
        .set_phase      = mmc_set_phase,
 };
 
-static void __init sun4i_a10_mmc_phase_setup(struct device_node *node,
-                                            struct mmc_phase_data *data)
+/*
+ * sunxi_mmc_setup - Common setup function for mmc module clocks
+ *
+ * The only difference between module clocks on different platforms is the
+ * width of the mux register bits and the valid values, which are passed in
+ * through struct factors_data. The phase clocks parts are identical.
+ */
+static void __init sunxi_mmc_setup(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock)
 {
-       const char *parent_names[1] = { of_clk_get_parent_name(node, 0) };
-       struct clk_init_data init = {
-               .num_parents    = 1,
-               .parent_names   = parent_names,
-               .ops            = &mmc_clk_ops,
-       };
-
-       struct mmc_phase *phase;
-       struct clk *clk;
-
-       phase = kmalloc(sizeof(*phase), GFP_KERNEL);
-       if (!phase)
+       struct clk_onecell_data *clk_data;
+       const char *parent;
+       void __iomem *reg;
+       int i;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Couldn't map the %s clock registers\n", node->name);
                return;
+       }
 
-       phase->hw.init = &init;
-
-       phase->reg = of_iomap(node, 0);
-       if (!phase->reg)
-               goto err_free;
-
-       phase->data = data;
-       phase->lock = &sun4i_a10_mod0_lock;
-
-       if (of_property_read_string(node, "clock-output-names", &init.name))
-               init.name = node->name;
+       clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL);
+       if (!clk_data)
+               return;
 
-       clk = clk_register(NULL, &phase->hw);
-       if (IS_ERR(clk))
-               goto err_unmap;
+       clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL);
+       if (!clk_data->clks)
+               goto err_free_data;
+
+       clk_data->clk_num = 3;
+       clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
+       if (!clk_data->clks[0])
+               goto err_free_clks;
+
+       parent = __clk_get_name(clk_data->clks[0]);
+
+       for (i = 1; i < 3; i++) {
+               struct clk_init_data init = {
+                       .num_parents    = 1,
+                       .parent_names   = &parent,
+                       .ops            = &mmc_clk_ops,
+               };
+               struct mmc_phase *phase;
+
+               phase = kmalloc(sizeof(*phase), GFP_KERNEL);
+               if (!phase)
+                       continue;
+
+               phase->hw.init = &init;
+               phase->reg = reg;
+               phase->lock = lock;
+
+               if (i == 1)
+                       phase->offset = 8;
+               else
+                       phase->offset = 20;
+
+               if (of_property_read_string_index(node, "clock-output-names",
+                                                 i, &init.name))
+                       init.name = node->name;
+
+               clk_data->clks[i] = clk_register(NULL, &phase->hw);
+               if (IS_ERR(clk_data->clks[i])) {
+                       kfree(phase);
+                       continue;
+               }
+       }
 
-       of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 
        return;
 
-err_unmap:
-       iounmap(phase->reg);
-err_free:
-       kfree(phase);
+err_free_clks:
+       kfree(clk_data->clks);
+err_free_data:
+       kfree(clk_data);
 }
 
+static DEFINE_SPINLOCK(sun4i_a10_mmc_lock);
 
-static struct mmc_phase_data mmc_output_clk = {
-       .offset = 8,
-};
-
-static struct mmc_phase_data mmc_sample_clk = {
-       .offset = 20,
-};
-
-static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
+static void __init sun4i_a10_mmc_setup(struct device_node *node)
 {
-       sun4i_a10_mmc_phase_setup(node, &mmc_output_clk);
+       sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup);
+CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup);
+
+static DEFINE_SPINLOCK(sun9i_a80_mmc_lock);
 
-static void __init sun4i_a10_mmc_sample_setup(struct device_node *node)
+static void __init sun9i_a80_mmc_setup(struct device_node *node)
 {
-       sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk);
+       sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup);
+CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup);
index 3d282fb8f85cc204d84c182aa4b16ae2181ea5e9..63cf149195ae1a40cb61a7b2256878c0f2ccd5ef 100644 (file)
@@ -45,6 +45,8 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
 }
 
 static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *best_parent_rate,
                                 struct clk_hw **best_parent_clk)
 {
index ef49786eefd3caa5b6f856287635a973213a14b3..14cd026064bf377ed7cbc05bbd84aa608e639fec 100644 (file)
@@ -69,8 +69,17 @@ static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
 
 static void __init sun8i_a23_mbus_setup(struct device_node *node)
 {
-       struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
-                                                 &sun8i_a23_mbus_lock);
+       struct clk *mbus;
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for a23-mbus-clk\n");
+               return;
+       }
+
+       mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
+                                     &sun8i_a23_mbus_lock, reg);
 
        /* The MBUS clocks needs to be always enabled */
        __clk_get(mbus);
index 3cb9036d91bb202fc057273302a6f2bcc31edf9f..d8da77d72861b29f0867d47b43b7917da31037f7 100644 (file)
 
 
 /**
- * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1
+ * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
  * PLL4 rate is calculated as follows
  * rate = (parent_rate * n >> p) / (m + 1);
- * parent_rate is always 24Mhz
+ * parent_rate is always 24MHz
  *
  * p and m are named div1 and div2 in Allwinner's SDK
  */
 
 static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+                                      u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
 {
-       int div;
+       int n;
+       int m = 1;
+       int p = 1;
 
-       /* Normalize value to a 6M multiple */
-       div = DIV_ROUND_UP(*freq, 6000000);
+       /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
+       n = DIV_ROUND_UP(*freq, 6000000);
 
-       /* divs above 256 cannot be odd */
-       if (div > 256)
-               div = round_up(div, 2);
+       /* If n is too large switch to steps of 12 MHz */
+       if (n > 255) {
+               m = 0;
+               n = (n + 1) / 2;
+       }
+
+       /* If n is still too large switch to steps of 24 MHz */
+       if (n > 255) {
+               p = 0;
+               n = (n + 1) / 2;
+       }
 
-       /* divs above 512 must be a multiple of 4 */
-       if (div > 512)
-               div = round_up(div, 4);
+       /* n must be between 12 and 255 */
+       if (n > 255)
+               n = 255;
+       else if (n < 12)
+               n = 12;
 
-       *freq = 6000000 * div;
+       *freq = ((24000000 * n) >> p) / (m + 1);
 
        /* we were called to round the frequency, we can now return */
-       if (n == NULL)
+       if (n_ret == NULL)
                return;
 
-       /* p will be 1 for divs under 512 */
-       if (div < 512)
-               *p = 1;
-       else
-               *p = 0;
-
-       /* m will be 1 if div is odd */
-       if (div & 1)
-               *m = 1;
-       else
-               *m = 0;
-
-       /* calculate a suitable n based on m and p */
-       *n = div / (*p + 1) / (*m + 1);
+       *n_ret = n;
+       *m_ret = m;
+       *p_ret = p;
 }
 
 static struct clk_factors_config sun9i_a80_pll4_config = {
@@ -89,7 +90,17 @@ static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
 
 static void __init sun9i_a80_pll4_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-pll4-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_pll4_data,
+                              &sun9i_a80_pll4_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
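
A worked example may help with the rewritten sun9i_a80_get_pll4_factors() above: the requested rate is first expressed in 6 MHz steps (n, with m = 1 and p = 1 in rate = (24 MHz * n >> p) / (m + 1)), and the step size doubles to 12 MHz and then 24 MHz whenever n would overflow its 8-bit range. A small sketch of that normalization with two arbitrary target rates:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long targets[] = { 960000000ULL, 2400000000ULL };

        for (int i = 0; i < 2; i++) {
            unsigned long long freq = targets[i];
            int n = (int)((freq + 5999999) / 6000000);   /* DIV_ROUND_UP to 6 MHz steps */
            int m = 1, p = 1;

            if (n > 255) {           /* too big: switch to 12 MHz steps */
                m = 0;
                n = (n + 1) / 2;
            }
            if (n > 255) {           /* still too big: 24 MHz steps */
                p = 0;
                n = (n + 1) / 2;
            }
            freq = (24000000ULL * n >> p) / (m + 1);
            printf("%llu Hz -> n=%d m=%d p=%d -> %llu Hz\n",
                   targets[i], n, m, p, freq);
        }
        return 0;
    }

For 960 MHz this yields n = 160, m = 1, p = 1; for 2.4 GHz, n = 200, m = 0, p = 1. The driver additionally clamps n to the 12..255 range, which the sketch omits.
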
 
@@ -139,8 +150,18 @@ static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
 
 static void __init sun9i_a80_gt_setup(struct device_node *node)
 {
-       struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
-                                               &sun9i_a80_gt_lock);
+       void __iomem *reg;
+       struct clk *gt;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-gt-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
+                                   &sun9i_a80_gt_lock, reg);
 
        /* The GT bus clock needs to be always enabled */
        __clk_get(gt);
@@ -194,7 +215,17 @@ static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
 
 static void __init sun9i_a80_ahb_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-ahb-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_ahb_data,
+                              &sun9i_a80_ahb_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
 
@@ -210,7 +241,17 @@ static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
 
 static void __init sun9i_a80_apb0_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-apb0-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_apb0_data,
+                              &sun9i_a80_apb0_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
 
@@ -266,6 +307,16 @@ static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
 
 static void __init sun9i_a80_apb1_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for a80-apb1-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_apb1_data,
+                              &sun9i_a80_apb1_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
new file mode 100644 (file)
index 0000000..710c273
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai        <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#define SUN9I_MMC_WIDTH                4
+
+#define SUN9I_MMC_GATE_BIT     16
+#define SUN9I_MMC_RESET_BIT    18
+
+struct sun9i_mmc_clk_data {
+       spinlock_t                      lock;
+       void __iomem                    *membase;
+       struct clk                      *clk;
+       struct reset_control            *reset;
+       struct clk_onecell_data         clk_data;
+       struct reset_controller_dev     rcdev;
+};
+
+static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       struct sun9i_mmc_clk_data *data = container_of(rcdev,
+                                                      struct sun9i_mmc_clk_data,
+                                                      rcdev);
+       unsigned long flags;
+       void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+       u32 val;
+
+       clk_prepare_enable(data->clk);
+       spin_lock_irqsave(&data->lock, flags);
+
+       val = readl(reg);
+       writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct sun9i_mmc_clk_data *data = container_of(rcdev,
+                                                      struct sun9i_mmc_clk_data,
+                                                      rcdev);
+       unsigned long flags;
+       void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+       u32 val;
+
+       clk_prepare_enable(data->clk);
+       spin_lock_irqsave(&data->lock, flags);
+
+       val = readl(reg);
+       writel(val | BIT(SUN9I_MMC_RESET_BIT), reg);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static struct reset_control_ops sun9i_mmc_reset_ops = {
+       .assert         = sun9i_mmc_reset_assert,
+       .deassert       = sun9i_mmc_reset_deassert,
+};
+
+static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sun9i_mmc_clk_data *data;
+       struct clk_onecell_data *clk_data;
+       const char *clk_name = np->name;
+       const char *clk_parent;
+       struct resource *r;
+       int count, i, ret;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       spin_lock_init(&data->lock);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       /* one clock/reset pair per word */
+       count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH);
+       data->membase = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(data->membase))
+               return PTR_ERR(data->membase);
+
+       clk_data = &data->clk_data;
+       clk_data->clk_num = count;
+       clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *),
+                                     GFP_KERNEL);
+       if (!clk_data->clks)
+               return -ENOMEM;
+
+       data->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->clk)) {
+               dev_err(&pdev->dev, "Could not get clock\n");
+               return PTR_ERR(data->clk);
+       }
+
+       data->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(data->reset)) {
+               dev_err(&pdev->dev, "Could not get reset control\n");
+               return PTR_ERR(data->reset);
+       }
+
+       ret = reset_control_deassert(data->reset);
+       if (ret) {
+               dev_err(&pdev->dev, "Reset deassert err %d\n", ret);
+               return ret;
+       }
+
+       clk_parent = __clk_get_name(data->clk);
+       for (i = 0; i < count; i++) {
+               of_property_read_string_index(np, "clock-output-names",
+                                             i, &clk_name);
+
+               clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
+                                                     clk_parent, 0,
+                                                     data->membase + SUN9I_MMC_WIDTH * i,
+                                                     SUN9I_MMC_GATE_BIT, 0,
+                                                     &data->lock);
+
+               if (IS_ERR(clk_data->clks[i])) {
+                       ret = PTR_ERR(clk_data->clks[i]);
+                       goto err_clk_register;
+               }
+       }
+
+       ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+       if (ret)
+               goto err_clk_provider;
+
+       data->rcdev.owner = THIS_MODULE;
+       data->rcdev.nr_resets = count;
+       data->rcdev.ops = &sun9i_mmc_reset_ops;
+       data->rcdev.of_node = pdev->dev.of_node;
+
+       ret = reset_controller_register(&data->rcdev);
+       if (ret)
+               goto err_rc_reg;
+
+       platform_set_drvdata(pdev, data);
+
+       return 0;
+
+err_rc_reg:
+       of_clk_del_provider(np);
+
+err_clk_provider:
+       for (i = 0; i < count; i++)
+               clk_unregister(clk_data->clks[i]);
+
+err_clk_register:
+       reset_control_assert(data->reset);
+
+       return ret;
+}
+
+static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
+       struct clk_onecell_data *clk_data = &data->clk_data;
+       int i;
+
+       reset_controller_unregister(&data->rcdev);
+       of_clk_del_provider(np);
+       for (i = 0; i < clk_data->clk_num; i++)
+               clk_unregister(clk_data->clks[i]);
+
+       reset_control_assert(data->reset);
+
+       return 0;
+}
+
+static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
+       .driver = {
+               .name = "sun9i-a80-mmc-config-clk",
+               .of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
+       },
+       .probe = sun9i_a80_mmc_config_clk_probe,
+       .remove = sun9i_a80_mmc_config_clk_remove,
+};
+module_platform_driver(sun9i_a80_mmc_config_clk_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
+MODULE_LICENSE("GPL v2");
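
The new sun9i-a80-mmc-config-clk driver above packs one clock gate and one reset line into a single 32-bit word per MMC controller, which is why the probe derives the number of clock/reset pairs from the size of the register block (one word of SUN9I_MMC_WIDTH bytes per controller). A compact sketch of that layout, with a placeholder base address and block size:

    #include <stdio.h>

    #define SUN9I_MMC_WIDTH      4       /* one 32-bit config word per controller */
    #define SUN9I_MMC_GATE_BIT   16
    #define SUN9I_MMC_RESET_BIT  18

    int main(void)
    {
        unsigned long base = 0x01c13000UL;   /* placeholder register base */
        unsigned long size = 16;             /* placeholder: 16-byte block -> 4 pairs */
        int count = (size + SUN9I_MMC_WIDTH - 1) / SUN9I_MMC_WIDTH;

        for (int i = 0; i < count; i++)
            printf("mmc%d: word at 0x%08lx, gate bit %d, reset bit %d (cleared to assert)\n",
                   i, base + SUN9I_MMC_WIDTH * i,
                   SUN9I_MMC_GATE_BIT, SUN9I_MMC_RESET_BIT);
        return 0;
    }
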
index 1818f404538d377d22b8b1d1602a0130e388018a..379324eb5486e1b332d19fcf66fc8d7a0c4aff15 100644 (file)
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
 #include <linux/spinlock.h>
+#include <linux/log2.h>
 
 #include "clk-factors.h"
 
 static DEFINE_SPINLOCK(clk_lock);
 
+/**
+ * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
+ */
+
+#define SUN6I_AHB1_MAX_PARENTS         4
+#define SUN6I_AHB1_MUX_PARENT_PLL6     3
+#define SUN6I_AHB1_MUX_SHIFT           12
+/* un-shifted mask is what mux_clk expects */
+#define SUN6I_AHB1_MUX_MASK            0x3
+#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
+                                        SUN6I_AHB1_MUX_MASK)
+
+#define SUN6I_AHB1_DIV_SHIFT           4
+#define SUN6I_AHB1_DIV_MASK            (0x3 << SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_GET(reg)                ((reg & SUN6I_AHB1_DIV_MASK) >> \
+                                               SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_SET(reg, div)   ((reg & ~SUN6I_AHB1_DIV_MASK) | \
+                                               (div << SUN6I_AHB1_DIV_SHIFT))
+#define SUN6I_AHB1_PLL6_DIV_SHIFT      6
+#define SUN6I_AHB1_PLL6_DIV_MASK       (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_GET(reg)   ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
+                                               SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
+                                               (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
+
+struct sun6i_ahb1_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+};
+
+#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
+
+static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+       unsigned long rate;
+       u32 reg;
+
+       /* Fetch the register value */
+       reg = readl(ahb1->reg);
+
+       /* apply pre-divider first if parent is pll6 */
+       if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
+               parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
+
+       /* clk divider */
+       rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
+
+       return rate;
+}
+
+static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
+                                u8 parent, unsigned long parent_rate)
+{
+       u8 div, calcp, calcm = 1;
+
+       /*
+        * clock can only divide, so we will never be able to achieve
+        * frequencies higher than the parent frequency
+        */
+       if (parent_rate && rate > parent_rate)
+               rate = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, rate);
+
+       /* calculate pre-divider if parent is pll6 */
+       if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
+               if (div < 4)
+                       calcp = 0;
+               else if (div / 2 < 4)
+                       calcp = 1;
+               else if (div / 4 < 4)
+                       calcp = 2;
+               else
+                       calcp = 3;
+
+               calcm = DIV_ROUND_UP(div, 1 << calcp);
+       } else {
+               calcp = __roundup_pow_of_two(div);
+               calcp = calcp > 3 ? 3 : calcp;
+       }
+
+       /* we were asked to pass back divider values */
+       if (divp) {
+               *divp = calcp;
+               *pre_divp = calcm - 1;
+       }
+
+       return (parent_rate / calcm) >> calcp;
+}
+
+static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                         unsigned long min_rate,
+                                         unsigned long max_rate,
+                                         unsigned long *best_parent_rate,
+                                         struct clk_hw **best_parent_clk)
+{
+       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       int i, num_parents;
+       unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
+
+       /* find the parent that can help provide the fastest rate <= rate */
+       num_parents = __clk_get_num_parents(clk);
+       for (i = 0; i < num_parents; i++) {
+               parent = clk_get_parent_by_index(clk, i);
+               if (!parent)
+                       continue;
+               if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_round_rate(parent, rate);
+               else
+                       parent_rate = __clk_get_rate(parent);
+
+               child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
+                                                 parent_rate);
+
+               if (child_rate <= rate && child_rate > best_child_rate) {
+                       best_parent = parent;
+                       best = parent_rate;
+                       best_child_rate = child_rate;
+               }
+       }
+
+       if (best_parent)
+               *best_parent_clk = __clk_get_hw(best_parent);
+       *best_parent_rate = best;
+
+       return best_child_rate;
+}
+
+static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+       unsigned long flags;
+       u8 div, pre_div, parent;
+       u32 reg;
+
+       spin_lock_irqsave(&clk_lock, flags);
+
+       reg = readl(ahb1->reg);
+
+       /* need to know which parent is used to apply pre-divider */
+       parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
+       sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
+
+       reg = SUN6I_AHB1_DIV_SET(reg, div);
+       reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
+       writel(reg, ahb1->reg);
+
+       spin_unlock_irqrestore(&clk_lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops sun6i_ahb1_clk_ops = {
+       .determine_rate = sun6i_ahb1_clk_determine_rate,
+       .recalc_rate    = sun6i_ahb1_clk_recalc_rate,
+       .set_rate       = sun6i_ahb1_clk_set_rate,
+};
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+       struct sun6i_ahb1_clk *ahb1;
+       struct clk_mux *mux;
+       const char *clk_name = node->name;
+       const char *parents[SUN6I_AHB1_MAX_PARENTS];
+       void __iomem *reg;
+       int i = 0;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+
+       /* we have a mux, we will have >1 parents */
+       while (i < SUN6I_AHB1_MAX_PARENTS &&
+              (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+               i++;
+
+       of_property_read_string(node, "clock-output-names", &clk_name);
+
+       ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
+       if (!ahb1)
+               return;
+
+       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+       if (!mux) {
+               kfree(ahb1);
+               return;
+       }
+
+       /* set up clock properties */
+       mux->reg = reg;
+       mux->shift = SUN6I_AHB1_MUX_SHIFT;
+       mux->mask = SUN6I_AHB1_MUX_MASK;
+       mux->lock = &clk_lock;
+       ahb1->reg = reg;
+
+       clk = clk_register_composite(NULL, clk_name, parents, i,
+                                    &mux->hw, &clk_mux_ops,
+                                    &ahb1->hw, &sun6i_ahb1_clk_ops,
+                                    NULL, NULL, 0);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               clk_register_clkdev(clk, clk_name, NULL);
+       }
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
+
 /* Maximum number of parents our clocks have */
 #define SUNXI_MAX_PARENTS      5
 
@@ -354,43 +564,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
        *p = calcp;
 }
 
-/**
- * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
- */
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output)
-{
-       #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
-       #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
-
-       struct clk_hw *hw = __clk_get_hw(clk);
-       struct clk_composite *composite = to_clk_composite(hw);
-       struct clk_hw *rate_hw = composite->rate_hw;
-       struct clk_factors *factors = to_clk_factors(rate_hw);
-       unsigned long flags = 0;
-       u32 reg;
-
-       if (factors->lock)
-               spin_lock_irqsave(factors->lock, flags);
-
-       reg = readl(factors->reg);
-
-       /* set sample clock phase control */
-       reg &= ~(0x7 << 20);
-       reg |= ((sample & 0x7) << 20);
-
-       /* set output clock phase control */
-       reg &= ~(0x7 << 8);
-       reg |= ((output & 0x7) << 8);
-
-       writel(reg, factors->reg);
-
-       if (factors->lock)
-               spin_unlock_irqrestore(factors->lock, flags);
-}
-EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
-
-
 /**
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
@@ -413,6 +586,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
        .kwidth = 2,
        .mshift = 0,
        .mwidth = 2,
+       .n_start = 1,
 };
 
 static struct clk_factors_config sun8i_a23_pll1_config = {
@@ -520,7 +694,16 @@ static const struct factors_data sun7i_a20_out_data __initconst = {
 static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
                                                   const struct factors_data *data)
 {
-       return sunxi_factors_register(node, data, &clk_lock);
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for factors-clk: %s\n",
+                      node->name);
+               return NULL;
+       }
+
+       return sunxi_factors_register(node, data, &clk_lock, reg);
 }
 
 
@@ -561,7 +744,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
        of_property_read_string(node, "clock-output-names", &clk_name);
 
        clk = clk_register_mux(NULL, clk_name, parents, i,
-                              CLK_SET_RATE_NO_REPARENT, reg,
+                              CLK_SET_RATE_PARENT, reg,
                               data->shift, SUNXI_MUX_GATE_WIDTH,
                               0, &clk_lock);
 
@@ -1217,7 +1400,6 @@ CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
 
 static const char *sun6i_critical_clocks[] __initdata = {
        "cpu",
-       "ahb1_sdram",
 };
 
 static void __init sun6i_init_clocks(struct device_node *node)
index f7dfb72884a4e2d177261984023d7c20f9249a22..edb8358fa6cebab596e7f39c022736a4b6c31457 100644 (file)
@@ -15,3 +15,4 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC)         += clk-tegra20.o
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)       += clk-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)       += clk-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC)       += clk-tegra124.o
index 0011d547a9f7ed1e5767625582c1b1ab59a34c9e..60738cc954cb3dd857763d7aaa99ea955d939e5e 100644 (file)
@@ -64,10 +64,8 @@ enum clk_id {
        tegra_clk_disp2,
        tegra_clk_dp2,
        tegra_clk_dpaux,
-       tegra_clk_dsia,
        tegra_clk_dsialp,
        tegra_clk_dsia_mux,
-       tegra_clk_dsib,
        tegra_clk_dsiblp,
        tegra_clk_dsib_mux,
        tegra_clk_dtv,
index 9e899c18af8678c8eac70fab0ede9d8631fff3f3..d84ae49d0e05eead08c6379a0236c339ff88cb5c 100644 (file)
@@ -28,7 +28,7 @@ static u8 clk_periph_get_parent(struct clk_hw *hw)
        const struct clk_ops *mux_ops = periph->mux_ops;
        struct clk_hw *mux_hw = &periph->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->get_parent(mux_hw);
 }
@@ -39,7 +39,7 @@ static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
        const struct clk_ops *mux_ops = periph->mux_ops;
        struct clk_hw *mux_hw = &periph->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->set_parent(mux_hw, index);
 }
@@ -51,7 +51,7 @@ static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->recalc_rate(div_hw, parent_rate);
 }
@@ -63,7 +63,7 @@ static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->round_rate(div_hw, rate, prate);
 }
@@ -75,7 +75,7 @@ static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->set_rate(div_hw, rate, parent_rate);
 }
@@ -86,7 +86,7 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
        const struct clk_ops *gate_ops = periph->gate_ops;
        struct clk_hw *gate_hw = &periph->gate.hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->is_enabled(gate_hw);
 }
@@ -97,7 +97,7 @@ static int clk_periph_enable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = periph->gate_ops;
        struct clk_hw *gate_hw = &periph->gate.hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->enable(gate_hw);
 }
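
For context on the clk-periph.c hunks above: __clk_hw_set_clk() is the helper added to include/linux/clk-provider.h as part of the per-user clk rework that landed around this release. Roughly, it forwards both pointers the framework now tracks, not just the consumer handle the old open-coded assignment copied:

static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
        /* copy the consumer handle and the internal clk_core state pointer */
        dst->clk = src->clk;
        dst->core = src->core;
}

Without the ->core copy, the mux/divider/gate sub-ops of the composite peripheral clock would otherwise be handed a clk_hw with a NULL ->core now that the framework keeps its state in struct clk_core.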
index c7c6d8fb32fbb14bfc0727024bdd91ccab6009b8..bfef9abdf23250d39504587f7f41d3ca6ec6e06a 100644 (file)
@@ -816,7 +816,9 @@ const struct clk_ops tegra_clk_plle_ops = {
        .enable = clk_plle_enable,
 };
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_132_SOC)
 
 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
                           unsigned long parent_rate)
@@ -1505,7 +1507,9 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
        return clk;
 }
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllxc_ops = {
        .is_enabled = clk_pll_is_enabled,
        .enable = clk_pll_iddq_enable,
@@ -1565,7 +1569,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1665,7 +1669,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1706,7 +1710,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1802,7 +1806,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
 }
 #endif
 
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllss_ops = {
        .is_enabled = clk_pll_is_enabled,
        .enable = clk_pll_iddq_enable,
@@ -1830,7 +1834,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
index 37f32c49674eb293cb242b34b446ce73e227de37..cef0727b9eec98b91de3db3c2373fe8e508e5750 100644 (file)
@@ -434,10 +434,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
        MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
        MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
-       MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1),
-       MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2),
-       MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3),
-       MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4),
+       MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1),
+       MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2),
+       MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
+       MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
        MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
        MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
        MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
@@ -470,10 +470,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
        MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
        MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
-       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
-       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
-       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
-       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
+       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8),
+       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8),
+       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8),
+       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8),
        MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
        MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
        MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -537,8 +537,6 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
        GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
        GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
-       GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
-       GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
        GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
        GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
        GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
index 0b03d2cf7264f7d42dafb84fd83bb29e18f420a1..d0766423a5d607554ff8f9e9f9b7f610f5d875a3 100644 (file)
@@ -715,7 +715,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
        [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
        [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
-       [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
        [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
        [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
        [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
@@ -739,7 +738,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
        [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
-       [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
        [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
        [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
@@ -1224,6 +1222,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
                               clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
        clks[TEGRA114_CLK_DSIB_MUX] = clk;
 
+       clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
+                                            0, 48, periph_clk_enb_refcnt);
+       clks[TEGRA114_CLK_DSIA] = clk;
+
+       clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
+                                            0, 82, periph_clk_enb_refcnt);
+       clks[TEGRA114_CLK_DSIB] = clk;
+
        /* emc mux */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
                               ARRAY_SIZE(mux_pllmcp_clkm),
index f5f9baca7bb621924d14ff4d9364d18580e0fbc2..9a893f2fe8e9889e7e1df95d12ba90d687763942 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014 NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #include "clk.h"
 #include "clk-id.h"
 
+/*
+ * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register
+ * banks present in the Tegra124/132 CAR IP block.  The banks are
+ * identified by single letters, e.g.: L, H, U, V, W, X.  See
+ * periph_regs[] in drivers/clk/tegra/clk.c
+ */
+#define TEGRA124_CAR_BANK_COUNT                        6
+
 #define CLK_SOURCE_CSITE 0x1d4
 #define CLK_SOURCE_EMC 0x19c
 
@@ -128,7 +136,6 @@ static unsigned long osc_freq;
 static unsigned long pll_ref_freq;
 
 static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(pll_d2_lock);
 static DEFINE_SPINLOCK(pll_e_lock);
 static DEFINE_SPINLOCK(pll_re_lock);
 static DEFINE_SPINLOCK(pll_u_lock);
@@ -145,11 +152,6 @@ static unsigned long tegra124_input_freq[] = {
        [12] = 260000000,
 };
 
-static const char *mux_plld_out0_plld2_out0[] = {
-       "pll_d_out0", "pll_d2_out0",
-};
-#define mux_plld_out0_plld2_out0_idx NULL
-
 static const char *mux_pllmcp_clkm[] = {
        "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
 };
@@ -783,7 +785,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
        [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
        [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
-       [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
        [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
        [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
        [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
@@ -809,7 +810,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
        [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
-       [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
        [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
        [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
@@ -949,8 +949,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
        [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
        [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
-       [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
-       [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -1112,17 +1110,17 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
                                        1, 2);
        clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
 
-       /* dsia mux */
-       clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
-                              ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-                              clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
-       clks[TEGRA124_CLK_DSIA_MUX] = clk;
+       clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
+                               clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
+       clks[TEGRA124_CLK_PLLD_DSI] = clk;
+
+       clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
+                                            0, 48, periph_clk_enb_refcnt);
+       clks[TEGRA124_CLK_DSIA] = clk;
 
-       /* dsib mux */
-       clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
-                              ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-                              clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
-       clks[TEGRA124_CLK_DSIB_MUX] = clk;
+       clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
+                                            0, 82, periph_clk_enb_refcnt);
+       clks[TEGRA124_CLK_DSIB] = clk;
 
        /* emc mux */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1351,7 +1349,7 @@ static const struct of_device_id pmc_match[] __initconst = {
        {},
 };
 
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table common_init_table[] __initdata = {
        {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
        {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
        {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
@@ -1368,6 +1366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
        {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
        {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
+       {TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0},
+       {TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0},
        {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
        {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
        {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
@@ -1385,27 +1385,73 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
        {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
        {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
-       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
+       /* This MUST be the last entry. */
+       {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
        {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
+       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+       /* This MUST be the last entry. */
+       {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+/* Tegra132 requires the SOC_THERM clock to remain active */
+static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
+       {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},
        /* This MUST be the last entry. */
        {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
 
+/**
+ * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra124 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
 static void __init tegra124_clock_apply_init_table(void)
 {
-       tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX);
 }
 
-static void __init tegra124_clock_init(struct device_node *np)
+/**
+ * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra132 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
+static void __init tegra132_clock_apply_init_table(void)
+{
+       tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+/**
+ * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most of the clocks controlled by the CAR IP block, along
+ * with a few clocks controlled by the PMC IP block.  Everything in
+ * this function should be common to Tegra124 and Tegra132.  XXX The
+ * PMC clock initialization should probably be moved to PMC-specific
+ * driver code.  No return value.
+ */
+static void __init tegra124_132_clock_init_pre(struct device_node *np)
 {
        struct device_node *node;
+       u32 plld_base;
 
        clk_base = of_iomap(np, 0);
        if (!clk_base) {
-               pr_err("ioremap tegra124 CAR failed\n");
+               pr_err("ioremap tegra124/tegra132 CAR failed\n");
                return;
        }
 
@@ -1423,7 +1469,8 @@ static void __init tegra124_clock_init(struct device_node *np)
                return;
        }
 
-       clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+       clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX,
+                             TEGRA124_CAR_BANK_COUNT);
        if (!clks)
                return;
 
@@ -1437,13 +1484,76 @@ static void __init tegra124_clock_init(struct device_node *np)
        tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
        tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
+       /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
+       plld_base = clk_readl(clk_base + PLLD_BASE);
+       plld_base &= ~BIT(25);
+       clk_writel(plld_base, clk_base + PLLD_BASE);
+}
+
+/**
+ * tegra124_132_clock_init_post - clock initialization postamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register the remaining clocks, along with a few clocks controlled by the PMC
+ * IP block.  Everything in this function should be common to Tegra124
+ * and Tegra132.  This function must be called after
+ * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
+ * not be set.  No return value.
+ */
+static void __init tegra124_132_clock_init_post(struct device_node *np)
+{
        tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
-                                       &pll_x_params);
+                                 &pll_x_params);
        tegra_add_of_provider(np);
        tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
 
+       tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+}
+
+/**
+ * tegra124_clock_init - Tegra124-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra124 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra124-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra124_clock_init(struct device_node *np)
+{
+       tegra124_132_clock_init_pre(np);
        tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+       tegra124_132_clock_init_post(np);
+}
 
-       tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+/**
+ * tegra132_clock_init - Tegra132-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra132 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra132-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra132_clock_init(struct device_node *np)
+{
+       tegra124_132_clock_init_pre(np);
+
+       /*
+        * On Tegra132, these clocks are controlled by the
+        * CLUSTER_clocks IP block, located in the CPU complex
+        */
+       tegra124_clks[tegra_clk_cclk_g].present = false;
+       tegra124_clks[tegra_clk_cclk_lp].present = false;
+       tegra124_clks[tegra_clk_pll_x].present = false;
+       tegra124_clks[tegra_clk_pll_x_out0].present = false;
+
+       tegra_clk_apply_init_table = tegra132_clock_apply_init_table;
+       tegra124_132_clock_init_post(np);
 }
 CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
+CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init);
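
The net effect of the rework above is that Tegra124 and Tegra132 share almost everything except their init tables and a handful of .present flags. Distilled into hypothetical names (the foo* identifiers below are illustrative only, not part of the patch), the shape is:

/* illustrative names only */
static void __init foo124_clock_init(struct device_node *np)
{
        foo124_132_clock_init_pre(np);            /* shared clock registration */
        foo_clk_apply_init_table = foo124_apply_init_table;
        foo124_132_clock_init_post(np);           /* OF provider, devclks, CPU ops */
}

static void __init foo132_clock_init(struct device_node *np)
{
        foo124_132_clock_init_pre(np);
        foo_clks[foo_clk_cclk_g].present = false; /* CPU clocks handled elsewhere */
        foo_clk_apply_init_table = foo132_apply_init_table;
        foo124_132_clock_init_post(np);
}

CLK_OF_DECLARE(foo124, "vendor,foo124-car", foo124_clock_init);
CLK_OF_DECLARE(foo132, "vendor,foo132-car", foo132_clock_init);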
index 97dc8595c3cd5c1b59113deaa16bbd23a5298cfc..9ddb7547cb431b4b234d5ec6a41fba457abc2678 100644 (file)
@@ -302,10 +302,13 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
 
 tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
 
-void __init tegra_clocks_apply_init_table(void)
+static int __init tegra_clocks_apply_init_table(void)
 {
        if (!tegra_clk_apply_init_table)
-               return;
+               return 0;
 
        tegra_clk_apply_init_table();
+
+       return 0;
 }
+arch_initcall(tegra_clocks_apply_init_table);
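
The hunk above is the consumer side of tegra_clk_apply_init_table: initcall routines must have the int (*)(void) prototype, which is why the helper now returns 0 and registers itself with arch_initcall() (initcall level 3, after core/postcore but before subsys and device initcalls) instead of relying on an explicit call elsewhere. A minimal sketch of the pattern, with a hypothetical foo pointer standing in for the Tegra one:

static void (*foo_clk_apply_init_table)(void);    /* hypothetical; set by the CLK_OF_DECLARE hook */

static int __init foo_clocks_apply_init_table(void)
{
        if (!foo_clk_apply_init_table)            /* no matching SoC probed */
                return 0;

        foo_clk_apply_init_table();
        return 0;
}
arch_initcall(foo_clocks_apply_init_table);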
index ed4d0aaf891639585825344d7460bbfcbc1c81e2..105ffd0f5e79da04e496b291e4eb3968b834ba64 100644 (file)
@@ -1,13 +1,17 @@
-ifneq ($(CONFIG_OF),)
 obj-y                                  += clk.o autoidle.o clockdomain.o
 clk-common                             = dpll.o composite.o divider.o gate.o \
                                          fixed-factor.o mux.o apll.o
 obj-$(CONFIG_SOC_AM33XX)               += $(clk-common) clk-33xx.o
+obj-$(CONFIG_SOC_TI81XX)               += $(clk-common) fapll.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)               += $(clk-common) interface.o clk-2xxx.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o \
+                                          clk-3xxx.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(clk-common) clk-44xx.o
 obj-$(CONFIG_SOC_OMAP5)                        += $(clk-common) clk-54xx.o
 obj-$(CONFIG_SOC_DRA7XX)               += $(clk-common) clk-7xx.o \
                                           clk-dra7-atl.o
 obj-$(CONFIG_SOC_AM43XX)               += $(clk-common) clk-43xx.o
+
+ifdef CONFIG_ATAGS
+obj-$(CONFIG_ARCH_OMAP3)                += clk-3xxx-legacy.o
 endif
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
new file mode 100644 (file)
index 0000000..e0732a4
--- /dev/null
@@ -0,0 +1,4653 @@
+/*
+ * OMAP3 Legacy clock data
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_clk_fixed virt_12m_ck_data = {
+       .frequency = 12000000,
+};
+
+static struct ti_clk virt_12m_ck = {
+       .name = "virt_12m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_12m_ck_data,
+};
+
+static struct ti_clk_fixed virt_13m_ck_data = {
+       .frequency = 13000000,
+};
+
+static struct ti_clk virt_13m_ck = {
+       .name = "virt_13m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_13m_ck_data,
+};
+
+static struct ti_clk_fixed virt_19200000_ck_data = {
+       .frequency = 19200000,
+};
+
+static struct ti_clk virt_19200000_ck = {
+       .name = "virt_19200000_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_19200000_ck_data,
+};
+
+static struct ti_clk_fixed virt_26000000_ck_data = {
+       .frequency = 26000000,
+};
+
+static struct ti_clk virt_26000000_ck = {
+       .name = "virt_26000000_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_26000000_ck_data,
+};
+
+static struct ti_clk_fixed virt_38_4m_ck_data = {
+       .frequency = 38400000,
+};
+
+static struct ti_clk virt_38_4m_ck = {
+       .name = "virt_38_4m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_38_4m_ck_data,
+};
+
+static struct ti_clk_fixed virt_16_8m_ck_data = {
+       .frequency = 16800000,
+};
+
+static struct ti_clk virt_16_8m_ck = {
+       .name = "virt_16_8m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_16_8m_ck_data,
+};
+
+static const char *osc_sys_ck_parents[] = {
+       "virt_12m_ck",
+       "virt_13m_ck",
+       "virt_19200000_ck",
+       "virt_26000000_ck",
+       "virt_38_4m_ck",
+       "virt_16_8m_ck",
+};
+
+static struct ti_clk_mux osc_sys_ck_data = {
+       .num_parents = ARRAY_SIZE(osc_sys_ck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_PRM,
+       .parents = osc_sys_ck_parents,
+};
+
+static struct ti_clk osc_sys_ck = {
+       .name = "osc_sys_ck",
+       .type = TI_CLK_MUX,
+       .data = &osc_sys_ck_data,
+};
+
+static struct ti_clk_divider sys_ck_data = {
+       .parent = "osc_sys_ck",
+       .bit_shift = 6,
+       .max_div = 3,
+       .reg = 0x1270,
+       .module = TI_CLKM_PRM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk sys_ck = {
+       .name = "sys_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &sys_ck_data,
+};
+
+static const char *dpll3_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll3_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll3_ck_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd40,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll3_ck_parents,
+       .flags = CLKF_CORE,
+       .freqsel_mask = 0xf0,
+       .div1_mask = 0x7f00,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x5,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff0000,
+       .recal_st_bit = 0x5,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll3_ck = {
+       .name = "dpll3_ck",
+       .clkdm_name = "dpll3_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll3_ck_data,
+};
+
+static struct ti_clk_divider dpll3_m2_ck_data = {
+       .parent = "dpll3_ck",
+       .bit_shift = 27,
+       .max_div = 31,
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m2_ck = {
+       .name = "dpll3_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll3_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor core_ck_data = {
+       .parent = "dpll3_m2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_ck = {
+       .name = "core_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_ck_data,
+};
+
+static struct ti_clk_divider l3_ick_data = {
+       .parent = "core_ck",
+       .max_div = 3,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l3_ick = {
+       .name = "l3_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor security_l3_ick_data = {
+       .parent = "l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk security_l3_ick = {
+       .name = "security_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &security_l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor wkup_l4_ick_data = {
+       .parent = "sys_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wkup_l4_ick = {
+       .name = "wkup_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wkup_l4_ick_data,
+};
+
+static struct ti_clk_gate usim_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 9,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usim_ick = {
+       .name = "usim_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usim_ick_data,
+};
+
+static struct ti_clk_gate dss2_alwon_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 1,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss2_alwon_fck = {
+       .name = "dss2_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss2_alwon_fck_data,
+};
+
+static struct ti_clk_divider l4_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 2,
+       .max_div = 3,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l4_ick = {
+       .name = "l4_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_l4_ick = {
+       .name = "core_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_l4_ick_data,
+};
+
+static struct ti_clk_gate mmchs2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 25,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs2_ick = {
+       .name = "mmchs2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs2_ick_data,
+};
+
+static const char *dpll4_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll4_ck_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd44,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll4_ck_parents,
+       .flags = CLKF_PER,
+       .freqsel_mask = 0xf00000,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x2,
+       .auto_recal_bit = 0x13,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x6,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x70000,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x6,
+       .autoidle_mask = 0x38,
+};
+
+static struct ti_clk dpll4_ck = {
+       .name = "dpll4_ck",
+       .clkdm_name = "dpll4_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll4_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m2_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 63,
+       .reg = 0xd48,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m2_ck = {
+       .name = "dpll4_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = {
+       .parent = "dpll4_m2_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m2x2_mul_ck = {
+       .name = "dpll4_m2x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m2x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_data = {
+       .parent = "dpll4_m2x2_mul_ck",
+       .bit_shift = 0x1b,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck = {
+       .name = "dpll4_m2x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = {
+       .parent = "dpll4_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_alwon_fck = {
+       .name = "omap_96m_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_fck_data = {
+       .parent = "omap_96m_alwon_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk cm_96m_fck = {
+       .name = "cm_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &cm_96m_fck_data,
+};
+
+static const char *omap_96m_fck_parents[] = {
+       "cm_96m_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux omap_96m_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(omap_96m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_96m_fck_parents,
+};
+
+static struct ti_clk omap_96m_fck = {
+       .name = "omap_96m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_96m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_96m_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_96m_fck = {
+       .name = "core_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_96m_fck_data,
+};
+
+static struct ti_clk_gate mspro_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 23,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mspro_fck = {
+       .name = "mspro_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mspro_fck_data,
+};
+
+static struct ti_clk_gate dss_ick_3430es2_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xe10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es2 = {
+       .name = "dss_ick",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_ick_3430es2_data,
+};
+
+static struct ti_clk_gate uart4_ick_am35xx_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 23,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick_am35xx = {
+       .name = "uart4_ick_am35xx",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_ick_am35xx_data,
+};
+
+static struct ti_clk_fixed_factor security_l4_ick2_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk security_l4_ick2 = {
+       .name = "security_l4_ick2",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &security_l4_ick2_data,
+};
+
+static struct ti_clk_gate aes1_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 3,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes1_ick = {
+       .name = "aes1_ick",
+       .type = TI_CLK_GATE,
+       .data = &aes1_ick_data,
+};
+
+static const char *dpll5_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll5_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll5_ck_parents),
+       .control_reg = 0xd04,
+       .idlest_reg = 0xd24,
+       .mult_div1_reg = 0xd4c,
+       .autoidle_reg = 0xd34,
+       .module = TI_CLKM_CM,
+       .parents = dpll5_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x19,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x19,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll5_ck = {
+       .name = "dpll5_ck",
+       .clkdm_name = "dpll5_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll5_ck_data,
+};
+
+static struct ti_clk_divider dpll5_m2_ck_data = {
+       .parent = "dpll5_ck",
+       .max_div = 31,
+       .reg = 0xd50,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll5_m2_ck = {
+       .name = "dpll5_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll5_m2_ck_data,
+};
+
+static struct ti_clk_gate usbhost_120m_fck_data = {
+       .parent = "dpll5_m2_ck",
+       .bit_shift = 1,
+       .reg = 0x1400,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk usbhost_120m_fck = {
+       .name = "usbhost_120m_fck",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_120m_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_d2_fck_data = {
+       .parent = "cm_96m_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk cm_96m_d2_fck = {
+       .name = "cm_96m_d2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &cm_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed sys_altclk_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk sys_altclk = {
+       .name = "sys_altclk",
+       .type = TI_CLK_FIXED,
+       .data = &sys_altclk_data,
+};
+
+static const char *omap_48m_fck_parents[] = {
+       "cm_96m_d2_fck",
+       "sys_altclk",
+};
+
+static struct ti_clk_mux omap_48m_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(omap_48m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_48m_fck_parents,
+};
+
+static struct ti_clk omap_48m_fck = {
+       .name = "omap_48m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_48m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_48m_fck = {
+       .name = "core_48m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_48m_fck_data,
+};
+
+static struct ti_clk_fixed mcbsp_clks_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk mcbsp_clks = {
+       .name = "mcbsp_clks",
+       .type = TI_CLK_FIXED,
+       .data = &mcbsp_clks_data,
+};
+
+static struct ti_clk_gate mcbsp2_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 0,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_96m_fck_data = {
+       .parent = "omap_96m_alwon_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_96m_fck = {
+       .name = "per_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_96m_fck_data,
+};
+
+static const char *mcbsp2_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp2_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents),
+       .reg = 0x274,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp2_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp2_fck_data = {
+       .mux = &mcbsp2_mux_fck_data,
+       .gate = &mcbsp2_gate_fck_data,
+};
+
+static struct ti_clk mcbsp2_fck = {
+       .name = "mcbsp2_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = {
+       .parent = "dpll3_m2_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_m2x2_ck = {
+       .name = "dpll3_m2x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_fck_data = {
+       .parent = "dpll3_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_fck = {
+       .name = "corex2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_fck_data,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = {
+       .parent = "corex2_fck",
+       .bit_shift = 0,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static int ssi_ssr_div_fck_3430es1_divs[] = {
+       0,
+       1,
+       2,
+       3,
+       4,
+       0,
+       6,
+       0,
+       8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = {
+       .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs),
+       .parent = "corex2_fck",
+       .bit_shift = 8,
+       .dividers = ssi_ssr_div_fck_3430es1_divs,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es1_data = {
+       .gate = &ssi_ssr_gate_fck_3430es1_data,
+       .divider = &ssi_ssr_div_fck_3430es1_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es1 = {
+       .name = "ssi_ssr_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &ssi_ssr_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = {
+       .parent = "ssi_ssr_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es1 = {
+       .name = "ssi_sst_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_sst_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed omap_32k_fck_data = {
+       .frequency = 32768,
+};
+
+static struct ti_clk omap_32k_fck = {
+       .name = "omap_32k_fck",
+       .type = TI_CLK_FIXED,
+       .data = &omap_32k_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_32k_alwon_fck_data = {
+       .parent = "omap_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_32k_alwon_fck = {
+       .name = "per_32k_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_32k_alwon_fck_data,
+};
+
+static struct ti_clk_gate gpio5_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 16,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio5_dbck = {
+       .name = "gpio5_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio5_dbck_data,
+};
+
+static struct ti_clk_gate gpt1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt1_ick = {
+       .name = "gpt1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt1_ick_data,
+};
+
+static struct ti_clk_gate mcspi3_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 20,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi3_fck = {
+       .name = "mcspi3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi3_fck_data,
+};
+
+static struct ti_clk_gate gpt2_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt2_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt2_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(gpt2_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt2_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt2_fck_data = {
+       .mux = &gpt2_mux_fck_data,
+       .gate = &gpt2_gate_fck_data,
+};
+
+static struct ti_clk gpt2_fck = {
+       .name = "gpt2_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt2_fck_data,
+};
+
+static struct ti_clk_gate gpt10_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 11,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt10_ick = {
+       .name = "gpt10_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt10_ick_data,
+};
+
+static struct ti_clk_gate uart2_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 14,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart2_fck = {
+       .name = "uart2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart2_fck_data,
+};
+
+static struct ti_clk_fixed_factor sr_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk sr_l4_ick = {
+       .name = "sr_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &sr_l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d8_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 8,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d8_fck = {
+       .name = "omap_96m_d8_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d8_fck_data,
+};
+
+static struct ti_clk_divider dpll4_m5_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 63,
+       .reg = 0xf40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m5_ck = {
+       .name = "dpll4_m5_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m5_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = {
+       .parent = "dpll4_m5_ck",
+       .div = 1,
+       .mult = 2,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m5x2_mul_ck = {
+       .name = "dpll4_m5x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m5x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_data = {
+       .parent = "dpll4_m5x2_mul_ck",
+       .bit_shift = 0x1e,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck = {
+       .name = "dpll4_m5x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m5x2_ck_data,
+};
+
+static struct ti_clk_gate cam_mclk_data = {
+       .parent = "dpll4_m5x2_ck",
+       .bit_shift = 0,
+       .reg = 0xf00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk cam_mclk = {
+       .name = "cam_mclk",
+       .type = TI_CLK_GATE,
+       .data = &cam_mclk_data,
+};
+
+static struct ti_clk_gate mcbsp3_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 1,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *mcbsp3_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp3_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp3_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp3_fck_data = {
+       .mux = &mcbsp3_mux_fck_data,
+       .gate = &mcbsp3_gate_fck_data,
+};
+
+static struct ti_clk mcbsp3_fck = {
+       .name = "mcbsp3_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp3_fck_data,
+};
+
+static struct ti_clk_gate csi2_96m_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 1,
+       .reg = 0xf00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk csi2_96m_fck = {
+       .name = "csi2_96m_fck",
+       .clkdm_name = "cam_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &csi2_96m_fck_data,
+};
+
+static struct ti_clk_gate gpt9_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 10,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt9_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt9_mux_fck_data = {
+       .bit_shift = 7,
+       .num_parents = ARRAY_SIZE(gpt9_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt9_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt9_fck_data = {
+       .mux = &gpt9_mux_fck_data,
+       .gate = &gpt9_gate_fck_data,
+};
+
+static struct ti_clk gpt9_fck = {
+       .name = "gpt9_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt9_fck_data,
+};
+
+static struct ti_clk_divider dpll3_m3_ck_data = {
+       .parent = "dpll3_ck",
+       .bit_shift = 16,
+       .max_div = 31,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m3_ck = {
+       .name = "dpll3_m3_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll3_m3_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = {
+       .parent = "dpll3_m3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_m3x2_mul_ck = {
+       .name = "dpll3_m3x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate sr2_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 7,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr2_fck = {
+       .name = "sr2_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sr2_fck_data,
+};
+
+static struct ti_clk_fixed pclk_ck_data = {
+       .frequency = 27000000,
+};
+
+static struct ti_clk pclk_ck = {
+       .name = "pclk_ck",
+       .type = TI_CLK_FIXED,
+       .data = &pclk_ck_data,
+};
+
+static struct ti_clk_gate wdt2_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 5,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt2_ick = {
+       .name = "wdt2_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt2_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l3_ick_data = {
+       .parent = "l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_l3_ick = {
+       .name = "core_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_l3_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 21,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi4_fck = {
+       .name = "mcspi4_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi4_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_48m_fck = {
+       .name = "per_48m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_48m_fck_data,
+};
+
+static struct ti_clk_gate uart4_fck_data = {
+       .parent = "per_48m_fck",
+       .bit_shift = 18,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck = {
+       .name = "uart4_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_fck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d10_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 10,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d10_fck = {
+       .name = "omap_96m_d10_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d10_fck_data,
+};
+
+static struct ti_clk_gate usim_gate_fck_data = {
+       .parent = "omap_96m_fck",
+       .bit_shift = 9,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_l4_ick = {
+       .name = "per_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_l4_ick_data,
+};
+
+static struct ti_clk_gate gpt5_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 6,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt5_ick = {
+       .name = "gpt5_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt5_ick_data,
+};
+
+static struct ti_clk_gate mcspi2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 19,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi2_ick = {
+       .name = "mcspi2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi2_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_l4_ick = {
+       .name = "ssi_l4_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_l4_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es1_data = {
+       .parent = "ssi_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es1 = {
+       .name = "ssi_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ssi_ick_3430es1_data,
+};
+
+static struct ti_clk_gate i2c2_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 16,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c2_fck = {
+       .name = "i2c2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c2_fck_data,
+};
+
+static struct ti_clk_divider dpll1_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 19,
+       .max_div = 7,
+       .reg = 0x940,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_fck = {
+       .name = "dpll1_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll1_fck_data,
+};
+
+static const char *dpll1_ck_parents[] = {
+       "sys_ck",
+       "dpll1_fck",
+};
+
+static struct ti_clk_dpll dpll1_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll1_ck_parents),
+       .control_reg = 0x904,
+       .idlest_reg = 0x924,
+       .mult_div1_reg = 0x940,
+       .autoidle_reg = 0x934,
+       .module = TI_CLKM_CM,
+       .parents = dpll1_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0xa0,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x7,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x7,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll1_ck = {
+       .name = "dpll1_ck",
+       .clkdm_name = "dpll1_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll1_ck_data,
+};
+
+static struct ti_clk_fixed secure_32k_fck_data = {
+       .frequency = 32768,
+};
+
+static struct ti_clk secure_32k_fck = {
+       .name = "secure_32k_fck",
+       .type = TI_CLK_FIXED,
+       .data = &secure_32k_fck_data,
+};
+
+static struct ti_clk_gate gpio5_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 16,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio5_ick = {
+       .name = "gpio5_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio5_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m4_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 32,
+       .reg = 0xe40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m4_ck = {
+       .name = "dpll4_m4_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m4_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = {
+       .parent = "dpll4_m4_ck",
+       .div = 1,
+       .mult = 2,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m4x2_mul_ck = {
+       .name = "dpll4_m4x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m4x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m4x2_ck_data = {
+       .parent = "dpll4_m4x2_mul_ck",
+       .bit_shift = 0x1d,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m4x2_ck = {
+       .name = "dpll4_m4x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m4x2_ck_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es2_data = {
+       .parent = "dpll4_m4x2_ck",
+       .bit_shift = 0,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es2 = {
+       .name = "dss1_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss1_alwon_fck_3430es2_data,
+};
+
+static struct ti_clk_gate uart3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 11,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart3_ick = {
+       .name = "uart3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart3_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m3_ck_data = {
+       .parent = "dpll4_ck",
+       .bit_shift = 8,
+       .max_div = 32,
+       .reg = 0xe40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m3_ck = {
+       .name = "dpll4_m3_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m3_ck_data,
+};
+
+static struct ti_clk_gate mcbsp3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 1,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp3_ick = {
+       .name = "mcbsp3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 14,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio3_dbck = {
+       .name = "gpio3_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio3_dbck_data,
+};
+
+static struct ti_clk_gate fac_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 8,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk fac_ick = {
+       .name = "fac_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &fac_ick_data,
+};
+
+static struct ti_clk_gate clkout2_src_gate_ck_data = {
+       .parent = "core_ck",
+       .bit_shift = 7,
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = {
+       .parent = "dpll4_m3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m3x2_mul_ck = {
+       .name = "dpll4_m3x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_data = {
+       .parent = "dpll4_m3x2_mul_ck",
+       .bit_shift = 0x1c,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck = {
+       .name = "dpll4_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m3x2_ck_data,
+};
+
+static const char *omap_54m_fck_parents[] = {
+       "dpll4_m3x2_ck",
+       "sys_altclk",
+};
+
+static struct ti_clk_mux omap_54m_fck_data = {
+       .bit_shift = 5,
+       .num_parents = ARRAY_SIZE(omap_54m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_54m_fck_parents,
+};
+
+static struct ti_clk omap_54m_fck = {
+       .name = "omap_54m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_54m_fck_data,
+};
+
+static const char *clkout2_src_mux_ck_parents[] = {
+       "core_ck",
+       "sys_ck",
+       "cm_96m_fck",
+       "omap_54m_fck",
+};
+
+static struct ti_clk_mux clkout2_src_mux_ck_data = {
+       .num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents),
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .parents = clkout2_src_mux_ck_parents,
+};
+
+static struct ti_clk_composite clkout2_src_ck_data = {
+       .mux = &clkout2_src_mux_ck_data,
+       .gate = &clkout2_src_gate_ck_data,
+};
+
+static struct ti_clk clkout2_src_ck = {
+       .name = "clkout2_src_ck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &clkout2_src_ck_data,
+};
+
+static struct ti_clk_gate i2c1_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 15,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c1_fck = {
+       .name = "i2c1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c1_fck_data,
+};
+
+static struct ti_clk_gate wdt3_fck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 12,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt3_fck = {
+       .name = "wdt3_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt3_fck_data,
+};
+
+static struct ti_clk_gate gpt7_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 8,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt7_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt7_mux_fck_data = {
+       .bit_shift = 5,
+       .num_parents = ARRAY_SIZE(gpt7_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt7_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt7_fck_data = {
+       .mux = &gpt7_mux_fck_data,
+       .gate = &gpt7_gate_fck_data,
+};
+
+static struct ti_clk gpt7_fck = {
+       .name = "gpt7_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt7_fck_data,
+};
+
+static struct ti_clk_gate usb_l4_gate_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 5,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INTERFACE,
+};
+
+static struct ti_clk_divider usb_l4_div_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 4,
+       .max_div = 1,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usb_l4_ick_data = {
+       .gate = &usb_l4_gate_ick_data,
+       .divider = &usb_l4_div_ick_data,
+};
+
+static struct ti_clk usb_l4_ick = {
+       .name = "usb_l4_ick",
+       .type = TI_CLK_COMPOSITE,
+       .data = &usb_l4_ick_data,
+};
+
+static struct ti_clk_gate uart4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 18,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick = {
+       .name = "uart4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_ick_data,
+};
+
+static struct ti_clk_fixed dummy_ck_data = {
+       .frequency = 0,
+};
+
+static struct ti_clk dummy_ck = {
+       .name = "dummy_ck",
+       .type = TI_CLK_FIXED,
+       .data = &dummy_ck_data,
+};
+
+static const char *gpt3_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt3_mux_fck_data = {
+       .bit_shift = 1,
+       .num_parents = ARRAY_SIZE(gpt3_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt3_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt9_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 10,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt9_ick = {
+       .name = "gpt9_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt9_ick_data,
+};
+
+static struct ti_clk_gate gpt10_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 11,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate dss_ick_3430es1_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xe10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es1 = {
+       .name = "dss_ick",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpt11_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 12,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt11_ick = {
+       .name = "gpt11_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt11_ick_data,
+};
+
+static struct ti_clk_divider dpll2_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 19,
+       .max_div = 7,
+       .reg = 0x40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_fck = {
+       .name = "dpll2_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll2_fck_data,
+};
+
+static struct ti_clk_gate uart1_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 13,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart1_fck = {
+       .name = "uart1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart1_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es1_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es1 = {
+       .name = "hsotgusb_ick_3430es1",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpio2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 13,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio2_ick = {
+       .name = "gpio2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 24,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs1_ick = {
+       .name = "mmchs1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs1_ick_data,
+};
+
+static struct ti_clk_gate modem_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 31,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk modem_fck = {
+       .name = "modem_fck",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &modem_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 2,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp4_ick = {
+       .name = "mcbsp4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp4_ick_data,
+};
+
+static struct ti_clk_gate gpio1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 3,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio1_ick = {
+       .name = "gpio1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio1_ick_data,
+};
+
+static const char *gpt6_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt6_mux_fck_data = {
+       .bit_shift = 4,
+       .num_parents = ARRAY_SIZE(gpt6_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt6_mux_fck_parents,
+};
+
+static struct ti_clk_fixed_factor dpll1_x2_ck_data = {
+       .parent = "dpll1_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll1_x2_ck = {
+       .name = "dpll1_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll1_x2_ck_data,
+};
+
+static struct ti_clk_divider dpll1_x2m2_ck_data = {
+       .parent = "dpll1_x2_ck",
+       .max_div = 31,
+       .reg = 0x944,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_x2m2_ck = {
+       .name = "dpll1_x2m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll1_x2m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor mpu_ck_data = {
+       .parent = "dpll1_x2m2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk mpu_ck = {
+       .name = "mpu_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &mpu_ck_data,
+};
+
+static struct ti_clk_divider arm_fck_data = {
+       .parent = "mpu_ck",
+       .max_div = 2,
+       .reg = 0x924,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk arm_fck = {
+       .name = "arm_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &arm_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d3_ck_data = {
+       .parent = "core_ck",
+       .div = 3,
+       .mult = 1,
+};
+
+static struct ti_clk core_d3_ck = {
+       .name = "core_d3_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d3_ck_data,
+};
+
+static struct ti_clk_gate gpt11_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 12,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt11_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt11_mux_fck_data = {
+       .bit_shift = 7,
+       .num_parents = ARRAY_SIZE(gpt11_mux_fck_parents),
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .parents = gpt11_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt11_fck_data = {
+       .mux = &gpt11_mux_fck_data,
+       .gate = &gpt11_gate_fck_data,
+};
+
+static struct ti_clk gpt11_fck = {
+       .name = "gpt11_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt11_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d6_ck_data = {
+       .parent = "core_ck",
+       .div = 6,
+       .mult = 1,
+};
+
+static struct ti_clk core_d6_ck = {
+       .name = "core_d6_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d6_ck_data,
+};
+
+static struct ti_clk_gate uart4_fck_am35xx_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 23,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck_am35xx = {
+       .name = "uart4_fck_am35xx",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_fck_am35xx_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_data = {
+       .parent = "dpll3_m3x2_mul_ck",
+       .bit_shift = 0xc,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck = {
+       .name = "dpll3_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll3_m3x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_core_alwon_ck_data = {
+       .parent = "dpll3_m3x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_core_alwon_ck = {
+       .name = "emu_core_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_core_alwon_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m6_ck_data = {
+       .parent = "dpll4_ck",
+       .bit_shift = 24,
+       .max_div = 63,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m6_ck = {
+       .name = "dpll4_m6_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m6_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = {
+       .parent = "dpll4_m6_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m6x2_mul_ck = {
+       .name = "dpll4_m6x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m6x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_data = {
+       .parent = "dpll4_m6x2_mul_ck",
+       .bit_shift = 0x1f,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck = {
+       .name = "dpll4_m6x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m6x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_per_alwon_ck_data = {
+       .parent = "dpll4_m6x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_per_alwon_ck = {
+       .name = "emu_per_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_per_alwon_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = {
+       .parent = "mpu_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_mpu_alwon_ck = {
+       .name = "emu_mpu_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_mpu_alwon_ck_data,
+};
+
+static const char *emu_src_mux_ck_parents[] = {
+       "sys_ck",
+       "emu_core_alwon_ck",
+       "emu_per_alwon_ck",
+       "emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux emu_src_mux_ck_data = {
+       .num_parents = ARRAY_SIZE(emu_src_mux_ck_parents),
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .parents = emu_src_mux_ck_parents,
+};
+
+static struct ti_clk emu_src_mux_ck = {
+       .name = "emu_src_mux_ck",
+       .type = TI_CLK_MUX,
+       .data = &emu_src_mux_ck_data,
+};
+
+static struct ti_clk_gate emu_src_ck_data = {
+       .parent = "emu_src_mux_ck",
+       .flags = CLKF_CLKDM,
+};
+
+static struct ti_clk emu_src_ck = {
+       .name = "emu_src_ck",
+       .clkdm_name = "emu_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &emu_src_ck_data,
+};
+
+static struct ti_clk_divider atclk_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 4,
+       .max_div = 3,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk atclk_fck = {
+       .name = "atclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &atclk_fck_data,
+};
+
+static struct ti_clk_gate ipss_ick_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_AM35XX | CLKF_INTERFACE,
+};
+
+static struct ti_clk ipss_ick = {
+       .name = "ipss_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ipss_ick_data,
+};
+
+static struct ti_clk_gate emac_ick_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 1,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk emac_ick = {
+       .name = "emac_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &emac_ick_data,
+};
+
+static struct ti_clk_gate vpfe_ick_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 2,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk vpfe_ick = {
+       .name = "vpfe_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &vpfe_ick_data,
+};
+
+static const char *dpll2_ck_parents[] = {
+       "sys_ck",
+       "dpll2_fck",
+};
+
+static struct ti_clk_dpll dpll2_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll2_ck_parents),
+       .control_reg = 0x4,
+       .idlest_reg = 0x24,
+       .mult_div1_reg = 0x40,
+       .autoidle_reg = 0x34,
+       .module = TI_CLKM_CM,
+       .parents = dpll2_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0xa2,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x8,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x8,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll2_ck = {
+       .name = "dpll2_ck",
+       .clkdm_name = "dpll2_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll2_ck_data,
+};
+
+static struct ti_clk_divider dpll2_m2_ck_data = {
+       .parent = "dpll2_ck",
+       .max_div = 31,
+       .reg = 0x44,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_m2_ck = {
+       .name = "dpll2_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll2_m2_ck_data,
+};
+
+static const char *mcbsp4_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp4_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp4_mux_fck_parents,
+};
+
+static const char *mcbsp1_mux_fck_parents[] = {
+       "core_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp1_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents),
+       .reg = 0x274,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp1_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt8_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 9,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt8_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 9,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt8_ick = {
+       .name = "gpt8_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt8_ick_data,
+};
+
+static const char *gpt10_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt10_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(gpt10_mux_fck_parents),
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .parents = gpt10_mux_fck_parents,
+};
+
+static struct ti_clk_gate mmchs3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 30,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs3_ick = {
+       .name = "mmchs3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 14,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio3_ick = {
+       .name = "gpio3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio3_ick_data,
+};
+
+static const char *traceclk_src_fck_parents[] = {
+       "sys_ck",
+       "emu_core_alwon_ck",
+       "emu_per_alwon_ck",
+       "emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux traceclk_src_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(traceclk_src_fck_parents),
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .parents = traceclk_src_fck_parents,
+};
+
+static struct ti_clk traceclk_src_fck = {
+       .name = "traceclk_src_fck",
+       .type = TI_CLK_MUX,
+       .data = &traceclk_src_fck_data,
+};
+
+static struct ti_clk_divider traceclk_fck_data = {
+       .parent = "traceclk_src_fck",
+       .bit_shift = 11,
+       .max_div = 7,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk traceclk_fck = {
+       .name = "traceclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &traceclk_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 10,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate sad2d_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 3,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sad2d_ick = {
+       .name = "sad2d_ick",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sad2d_ick_data,
+};
+
+static const char *gpt1_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt1_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(gpt1_mux_fck_parents),
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .parents = gpt1_mux_fck_parents,
+};
+
+static struct ti_clk_gate hecc_ck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hecc_ck = {
+       .name = "hecc_ck",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hecc_ck_data,
+};
+
+static struct ti_clk_gate gpt1_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 0,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt1_fck_data = {
+       .mux = &gpt1_mux_fck_data,
+       .gate = &gpt1_gate_fck_data,
+};
+
+static struct ti_clk gpt1_fck = {
+       .name = "gpt1_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = {
+       .parent = "dpll4_m2x2_mul_ck",
+       .bit_shift = 0x1b,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck_omap36xx = {
+       .name = "dpll4_m2x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m2x2_ck_omap36xx_data,
+       .patch = &dpll4_m2x2_ck,
+};
+
+static struct ti_clk_divider gfx_l3_fck_data = {
+       .parent = "l3_ick",
+       .max_div = 7,
+       .reg = 0xb40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk gfx_l3_fck = {
+       .name = "gfx_l3_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &gfx_l3_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg1_ck_data = {
+       .parent = "gfx_l3_fck",
+       .bit_shift = 1,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg1_ck = {
+       .name = "gfx_cg1_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_cg1_ck_data,
+};
+
+static struct ti_clk_gate mailboxes_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 7,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mailboxes_ick = {
+       .name = "mailboxes_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mailboxes_ick_data,
+};
+
+static struct ti_clk_gate sha11_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 1,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha11_ick = {
+       .name = "sha11_ick",
+       .type = TI_CLK_GATE,
+       .data = &sha11_ick_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_am35xx_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 0,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hsotgusb_ick_am35xx = {
+       .name = "hsotgusb_ick_am35xx",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_am35xx_data,
+};
+
+static struct ti_clk_gate mmchs3_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 30,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs3_fck = {
+       .name = "mmchs3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs3_fck_data,
+};
+
+static struct ti_clk_divider pclk_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 8,
+       .max_div = 7,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclk_fck = {
+       .name = "pclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &pclk_fck_data,
+};
+
+static const char *dpll4_ck_omap36xx_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_omap36xx_data = {
+       .num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd44,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll4_ck_omap36xx_parents,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x2,
+       .auto_recal_bit = 0x13,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x6,
+       .max_multiplier = 0xfff,
+       .enable_mask = 0x70000,
+       .mult_mask = 0xfff00,
+       .recal_st_bit = 0x6,
+       .autoidle_mask = 0x38,
+       .sddiv_mask = 0xff000000,
+       .dco_mask = 0xe00000,
+       .flags = CLKF_PER | CLKF_J_TYPE,
+};
+
+static struct ti_clk dpll4_ck_omap36xx = {
+       .name = "dpll4_ck",
+       .type = TI_CLK_DPLL,
+       .data = &dpll4_ck_omap36xx_data,
+       .patch = &dpll4_ck,
+};
+
+static struct ti_clk_gate uart3_fck_data = {
+       .parent = "per_48m_fck",
+       .bit_shift = 11,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart3_fck = {
+       .name = "uart3_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart3_fck_data,
+};
+
+static struct ti_clk_fixed_factor wkup_32k_fck_data = {
+       .parent = "omap_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wkup_32k_fck = {
+       .name = "wkup_32k_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wkup_32k_fck_data,
+};
+
+static struct ti_clk_gate sys_clkout1_data = {
+       .parent = "osc_sys_ck",
+       .bit_shift = 7,
+       .reg = 0xd70,
+       .module = TI_CLKM_PRM,
+};
+
+static struct ti_clk sys_clkout1 = {
+       .name = "sys_clkout1",
+       .type = TI_CLK_GATE,
+       .data = &sys_clkout1_data,
+};
+
+static struct ti_clk_fixed_factor gpmc_fck_data = {
+       .parent = "core_l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gpmc_fck = {
+       .name = "gpmc_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gpmc_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 20,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d20_ck = {
+       .name = "dpll5_m2_d20_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d20_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = {
+       .parent = "dpll4_m5x2_mul_ck",
+       .bit_shift = 0x1e,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck_omap36xx = {
+       .name = "dpll4_m5x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m5x2_ck_omap36xx_data,
+       .patch = &dpll4_m5x2_ck,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = {
+       .parent = "corex2_fck",
+       .bit_shift = 0,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_gate uart1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 13,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart1_ick = {
+       .name = "uart1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart1_ick_data,
+};
+
+static struct ti_clk_gate iva2_ck_data = {
+       .parent = "dpll2_m2_ck",
+       .bit_shift = 0,
+       .reg = 0x0,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk iva2_ck = {
+       .name = "iva2_ck",
+       .clkdm_name = "iva2_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &iva2_ck_data,
+};
+
+static struct ti_clk_gate pka_ick_data = {
+       .parent = "security_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk pka_ick = {
+       .name = "pka_ick",
+       .type = TI_CLK_GATE,
+       .data = &pka_ick_data,
+};
+
+static struct ti_clk_gate gpt12_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 1,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt12_ick = {
+       .name = "gpt12_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt12_ick_data,
+};
+
+static const char *mcbsp5_mux_fck_parents[] = {
+       "core_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp5_mux_fck_data = {
+       .bit_shift = 4,
+       .num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp5_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp5_fck_data = {
+       .mux = &mcbsp5_mux_fck_data,
+       .gate = &mcbsp5_gate_fck_data,
+};
+
+static struct ti_clk mcbsp5_fck = {
+       .name = "mcbsp5_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp5_fck_data,
+};
+
+static struct ti_clk_gate usbhost_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .bit_shift = 0,
+       .reg = 0x1400,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS,
+};
+
+static struct ti_clk usbhost_48m_fck = {
+       .name = "usbhost_48m_fck",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_48m_fck_data,
+};
+
+static struct ti_clk_gate des1_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 0,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des1_ick = {
+       .name = "des1_ick",
+       .type = TI_CLK_GATE,
+       .data = &des1_ick_data,
+};
+
+static struct ti_clk_gate sgx_gate_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 1,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor core_d4_ck_data = {
+       .parent = "core_ck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk core_d4_ck = {
+       .name = "core_d4_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
+       .parent = "dpll4_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk omap_192m_alwon_fck = {
+       .name = "omap_192m_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_192m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d2_ck_data = {
+       .parent = "core_ck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk core_d2_ck = {
+       .name = "core_d2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d3_fck_data = {
+       .parent = "corex2_fck",
+       .div = 3,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_d3_fck = {
+       .name = "corex2_d3_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_d3_fck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d5_fck_data = {
+       .parent = "corex2_fck",
+       .div = 5,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_d5_fck = {
+       .name = "corex2_d5_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_d5_fck_data,
+};
+
+static const char *sgx_mux_fck_parents[] = {
+       "core_d3_ck",
+       "core_d4_ck",
+       "core_d6_ck",
+       "cm_96m_fck",
+       "omap_192m_alwon_fck",
+       "core_d2_ck",
+       "corex2_d3_fck",
+       "corex2_d5_fck",
+};
+
+static struct ti_clk_mux sgx_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
+       .reg = 0xb40,
+       .module = TI_CLKM_CM,
+       .parents = sgx_mux_fck_parents,
+};
+
+static struct ti_clk_composite sgx_fck_data = {
+       .mux = &sgx_mux_fck_data,
+       .gate = &sgx_gate_fck_data,
+};
+
+static struct ti_clk sgx_fck = {
+       .name = "sgx_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &sgx_fck_data,
+};
+
+static struct ti_clk_gate mcspi1_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 18,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi1_fck = {
+       .name = "mcspi1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi1_fck_data,
+};
+
+static struct ti_clk_gate mmchs2_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 25,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs2_fck = {
+       .name = "mmchs2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs2_fck_data,
+};
+
+static struct ti_clk_gate mcspi2_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 19,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi2_fck = {
+       .name = "mcspi2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi2_fck_data,
+};
+
+static struct ti_clk_gate vpfe_fck_data = {
+       .parent = "pclk_ck",
+       .bit_shift = 10,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk vpfe_fck = {
+       .name = "vpfe_fck",
+       .type = TI_CLK_GATE,
+       .data = &vpfe_fck_data,
+};
+
+static struct ti_clk_gate gpt4_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 5,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate mcbsp1_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 9,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt5_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 6,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt5_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt5_mux_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt5_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt5_fck_data = {
+       .mux = &gpt5_mux_fck_data,
+       .gate = &gpt5_gate_fck_data,
+};
+
+static struct ti_clk gpt5_fck = {
+       .name = "gpt5_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt5_fck_data,
+};
+
+static struct ti_clk_gate ts_fck_data = {
+       .parent = "omap_32k_fck",
+       .bit_shift = 1,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk ts_fck = {
+       .name = "ts_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ts_fck_data,
+};
+
+static struct ti_clk_fixed_factor wdt1_fck_data = {
+       .parent = "secure_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wdt1_fck = {
+       .name = "wdt1_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wdt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
+       .parent = "dpll4_m6x2_mul_ck",
+       .bit_shift = 0x1f,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck_omap36xx = {
+       .name = "dpll4_m6x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m6x2_ck_omap36xx_data,
+       .patch = &dpll4_m6x2_ck,
+};
+
+static const char *gpt4_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt4_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt4_mux_fck_parents,
+};
+
+static struct ti_clk_gate usbhost_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0x1410,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbhost_ick = {
+       .name = "usbhost_ick",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_ick_data,
+};
+
+static struct ti_clk_gate mcbsp2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 0,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp2_ick = {
+       .name = "mcbsp2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp2_ick_data,
+};
+
+static struct ti_clk_gate omapctrl_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 6,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omapctrl_ick = {
+       .name = "omapctrl_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &omapctrl_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d4_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d4_fck = {
+       .name = "omap_96m_d4_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d4_fck_data,
+};
+
+static struct ti_clk_gate gpt6_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 7,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt6_ick = {
+       .name = "gpt6_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt6_ick_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = {
+       .parent = "dpll3_m3x2_mul_ck",
+       .bit_shift = 0xc,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck_omap36xx = {
+       .name = "dpll3_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll3_m3x2_ck_omap36xx_data,
+       .patch = &dpll3_m3x2_ck,
+};
+
+static struct ti_clk_gate i2c3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 17,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c3_ick = {
+       .name = "i2c3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c3_ick_data,
+};
+
+static struct ti_clk_gate gpio6_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 17,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio6_ick = {
+       .name = "gpio6_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio6_ick_data,
+};
+
+static struct ti_clk_gate mspro_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 23,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mspro_ick = {
+       .name = "mspro_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mspro_ick_data,
+};
+
+static struct ti_clk_composite mcbsp1_fck_data = {
+       .mux = &mcbsp1_mux_fck_data,
+       .gate = &mcbsp1_gate_fck_data,
+};
+
+static struct ti_clk mcbsp1_fck = {
+       .name = "mcbsp1_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp1_fck_data,
+};
+
+static struct ti_clk_gate gpt3_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 4,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed rmii_ck_data = {
+       .frequency = 50000000,
+};
+
+static struct ti_clk rmii_ck = {
+       .name = "rmii_ck",
+       .type = TI_CLK_FIXED,
+       .data = &rmii_ck_data,
+};
+
+static struct ti_clk_gate gpt6_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 7,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt6_fck_data = {
+       .mux = &gpt6_mux_fck_data,
+       .gate = &gpt6_gate_fck_data,
+};
+
+static struct ti_clk gpt6_fck = {
+       .name = "gpt6_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt6_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d4_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d4_ck = {
+       .name = "dpll5_m2_d4_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor sys_d2_ck_data = {
+       .parent = "sys_ck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk sys_d2_ck = {
+       .name = "sys_d2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &sys_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d2_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d2_fck = {
+       .name = "omap_96m_d2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 8,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d8_ck = {
+       .name = "dpll5_m2_d8_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d8_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 16,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d16_ck = {
+       .name = "dpll5_m2_d16_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d16_ck_data,
+};
+
+static const char *usim_mux_fck_parents[] = {
+       "sys_ck",
+       "sys_d2_ck",
+       "omap_96m_d2_fck",
+       "omap_96m_d4_fck",
+       "omap_96m_d8_fck",
+       "omap_96m_d10_fck",
+       "dpll5_m2_d4_ck",
+       "dpll5_m2_d8_ck",
+       "dpll5_m2_d16_ck",
+       "dpll5_m2_d20_ck",
+};
+
+static struct ti_clk_mux usim_mux_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(usim_mux_fck_parents),
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .parents = usim_mux_fck_parents,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usim_fck_data = {
+       .mux = &usim_mux_fck_data,
+       .gate = &usim_gate_fck_data,
+};
+
+static struct ti_clk usim_fck = {
+       .name = "usim_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &usim_fck_data,
+};
+
+static int ssi_ssr_div_fck_3430es2_divs[] = {
+       0,
+       1,
+       2,
+       3,
+       4,
+       0,
+       6,
+       0,
+       8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = {
+       .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs),
+       .parent = "corex2_fck",
+       .bit_shift = 8,
+       .dividers = ssi_ssr_div_fck_3430es2_divs,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es2_data = {
+       .gate = &ssi_ssr_gate_fck_3430es2_data,
+       .divider = &ssi_ssr_div_fck_3430es2_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es2 = {
+       .name = "ssi_ssr_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &ssi_ssr_fck_3430es2_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es1_data = {
+       .parent = "dpll4_m4x2_ck",
+       .bit_shift = 0,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es1 = {
+       .name = "dss1_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss1_alwon_fck_3430es1_data,
+};
+
+static struct ti_clk_gate gpt3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 4,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt3_ick = {
+       .name = "gpt3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt3_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_12m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk omap_12m_fck = {
+       .name = "omap_12m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_12m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_12m_fck_data = {
+       .parent = "omap_12m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_12m_fck = {
+       .name = "core_12m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_12m_fck_data,
+};
+
+static struct ti_clk_gate hdq_fck_data = {
+       .parent = "core_12m_fck",
+       .bit_shift = 22,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk hdq_fck = {
+       .name = "hdq_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hdq_fck_data,
+};
+
+static struct ti_clk_gate usbtll_fck_data = {
+       .parent = "dpll5_m2_ck",
+       .bit_shift = 2,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk usbtll_fck = {
+       .name = "usbtll_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbtll_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_fck_am35xx_data = {
+       .parent = "sys_ck",
+       .bit_shift = 8,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk hsotgusb_fck_am35xx = {
+       .name = "hsotgusb_fck_am35xx",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_fck_am35xx_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es2_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es2 = {
+       .name = "hsotgusb_ick_3430es2",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gfx_l3_ck_data = {
+       .parent = "l3_ick",
+       .bit_shift = 0,
+       .reg = 0xb10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_l3_ck = {
+       .name = "gfx_l3_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_l3_ck_data,
+};
+
+static struct ti_clk_fixed_factor gfx_l3_ick_data = {
+       .parent = "gfx_l3_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gfx_l3_ick = {
+       .name = "gfx_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gfx_l3_ick_data,
+};
+
+static struct ti_clk_gate mcbsp1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 9,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp1_ick = {
+       .name = "mcbsp1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp1_ick_data,
+};
+
+static struct ti_clk_fixed_factor gpt12_fck_data = {
+       .parent = "secure_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gpt12_fck = {
+       .name = "gpt12_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gpt12_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg2_ck_data = {
+       .parent = "gfx_l3_fck",
+       .bit_shift = 2,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg2_ck = {
+       .name = "gfx_cg2_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_cg2_ck_data,
+};
+
+static struct ti_clk_gate i2c2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 16,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c2_ick = {
+       .name = "i2c2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c2_ick_data,
+};
+
+static struct ti_clk_gate gpio4_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 15,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio4_dbck = {
+       .name = "gpio4_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio4_dbck_data,
+};
+
+static struct ti_clk_gate i2c3_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 17,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c3_fck = {
+       .name = "i2c3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c3_fck_data,
+};
+
+static struct ti_clk_composite gpt3_fck_data = {
+       .mux = &gpt3_mux_fck_data,
+       .gate = &gpt3_gate_fck_data,
+};
+
+static struct ti_clk gpt3_fck = {
+       .name = "gpt3_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt3_fck_data,
+};
+
+static struct ti_clk_gate i2c1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 15,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c1_ick = {
+       .name = "i2c1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c1_ick_data,
+};
+
+static struct ti_clk_gate omap_32ksync_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 2,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omap_32ksync_ick = {
+       .name = "omap_32ksync_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &omap_32ksync_ick_data,
+};
+
+static struct ti_clk_gate aes2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 28,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes2_ick = {
+       .name = "aes2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &aes2_ick_data,
+};
+
+static const char *gpt8_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt8_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(gpt8_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt8_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt8_fck_data = {
+       .mux = &gpt8_mux_fck_data,
+       .gate = &gpt8_gate_fck_data,
+};
+
+static struct ti_clk gpt8_fck = {
+       .name = "gpt8_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt8_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 2,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite mcbsp4_fck_data = {
+       .mux = &mcbsp4_mux_fck_data,
+       .gate = &mcbsp4_gate_fck_data,
+};
+
+static struct ti_clk mcbsp4_fck = {
+       .name = "mcbsp4_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp4_fck_data,
+};
+
+static struct ti_clk_gate gpio2_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 13,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio2_dbck = {
+       .name = "gpio2_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio2_dbck_data,
+};
+
+static struct ti_clk_gate usbtll_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 2,
+       .reg = 0xa18,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbtll_ick = {
+       .name = "usbtll_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbtll_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 21,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi4_ick = {
+       .name = "mcspi4_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi4_ick_data,
+};
+
+static struct ti_clk_gate dss_96m_fck_data = {
+       .parent = "omap_96m_fck",
+       .bit_shift = 2,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_96m_fck = {
+       .name = "dss_96m_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_96m_fck_data,
+};
+
+static struct ti_clk_divider rm_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 1,
+       .max_div = 3,
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk rm_ick = {
+       .name = "rm_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &rm_ick_data,
+};
+
+static struct ti_clk_gate hdq_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 22,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hdq_ick = {
+       .name = "hdq_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hdq_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_x2_ck_data = {
+       .parent = "dpll3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_x2_ck = {
+       .name = "dpll3_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_x2_ck_data,
+};
+
+static struct ti_clk_gate mad2d_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 3,
+       .reg = 0xa18,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mad2d_ick = {
+       .name = "mad2d_ick",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mad2d_ick_data,
+};
+
+static struct ti_clk_gate fshostusb_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 5,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk fshostusb_fck = {
+       .name = "fshostusb_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &fshostusb_fck_data,
+};
+
+static struct ti_clk_gate sr1_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 6,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr1_fck = {
+       .name = "sr1_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sr1_fck_data,
+};
+
+static struct ti_clk_gate des2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 26,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des2_ick = {
+       .name = "des2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &des2_ick_data,
+};
+
+static struct ti_clk_gate sdrc_ick_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 1,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sdrc_ick = {
+       .name = "sdrc_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sdrc_ick_data,
+};
+
+static struct ti_clk_composite gpt4_fck_data = {
+       .mux = &gpt4_mux_fck_data,
+       .gate = &gpt4_gate_fck_data,
+};
+
+static struct ti_clk gpt4_fck = {
+       .name = "gpt4_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt4_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
+       .parent = "dpll4_m3x2_mul_ck",
+       .bit_shift = 0x1c,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck_omap36xx = {
+       .name = "dpll4_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m3x2_ck_omap36xx_data,
+       .patch = &dpll4_m3x2_ck,
+};
+
+static struct ti_clk_gate cpefuse_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 0,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk cpefuse_fck = {
+       .name = "cpefuse_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &cpefuse_fck_data,
+};
+
+static struct ti_clk_gate mcspi3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 20,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi3_ick = {
+       .name = "mcspi3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi3_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = {
+       .parent = "ssi_ssr_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es2 = {
+       .name = "ssi_sst_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_sst_fck_3430es2_data,
+};
+
+static struct ti_clk_gate gpio1_dbck_data = {
+       .parent = "wkup_32k_fck",
+       .bit_shift = 3,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio1_dbck = {
+       .name = "gpio1_dbck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio1_dbck_data,
+};
+
+static struct ti_clk_gate gpt4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 5,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt4_ick = {
+       .name = "gpt4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt4_ick_data,
+};
+
+static struct ti_clk_gate gpt2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 3,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt2_ick = {
+       .name = "gpt2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 24,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs1_fck = {
+       .name = "mmchs1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs1_fck_data,
+};
+
+static struct ti_clk_fixed dummy_apb_pclk_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk dummy_apb_pclk = {
+       .name = "dummy_apb_pclk",
+       .type = TI_CLK_FIXED,
+       .data = &dummy_apb_pclk_data,
+};
+
+static struct ti_clk_gate gpio6_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 17,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio6_dbck = {
+       .name = "gpio6_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio6_dbck_data,
+};
+
+static struct ti_clk_gate uart2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 14,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart2_ick = {
+       .name = "uart2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart2_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_x2_ck_data = {
+       .parent = "dpll4_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_x2_ck = {
+       .name = "dpll4_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_x2_ck_data,
+};
+
+static struct ti_clk_gate gpt7_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 8,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt7_ick = {
+       .name = "gpt7_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt7_ick_data,
+};
+
+static struct ti_clk_gate dss_tv_fck_data = {
+       .parent = "omap_54m_fck",
+       .bit_shift = 2,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_tv_fck = {
+       .name = "dss_tv_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_tv_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 10,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp5_ick = {
+       .name = "mcbsp5_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp5_ick_data,
+};
+
+static struct ti_clk_gate mcspi1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 18,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi1_ick = {
+       .name = "mcspi1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi1_ick_data,
+};
+
+static struct ti_clk_gate d2d_26m_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk d2d_26m_fck = {
+       .name = "d2d_26m_fck",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &d2d_26m_fck_data,
+};
+
+static struct ti_clk_gate wdt3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 12,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt3_ick = {
+       .name = "wdt3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt3_ick_data,
+};
+
+static struct ti_clk_divider pclkx2_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 6,
+       .max_div = 3,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclkx2_fck = {
+       .name = "pclkx2_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &pclkx2_fck_data,
+};
+
+static struct ti_clk_gate sha12_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 27,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha12_ick = {
+       .name = "sha12_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sha12_ick_data,
+};
+
+static struct ti_clk_gate emac_fck_data = {
+       .parent = "rmii_ck",
+       .bit_shift = 9,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk emac_fck = {
+       .name = "emac_fck",
+       .type = TI_CLK_GATE,
+       .data = &emac_fck_data,
+};
+
+static struct ti_clk_composite gpt10_fck_data = {
+       .mux = &gpt10_mux_fck_data,
+       .gate = &gpt10_gate_fck_data,
+};
+
+static struct ti_clk gpt10_fck = {
+       .name = "gpt10_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt10_fck_data,
+};
+
+static struct ti_clk_gate wdt2_fck_data = {
+       .parent = "wkup_32k_fck",
+       .bit_shift = 5,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt2_fck = {
+       .name = "wdt2_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt2_fck_data,
+};
+
+static struct ti_clk_gate cam_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xf10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk cam_ick = {
+       .name = "cam_ick",
+       .clkdm_name = "cam_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &cam_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es2_data = {
+       .parent = "ssi_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es2 = {
+       .name = "ssi_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ssi_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gpio4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 15,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio4_ick = {
+       .name = "gpio4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio4_ick_data,
+};
+
+static struct ti_clk_gate wdt1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 4,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt1_ick = {
+       .name = "wdt1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt1_ick_data,
+};
+
+static struct ti_clk_gate rng_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 2,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk rng_ick = {
+       .name = "rng_ick",
+       .type = TI_CLK_GATE,
+       .data = &rng_ick_data,
+};
+
+static struct ti_clk_gate icr_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 29,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk icr_ick = {
+       .name = "icr_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &icr_ick_data,
+};
+
+static struct ti_clk_gate sgx_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 0,
+       .reg = 0xb10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sgx_ick = {
+       .name = "sgx_ick",
+       .clkdm_name = "sgx_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sgx_ick_data,
+};
+
+static struct ti_clk_divider sys_clkout2_data = {
+       .parent = "clkout2_src_ck",
+       .bit_shift = 3,
+       .max_div = 64,
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_POWER_OF_TWO,
+};
+
+static struct ti_clk sys_clkout2 = {
+       .name = "sys_clkout2",
+       .type = TI_CLK_DIVIDER,
+       .data = &sys_clkout2_data,
+};
+
+static struct ti_clk_alias omap34xx_omap36xx_clks[] = {
+       CLK(NULL, "security_l4_ick2", &security_l4_ick2),
+       CLK(NULL, "aes1_ick", &aes1_ick),
+       CLK("omap_rng", "ick", &rng_ick),
+       CLK("omap3-rom-rng", "ick", &rng_ick),
+       CLK(NULL, "sha11_ick", &sha11_ick),
+       CLK(NULL, "des1_ick", &des1_ick),
+       CLK(NULL, "cam_mclk", &cam_mclk),
+       CLK(NULL, "cam_ick", &cam_ick),
+       CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
+       CLK(NULL, "security_l3_ick", &security_l3_ick),
+       CLK(NULL, "pka_ick", &pka_ick),
+       CLK(NULL, "icr_ick", &icr_ick),
+       CLK(NULL, "des2_ick", &des2_ick),
+       CLK(NULL, "mspro_ick", &mspro_ick),
+       CLK(NULL, "mailboxes_ick", &mailboxes_ick),
+       CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
+       CLK(NULL, "sr1_fck", &sr1_fck),
+       CLK(NULL, "sr2_fck", &sr2_fck),
+       CLK(NULL, "sr_l4_ick", &sr_l4_ick),
+       CLK(NULL, "dpll2_fck", &dpll2_fck),
+       CLK(NULL, "dpll2_ck", &dpll2_ck),
+       CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
+       CLK(NULL, "iva2_ck", &iva2_ck),
+       CLK(NULL, "modem_fck", &modem_fck),
+       CLK(NULL, "sad2d_ick", &sad2d_ick),
+       CLK(NULL, "mad2d_ick", &mad2d_ick),
+       CLK(NULL, "mspro_fck", &mspro_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = {
+       CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
+       CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
+       CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
+       CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
+       CLK(NULL, "sys_d2_ck", &sys_d2_ck),
+       CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck),
+       CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck),
+       CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck),
+       CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck),
+       CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck),
+       CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck),
+       CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck),
+       CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck),
+       CLK(NULL, "usim_fck", &usim_fck),
+       CLK(NULL, "usim_ick", &usim_ick),
+       { NULL },
+};
+
+static struct ti_clk_alias omap3xxx_clks[] = {
+       CLK(NULL, "apb_pclk", &dummy_apb_pclk),
+       CLK(NULL, "omap_32k_fck", &omap_32k_fck),
+       CLK(NULL, "virt_12m_ck", &virt_12m_ck),
+       CLK(NULL, "virt_13m_ck", &virt_13m_ck),
+       CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
+       CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
+       CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
+       CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
+       CLK(NULL, "osc_sys_ck", &osc_sys_ck),
+       CLK("twl", "fck", &osc_sys_ck),
+       CLK(NULL, "sys_ck", &sys_ck),
+       CLK(NULL, "timer_sys_ck", &sys_ck),
+       CLK(NULL, "dpll4_ck", &dpll4_ck),
+       CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
+       CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck),
+       CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
+       CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
+       CLK(NULL, "dpll3_ck", &dpll3_ck),
+       CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
+       CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck),
+       CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
+       CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
+       CLK(NULL, "sys_altclk", &sys_altclk),
+       CLK(NULL, "mcbsp_clks", &mcbsp_clks),
+       CLK(NULL, "sys_clkout1", &sys_clkout1),
+       CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
+       CLK(NULL, "core_ck", &core_ck),
+       CLK(NULL, "dpll1_fck", &dpll1_fck),
+       CLK(NULL, "dpll1_ck", &dpll1_ck),
+       CLK(NULL, "cpufreq_ck", &dpll1_ck),
+       CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
+       CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
+       CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
+       CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
+       CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
+       CLK(NULL, "cm_96m_fck", &cm_96m_fck),
+       CLK(NULL, "omap_96m_fck", &omap_96m_fck),
+       CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
+       CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck),
+       CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
+       CLK(NULL, "omap_54m_fck", &omap_54m_fck),
+       CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck),
+       CLK(NULL, "omap_48m_fck", &omap_48m_fck),
+       CLK(NULL, "omap_12m_fck", &omap_12m_fck),
+       CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
+       CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck),
+       CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
+       CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
+       CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck),
+       CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
+       CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
+       CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck),
+       CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
+       CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
+       CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
+       CLK(NULL, "sys_clkout2", &sys_clkout2),
+       CLK(NULL, "corex2_fck", &corex2_fck),
+       CLK(NULL, "mpu_ck", &mpu_ck),
+       CLK(NULL, "arm_fck", &arm_fck),
+       CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
+       CLK(NULL, "l3_ick", &l3_ick),
+       CLK(NULL, "l4_ick", &l4_ick),
+       CLK(NULL, "rm_ick", &rm_ick),
+       CLK(NULL, "timer_32k_ck", &omap_32k_fck),
+       CLK(NULL, "gpt10_fck", &gpt10_fck),
+       CLK(NULL, "gpt11_fck", &gpt11_fck),
+       CLK(NULL, "core_96m_fck", &core_96m_fck),
+       CLK(NULL, "mmchs2_fck", &mmchs2_fck),
+       CLK(NULL, "mmchs1_fck", &mmchs1_fck),
+       CLK(NULL, "i2c3_fck", &i2c3_fck),
+       CLK(NULL, "i2c2_fck", &i2c2_fck),
+       CLK(NULL, "i2c1_fck", &i2c1_fck),
+       CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
+       CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
+       CLK(NULL, "core_48m_fck", &core_48m_fck),
+       CLK(NULL, "mcspi4_fck", &mcspi4_fck),
+       CLK(NULL, "mcspi3_fck", &mcspi3_fck),
+       CLK(NULL, "mcspi2_fck", &mcspi2_fck),
+       CLK(NULL, "mcspi1_fck", &mcspi1_fck),
+       CLK(NULL, "uart2_fck", &uart2_fck),
+       CLK(NULL, "uart1_fck", &uart1_fck),
+       CLK(NULL, "core_12m_fck", &core_12m_fck),
+       CLK("omap_hdq.0", "fck", &hdq_fck),
+       CLK(NULL, "hdq_fck", &hdq_fck),
+       CLK(NULL, "core_l3_ick", &core_l3_ick),
+       CLK(NULL, "sdrc_ick", &sdrc_ick),
+       CLK(NULL, "gpmc_fck", &gpmc_fck),
+       CLK(NULL, "core_l4_ick", &core_l4_ick),
+       CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
+       CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
+       CLK(NULL, "mmchs2_ick", &mmchs2_ick),
+       CLK(NULL, "mmchs1_ick", &mmchs1_ick),
+       CLK("omap_hdq.0", "ick", &hdq_ick),
+       CLK(NULL, "hdq_ick", &hdq_ick),
+       CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
+       CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
+       CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
+       CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
+       CLK(NULL, "mcspi4_ick", &mcspi4_ick),
+       CLK(NULL, "mcspi3_ick", &mcspi3_ick),
+       CLK(NULL, "mcspi2_ick", &mcspi2_ick),
+       CLK(NULL, "mcspi1_ick", &mcspi1_ick),
+       CLK("omap_i2c.3", "ick", &i2c3_ick),
+       CLK("omap_i2c.2", "ick", &i2c2_ick),
+       CLK("omap_i2c.1", "ick", &i2c1_ick),
+       CLK(NULL, "i2c3_ick", &i2c3_ick),
+       CLK(NULL, "i2c2_ick", &i2c2_ick),
+       CLK(NULL, "i2c1_ick", &i2c1_ick),
+       CLK(NULL, "uart2_ick", &uart2_ick),
+       CLK(NULL, "uart1_ick", &uart1_ick),
+       CLK(NULL, "gpt11_ick", &gpt11_ick),
+       CLK(NULL, "gpt10_ick", &gpt10_ick),
+       CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
+       CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
+       CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
+       CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
+       CLK(NULL, "omapctrl_ick", &omapctrl_ick),
+       CLK(NULL, "dss_tv_fck", &dss_tv_fck),
+       CLK(NULL, "dss_96m_fck", &dss_96m_fck),
+       CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
+       CLK(NULL, "init_60m_fclk", &dummy_ck),
+       CLK(NULL, "gpt1_fck", &gpt1_fck),
+       CLK(NULL, "aes2_ick", &aes2_ick),
+       CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
+       CLK(NULL, "gpio1_dbck", &gpio1_dbck),
+       CLK(NULL, "sha12_ick", &sha12_ick),
+       CLK(NULL, "wdt2_fck", &wdt2_fck),
+       CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
+       CLK("omap_wdt", "ick", &wdt2_ick),
+       CLK(NULL, "wdt2_ick", &wdt2_ick),
+       CLK(NULL, "wdt1_ick", &wdt1_ick),
+       CLK(NULL, "gpio1_ick", &gpio1_ick),
+       CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
+       CLK(NULL, "gpt12_ick", &gpt12_ick),
+       CLK(NULL, "gpt1_ick", &gpt1_ick),
+       CLK(NULL, "per_96m_fck", &per_96m_fck),
+       CLK(NULL, "per_48m_fck", &per_48m_fck),
+       CLK(NULL, "uart3_fck", &uart3_fck),
+       CLK(NULL, "gpt2_fck", &gpt2_fck),
+       CLK(NULL, "gpt3_fck", &gpt3_fck),
+       CLK(NULL, "gpt4_fck", &gpt4_fck),
+       CLK(NULL, "gpt5_fck", &gpt5_fck),
+       CLK(NULL, "gpt6_fck", &gpt6_fck),
+       CLK(NULL, "gpt7_fck", &gpt7_fck),
+       CLK(NULL, "gpt8_fck", &gpt8_fck),
+       CLK(NULL, "gpt9_fck", &gpt9_fck),
+       CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
+       CLK(NULL, "gpio6_dbck", &gpio6_dbck),
+       CLK(NULL, "gpio5_dbck", &gpio5_dbck),
+       CLK(NULL, "gpio4_dbck", &gpio4_dbck),
+       CLK(NULL, "gpio3_dbck", &gpio3_dbck),
+       CLK(NULL, "gpio2_dbck", &gpio2_dbck),
+       CLK(NULL, "wdt3_fck", &wdt3_fck),
+       CLK(NULL, "per_l4_ick", &per_l4_ick),
+       CLK(NULL, "gpio6_ick", &gpio6_ick),
+       CLK(NULL, "gpio5_ick", &gpio5_ick),
+       CLK(NULL, "gpio4_ick", &gpio4_ick),
+       CLK(NULL, "gpio3_ick", &gpio3_ick),
+       CLK(NULL, "gpio2_ick", &gpio2_ick),
+       CLK(NULL, "wdt3_ick", &wdt3_ick),
+       CLK(NULL, "uart3_ick", &uart3_ick),
+       CLK(NULL, "uart4_ick", &uart4_ick),
+       CLK(NULL, "gpt9_ick", &gpt9_ick),
+       CLK(NULL, "gpt8_ick", &gpt8_ick),
+       CLK(NULL, "gpt7_ick", &gpt7_ick),
+       CLK(NULL, "gpt6_ick", &gpt6_ick),
+       CLK(NULL, "gpt5_ick", &gpt5_ick),
+       CLK(NULL, "gpt4_ick", &gpt4_ick),
+       CLK(NULL, "gpt3_ick", &gpt3_ick),
+       CLK(NULL, "gpt2_ick", &gpt2_ick),
+       CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
+       CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
+       CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
+       CLK(NULL, "mcbsp4_ick", &mcbsp2_ick),
+       CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
+       CLK(NULL, "mcbsp2_ick", &mcbsp4_ick),
+       CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
+       CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
+       CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
+       CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+       CLK("etb", "emu_src_ck", &emu_src_ck),
+       CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+       CLK(NULL, "emu_src_ck", &emu_src_ck),
+       CLK(NULL, "pclk_fck", &pclk_fck),
+       CLK(NULL, "pclkx2_fck", &pclkx2_fck),
+       CLK(NULL, "atclk_fck", &atclk_fck),
+       CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
+       CLK(NULL, "traceclk_fck", &traceclk_fck),
+       CLK(NULL, "secure_32k_fck", &secure_32k_fck),
+       CLK(NULL, "gpt12_fck", &gpt12_fck),
+       CLK(NULL, "wdt1_fck", &wdt1_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = {
+       CLK(NULL, "dpll5_ck", &dpll5_ck),
+       CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
+       CLK(NULL, "core_d3_ck", &core_d3_ck),
+       CLK(NULL, "core_d4_ck", &core_d4_ck),
+       CLK(NULL, "core_d6_ck", &core_d6_ck),
+       CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
+       CLK(NULL, "core_d2_ck", &core_d2_ck),
+       CLK(NULL, "corex2_d3_fck", &corex2_d3_fck),
+       CLK(NULL, "corex2_d5_fck", &corex2_d5_fck),
+       CLK(NULL, "sgx_fck", &sgx_fck),
+       CLK(NULL, "sgx_ick", &sgx_ick),
+       CLK(NULL, "cpefuse_fck", &cpefuse_fck),
+       CLK(NULL, "ts_fck", &ts_fck),
+       CLK(NULL, "usbtll_fck", &usbtll_fck),
+       CLK(NULL, "usbtll_ick", &usbtll_ick),
+       CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
+       CLK(NULL, "mmchs3_ick", &mmchs3_ick),
+       CLK(NULL, "mmchs3_fck", &mmchs3_fck),
+       CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
+       CLK("omapdss_dss", "ick", &dss_ick_3430es2),
+       CLK(NULL, "dss_ick", &dss_ick_3430es2),
+       CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
+       CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
+       CLK(NULL, "usbhost_ick", &usbhost_ick),
+       { NULL },
+};
+
+static struct ti_clk_alias omap3430es1_clks[] = {
+       CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
+       CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
+       CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
+       CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
+       CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
+       CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
+       CLK(NULL, "fshostusb_fck", &fshostusb_fck),
+       CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
+       CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
+       CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
+       CLK(NULL, "fac_ick", &fac_ick),
+       CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
+       CLK(NULL, "usb_l4_ick", &usb_l4_ick),
+       CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
+       CLK("omapdss_dss", "ick", &dss_ick_3430es1),
+       CLK(NULL, "dss_ick", &dss_ick_3430es1),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_clks[] = {
+       CLK(NULL, "uart4_fck", &uart4_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias am35xx_clks[] = {
+       CLK(NULL, "ipss_ick", &ipss_ick),
+       CLK(NULL, "rmii_ck", &rmii_ck),
+       CLK(NULL, "pclk_ck", &pclk_ck),
+       CLK(NULL, "emac_ick", &emac_ick),
+       CLK(NULL, "emac_fck", &emac_fck),
+       CLK("davinci_emac.0", NULL, &emac_ick),
+       CLK("davinci_mdio.0", NULL, &emac_fck),
+       CLK("vpfe-capture", "master", &vpfe_ick),
+       CLK("vpfe-capture", "slave", &vpfe_fck),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
+       CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
+       CLK(NULL, "hecc_ck", &hecc_ck),
+       CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
+       CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
+       { NULL },
+};
+
+static struct ti_clk *omap36xx_clk_patches[] = {
+       &dpll4_m3x2_ck_omap36xx,
+       &dpll3_m3x2_ck_omap36xx,
+       &dpll4_m6x2_ck_omap36xx,
+       &dpll4_m2x2_ck_omap36xx,
+       &dpll4_m5x2_ck_omap36xx,
+       &dpll4_ck_omap36xx,
+       NULL,
+};
+
+static const char *enable_init_clks[] = {
+       "sdrc_ick",
+       "gpmc_fck",
+       "omapctrl_ick",
+};
+
+static void __init omap3_clk_legacy_common_init(void)
+{
+       omap2_clk_disable_autoidle_all();
+
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+               (clk_get_rate(osc_sys_ck.clk) / 1000000),
+               (clk_get_rate(osc_sys_ck.clk) / 100000) % 10,
+               (clk_get_rate(core_ck.clk) / 1000000),
+               (clk_get_rate(arm_fck.clk) / 1000000));
+}
+
+int __init omap3430es1_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(omap3430es1_clks);
+       r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+
+       return r;
+}
+
+int __init omap3430_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
+
+int __init omap36xx_clk_legacy_init(void)
+{
+       int r;
+
+       ti_clk_patch_legacy_clks(omap36xx_clk_patches);
+       r = ti_clk_register_legacy_clks(omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
+
+int __init am35xx_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(am35xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
index 0d1750a8aea40db16a470c697418d1dbd8b6bb45..383a06e49b09db95e465260369fe307afebb92e2 100644 (file)
@@ -327,7 +327,6 @@ enum {
        OMAP3_SOC_OMAP3430_ES1,
        OMAP3_SOC_OMAP3430_ES2_PLUS,
        OMAP3_SOC_OMAP3630,
-       OMAP3_SOC_TI81XX,
 };
 
 static int __init omap3xxx_dt_clk_init(int soc_type)
@@ -370,7 +369,7 @@ static int __init omap3xxx_dt_clk_init(int soc_type)
                (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
                (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
 
-       if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+       if (soc_type != OMAP3_SOC_OMAP3430_ES1)
                omap3_clk_lock_dpll5();
 
        return 0;
@@ -390,8 +389,3 @@ int __init am35xx_dt_clk_init(void)
 {
        return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
 }
-
-int __init ti81xx_dt_clk_init(void)
-{
-       return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
-}
index 02517a8206bda8eda55ef32aac17f8163dc2d064..4f4c87751db521d0ce05dcd82e2a62e81408f9d9 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
index 5e183993e3ec56b926fffd252325000d02566d03..14160b2235480f1b2855a5c1f3d16865321ce18a 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/clk/ti.h>
index 62ac8f6e480c61abf1760aeec2ebc980fd716cc3..ee32f4deebf40280be8e4d4e6e454e815c53cd67 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644 (file)
index 0000000..9451e65
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk dm816x_clks[] = {
+       DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
+       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+       DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+       DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+       DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+       DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+       DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+       DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+       DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+       DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
+       DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"),
+       DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
+       DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
+       DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
+       DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"),
+       DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"),
+       { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+       "ddr_pll_clk1",
+       "ddr_pll_clk2",
+       "ddr_pll_clk3",
+};
+
+int __init ti81xx_dt_clk_init(void)
+{
+       ti_dt_clocks_register(dm816x_clks);
+       omap2_clk_disable_autoidle_all();
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       return 0;
+}
index 337abe5909e1f272cdcdca85e5ae0fb3fe1f4729..e22b95646e09a8357e5adb3786f726c8e6baab10 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -183,3 +185,128 @@ void ti_dt_clk_init_retry_clks(void)
                retries--;
        }
 }
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
+{
+       while (*patch) {
+               memcpy((*patch)->patch, *patch, sizeof(**patch));
+               patch++;
+       }
+}
+
+struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
+{
+       struct clk *clk;
+       struct ti_clk_fixed *fixed;
+       struct ti_clk_fixed_factor *fixed_factor;
+       struct clk_hw *clk_hw;
+
+       if (setup->clk)
+               return setup->clk;
+
+       switch (setup->type) {
+       case TI_CLK_FIXED:
+               fixed = setup->data;
+
+               clk = clk_register_fixed_rate(NULL, setup->name, NULL,
+                                             CLK_IS_ROOT, fixed->frequency);
+               break;
+       case TI_CLK_MUX:
+               clk = ti_clk_register_mux(setup);
+               break;
+       case TI_CLK_DIVIDER:
+               clk = ti_clk_register_divider(setup);
+               break;
+       case TI_CLK_COMPOSITE:
+               clk = ti_clk_register_composite(setup);
+               break;
+       case TI_CLK_FIXED_FACTOR:
+               fixed_factor = setup->data;
+
+               clk = clk_register_fixed_factor(NULL, setup->name,
+                                               fixed_factor->parent,
+                                               0, fixed_factor->mult,
+                                               fixed_factor->div);
+               break;
+       case TI_CLK_GATE:
+               clk = ti_clk_register_gate(setup);
+               break;
+       case TI_CLK_DPLL:
+               clk = ti_clk_register_dpll(setup);
+               break;
+       default:
+               pr_err("bad type for %s!\n", setup->name);
+               clk = ERR_PTR(-EINVAL);
+       }
+
+       if (!IS_ERR(clk)) {
+               setup->clk = clk;
+               if (setup->clkdm_name) {
+                       if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+                               pr_warn("can't setup clkdm for basic clk %s\n",
+                                       setup->name);
+                       } else {
+                               clk_hw = __clk_get_hw(clk);
+                               to_clk_hw_omap(clk_hw)->clkdm_name =
+                                       setup->clkdm_name;
+                               omap2_init_clk_clkdm(clk_hw);
+                       }
+               }
+       }
+
+       return clk;
+}
+
+int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
+{
+       struct clk *clk;
+       bool retry;
+       struct ti_clk_alias *retry_clk;
+       struct ti_clk_alias *tmp;
+
+       while (clks->clk) {
+               clk = ti_clk_register_clk(clks->clk);
+               if (IS_ERR(clk)) {
+                       if (PTR_ERR(clk) == -EAGAIN) {
+                               list_add(&clks->link, &retry_list);
+                       } else {
+                               pr_err("register for %s failed: %ld\n",
+                                      clks->clk->name, PTR_ERR(clk));
+                               return PTR_ERR(clk);
+                       }
+               } else {
+                       clks->lk.clk = clk;
+                       clkdev_add(&clks->lk);
+               }
+               clks++;
+       }
+
+       retry = true;
+
+       while (!list_empty(&retry_list) && retry) {
+               retry = false;
+               list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
+                       pr_debug("retry-init: %s\n", retry_clk->clk->name);
+                       clk = ti_clk_register_clk(retry_clk->clk);
+                       if (IS_ERR(clk)) {
+                               if (PTR_ERR(clk) == -EAGAIN) {
+                                       continue;
+                               } else {
+                                       pr_err("register for %s failed: %ld\n",
+                                              retry_clk->clk->name,
+                                              PTR_ERR(clk));
+                                       return PTR_ERR(clk);
+                               }
+                       } else {
+                               retry = true;
+                               retry_clk->lk.clk = clk;
+                               clkdev_add(&retry_clk->lk);
+                               list_del(&retry_clk->link);
+                       }
+               }
+       }
+
+       return 0;
+}
+#endif
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644 (file)
index 0000000..404158d
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * TI Clock driver internal definitions
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_CLK_TI_CLOCK__
+#define __DRIVERS_CLK_TI_CLOCK__
+
+enum {
+       TI_CLK_FIXED,
+       TI_CLK_MUX,
+       TI_CLK_DIVIDER,
+       TI_CLK_COMPOSITE,
+       TI_CLK_FIXED_FACTOR,
+       TI_CLK_GATE,
+       TI_CLK_DPLL,
+};
+
+/* Global flags */
+#define CLKF_INDEX_POWER_OF_TWO                (1 << 0)
+#define CLKF_INDEX_STARTS_AT_ONE       (1 << 1)
+#define CLKF_SET_RATE_PARENT           (1 << 2)
+#define CLKF_OMAP3                     (1 << 3)
+#define CLKF_AM35XX                    (1 << 4)
+
+/* Gate flags */
+#define CLKF_SET_BIT_TO_DISABLE                (1 << 5)
+#define CLKF_INTERFACE                 (1 << 6)
+#define CLKF_SSI                       (1 << 7)
+#define CLKF_DSS                       (1 << 8)
+#define CLKF_HSOTGUSB                  (1 << 9)
+#define CLKF_WAIT                      (1 << 10)
+#define CLKF_NO_WAIT                   (1 << 11)
+#define CLKF_HSDIV                     (1 << 12)
+#define CLKF_CLKDM                     (1 << 13)
+
+/* DPLL flags */
+#define CLKF_LOW_POWER_STOP            (1 << 5)
+#define CLKF_LOCK                      (1 << 6)
+#define CLKF_LOW_POWER_BYPASS          (1 << 7)
+#define CLKF_PER                       (1 << 8)
+#define CLKF_CORE                      (1 << 9)
+#define CLKF_J_TYPE                    (1 << 10)
+
+#define CLK(dev, con, ck)              \
+       {                               \
+               .lk = {                 \
+                       .dev_id = dev,  \
+                       .con_id = con,  \
+               },                      \
+               .clk = ck,              \
+       }
+
+struct ti_clk {
+       const char *name;
+       const char *clkdm_name;
+       int type;
+       void *data;
+       struct ti_clk *patch;
+       struct clk *clk;
+};
+
+struct ti_clk_alias {
+       struct ti_clk *clk;
+       struct clk_lookup lk;
+       struct list_head link;
+};
+
+struct ti_clk_fixed {
+       u32 frequency;
+       u16 flags;
+};
+
+struct ti_clk_mux {
+       u8 bit_shift;
+       int num_parents;
+       u16 reg;
+       u8 module;
+       const char **parents;
+       u16 flags;
+};
+
+struct ti_clk_divider {
+       const char *parent;
+       u8 bit_shift;
+       u16 max_div;
+       u16 reg;
+       u8 module;
+       int *dividers;
+       int num_dividers;
+       u16 flags;
+};
+
+struct ti_clk_fixed_factor {
+       const char *parent;
+       u16 div;
+       u16 mult;
+       u16 flags;
+};
+
+struct ti_clk_gate {
+       const char *parent;
+       u8 bit_shift;
+       u16 reg;
+       u8 module;
+       u16 flags;
+};
+
+struct ti_clk_composite {
+       struct ti_clk_divider *divider;
+       struct ti_clk_mux *mux;
+       struct ti_clk_gate *gate;
+       u16 flags;
+};
+
+struct ti_clk_clkdm_gate {
+       const char *parent;
+       u16 flags;
+};
+
+struct ti_clk_dpll {
+       int num_parents;
+       u16 control_reg;
+       u16 idlest_reg;
+       u16 autoidle_reg;
+       u16 mult_div1_reg;
+       u8 module;
+       const char **parents;
+       u16 flags;
+       u8 modes;
+       u32 mult_mask;
+       u32 div1_mask;
+       u32 enable_mask;
+       u32 autoidle_mask;
+       u32 freqsel_mask;
+       u32 idlest_mask;
+       u32 dco_mask;
+       u32 sddiv_mask;
+       u16 max_multiplier;
+       u16 max_divider;
+       u8 min_divider;
+       u8 auto_recal_bit;
+       u8 recal_en_bit;
+       u8 recal_st_bit;
+};
+
+struct clk *ti_clk_register_gate(struct ti_clk *setup);
+struct clk *ti_clk_register_interface(struct ti_clk *setup);
+struct clk *ti_clk_register_mux(struct ti_clk *setup);
+struct clk *ti_clk_register_divider(struct ti_clk *setup);
+struct clk *ti_clk_register_composite(struct ti_clk *setup);
+struct clk *ti_clk_register_dpll(struct ti_clk *setup);
+
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
+
+void ti_clk_patch_legacy_clks(struct ti_clk **patch);
+struct clk *ti_clk_register_clk(struct ti_clk *setup);
+int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
+
+#endif
index 19d8980ba458ef4e37f69cd38ba9fe6093d3ecfb..3654f61912ebb7d099771f5f5e7fba9a5bbc4e92 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/clk/ti.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -116,8 +118,46 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
 
 #define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
 
-static void __init ti_clk_register_composite(struct clk_hw *hw,
-                                            struct device_node *node)
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_composite(struct ti_clk *setup)
+{
+       struct ti_clk_composite *comp;
+       struct clk_hw *gate;
+       struct clk_hw *mux;
+       struct clk_hw *div;
+       int num_parents = 1;
+       const char **parent_names = NULL;
+       struct clk *clk;
+
+       comp = setup->data;
+
+       div = ti_clk_build_component_div(comp->divider);
+       gate = ti_clk_build_component_gate(comp->gate);
+       mux = ti_clk_build_component_mux(comp->mux);
+
+       if (div)
+               parent_names = &comp->divider->parent;
+
+       if (gate)
+               parent_names = &comp->gate->parent;
+
+       if (mux) {
+               num_parents = comp->mux->num_parents;
+               parent_names = comp->mux->parents;
+       }
+
+       clk = clk_register_composite(NULL, setup->name,
+                                    parent_names, num_parents, mux,
+                                    &ti_clk_mux_ops, div,
+                                    &ti_composite_divider_ops, gate,
+                                    &ti_composite_gate_ops, 0);
+
+       return clk;
+}
+#endif
+
+static void __init _register_composite(struct clk_hw *hw,
+                                      struct device_node *node)
 {
        struct clk *clk;
        struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
@@ -136,7 +176,7 @@ static void __init ti_clk_register_composite(struct clk_hw *hw,
                        pr_debug("component %s not ready for %s, retry\n",
                                 cclk->comp_nodes[i]->name, node->name);
                        if (!ti_clk_retry_init(node, hw,
-                                              ti_clk_register_composite))
+                                              _register_composite))
                                return;
 
                        goto cleanup;
@@ -216,7 +256,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node)
        for (i = 0; i < num_clks; i++)
                cclk->comp_nodes[i] = _get_component_node(node, i);
 
-       ti_clk_register_composite(&cclk->hw, node);
+       _register_composite(&cclk->hw, node);
 }
 CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
               of_ti_composite_clk_setup);
index bff2b5b8ff598b2e150496eb3ebc984c651a0ebd..6211893c0980665749aa2c08577f9f3c17da4a78 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -300,6 +301,134 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        return clk;
 }
 
+static struct clk_div_table *
+_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width)
+{
+       int valid_div = 0;
+       struct clk_div_table *table;
+       int i;
+       int div;
+       u32 val;
+       u8 flags;
+
+       if (!setup->num_dividers) {
+               /* Clk divider table not provided, determine min/max divs */
+               flags = setup->flags;
+
+               if (flags & CLKF_INDEX_STARTS_AT_ONE)
+                       val = 1;
+               else
+                       val = 0;
+
+               div = 1;
+
+               while (div < setup->max_div) {
+                       if (flags & CLKF_INDEX_POWER_OF_TWO)
+                               div <<= 1;
+                       else
+                               div++;
+                       val++;
+               }
+
+               *width = fls(val);
+
+               return NULL;
+       }
+
+       for (i = 0; i < setup->num_dividers; i++)
+               if (setup->dividers[i])
+                       valid_div++;
+
+       table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       valid_div = 0;
+       *width = 0;
+
+       for (i = 0; i < setup->num_dividers; i++)
+               if (setup->dividers[i]) {
+                       table[valid_div].div = setup->dividers[i];
+                       table[valid_div].val = i;
+                       valid_div++;
+                       *width = i;
+               }
+
+       *width = fls(*width);
+
+       return table;
+}
+
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+{
+       struct clk_divider *div;
+       struct clk_omap_reg *reg;
+
+       if (!setup)
+               return NULL;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&div->reg;
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+               div->flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (setup->flags & CLKF_INDEX_POWER_OF_TWO)
+               div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       div->table = _get_div_table_from_setup(setup, &div->width);
+
+       div->shift = setup->bit_shift;
+
+       return &div->hw;
+}
+
+struct clk *ti_clk_register_divider(struct ti_clk *setup)
+{
+       struct ti_clk_divider *div;
+       struct clk_omap_reg *reg_setup;
+       u32 reg;
+       u8 width;
+       u32 flags = 0;
+       u8 div_flags = 0;
+       struct clk_div_table *table;
+       struct clk *clk;
+
+       div = setup->data;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       reg_setup->index = div->module;
+       reg_setup->offset = div->reg;
+
+       if (div->flags & CLKF_INDEX_STARTS_AT_ONE)
+               div_flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (div->flags & CLKF_INDEX_POWER_OF_TWO)
+               div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       if (div->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       table = _get_div_table_from_setup(div, &width);
+       if (IS_ERR(table))
+               return (struct clk *)table;
+
+       clk = _register_divider(NULL, setup->name, div->parent,
+                               flags, (void __iomem *)reg, div->bit_shift,
+                               width, div_flags, table, NULL);
+
+       if (IS_ERR(clk))
+               kfree(table);
+
+       return clk;
+}
+
 static struct clk_div_table *
 __init ti_clk_get_div_table(struct device_node *node)
 {
@@ -455,7 +584,8 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
                goto cleanup;
 
        clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-                               shift, width, clk_divider_flags, table, NULL);
+                               shift, width, clk_divider_flags, table,
+                               NULL);
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
index 85ac0dd501dea5fff98801ba47edabbf972492a5..81dc4698dc41740e77e82411b00c67ae9aa148ba 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -130,7 +131,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
 };
 
 /**
- * ti_clk_register_dpll - low level registration of a DPLL clock
+ * _register_dpll - low level registration of a DPLL clock
  * @hw: hardware clock definition for the clock
  * @node: device node for the clock
  *
@@ -138,8 +139,8 @@ static const struct clk_ops dpll_x2_ck_ops = {
  * clk-bypass is missing), the clock is added to retry list and
  * the initialization is retried on later stage.
  */
-static void __init ti_clk_register_dpll(struct clk_hw *hw,
-                                       struct device_node *node)
+static void __init _register_dpll(struct clk_hw *hw,
+                                 struct device_node *node)
 {
        struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
        struct dpll_data *dd = clk_hw->dpll_data;
@@ -151,7 +152,7 @@ static void __init ti_clk_register_dpll(struct clk_hw *hw,
        if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
                pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
                         node->name);
-               if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+               if (!ti_clk_retry_init(node, hw, _register_dpll))
                        return;
 
                goto cleanup;
@@ -175,20 +176,118 @@ cleanup:
        kfree(clk_hw);
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+void __iomem *_get_reg(u8 module, u16 offset)
+{
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       reg_setup->index = module;
+       reg_setup->offset = offset;
+
+       return (void __iomem *)reg;
+}
+
+struct clk *ti_clk_register_dpll(struct ti_clk *setup)
+{
+       struct clk_hw_omap *clk_hw;
+       struct clk_init_data init = { NULL };
+       struct dpll_data *dd;
+       struct clk *clk;
+       struct ti_clk_dpll *dpll;
+       const struct clk_ops *ops = &omap3_dpll_ck_ops;
+       struct clk *clk_ref;
+       struct clk *clk_bypass;
+
+       dpll = setup->data;
+
+       if (dpll->num_parents < 2)
+               return ERR_PTR(-EINVAL);
+
+       clk_ref = clk_get_sys(NULL, dpll->parents[0]);
+       clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
+
+       if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
+               return ERR_PTR(-EAGAIN);
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       if (!dd || !clk_hw) {
+               clk = ERR_PTR(-ENOMEM);
+               goto cleanup;
+       }
+
+       clk_hw->dpll_data = dd;
+       clk_hw->ops = &clkhwops_omap3_dpll;
+       clk_hw->hw.init = &init;
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       init.name = setup->name;
+       init.ops = ops;
+
+       init.num_parents = dpll->num_parents;
+       init.parent_names = dpll->parents;
+
+       dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
+       dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
+       dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
+       dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
+
+       dd->modes = dpll->modes;
+       dd->div1_mask = dpll->div1_mask;
+       dd->idlest_mask = dpll->idlest_mask;
+       dd->mult_mask = dpll->mult_mask;
+       dd->autoidle_mask = dpll->autoidle_mask;
+       dd->enable_mask = dpll->enable_mask;
+       dd->sddiv_mask = dpll->sddiv_mask;
+       dd->dco_mask = dpll->dco_mask;
+       dd->max_divider = dpll->max_divider;
+       dd->min_divider = dpll->min_divider;
+       dd->max_multiplier = dpll->max_multiplier;
+       dd->auto_recal_bit = dpll->auto_recal_bit;
+       dd->recal_en_bit = dpll->recal_en_bit;
+       dd->recal_st_bit = dpll->recal_st_bit;
+
+       dd->clk_ref = clk_ref;
+       dd->clk_bypass = clk_bypass;
+
+       if (dpll->flags & CLKF_CORE)
+               init.ops = &omap3_dpll_core_ck_ops;
+
+       if (dpll->flags & CLKF_PER)
+               init.ops = &omap3_dpll_per_ck_ops;
+
+       if (dpll->flags & CLKF_J_TYPE)
+               dd->flags |= DPLL_J_TYPE;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (!IS_ERR(clk))
+               return clk;
+
+cleanup:
+       kfree(dd);
+       kfree(clk_hw);
+       return clk;
+}
+#endif
+
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
        defined(CONFIG_SOC_AM43XX)
 /**
- * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * _register_dpll_x2 - Registers a DPLLx2 clock
  * @node: device node for this clock
  * @ops: clk_ops for this clock
  * @hw_ops: clk_hw_ops for this clock
  *
  * Initializes a DPLL x 2 clock from device tree data.
  */
-static void ti_clk_register_dpll_x2(struct device_node *node,
-                                   const struct clk_ops *ops,
-                                   const struct clk_hw_omap_ops *hw_ops)
+static void _register_dpll_x2(struct device_node *node,
+                             const struct clk_ops *ops,
+                             const struct clk_hw_omap_ops *hw_ops)
 {
        struct clk *clk;
        struct clk_init_data init = { NULL };
@@ -318,7 +417,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
        if (dpll_mode)
                dd->modes = dpll_mode;
 
-       ti_clk_register_dpll(&clk_hw->hw, node);
+       _register_dpll(&clk_hw->hw, node);
        return;
 
 cleanup:
@@ -332,7 +431,7 @@ cleanup:
        defined(CONFIG_SOC_DRA7XX)
 static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
 {
-       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+       _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
 }
 CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
               of_ti_omap4_dpll_x2_setup);
@@ -341,7 +440,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
 #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
 {
-       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+       _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
 }
 CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
               of_ti_am3_dpll_x2_setup);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644 (file)
index 0000000..d216406
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <asm/div64.h>
+
+/* FAPLL Control Register PLL_CTRL */
+#define FAPLL_MAIN_LOCK                BIT(7)
+#define FAPLL_MAIN_PLLEN       BIT(3)
+#define FAPLL_MAIN_BP          BIT(2)
+#define FAPLL_MAIN_LOC_CTL     BIT(0)
+
+/* FAPLL powerdown register PWD */
+#define FAPLL_PWD_OFFSET       4
+
+#define MAX_FAPLL_OUTPUTS      7
+#define FAPLL_MAX_RETRIES      1000
+
+#define to_fapll(_hw)          container_of(_hw, struct fapll_data, hw)
+#define to_synth(_hw)          container_of(_hw, struct fapll_synth, hw)
+
+/* The bypass bit is inverted on the ddr_pll. */
+#define fapll_is_ddr_pll(va)   (((u32)(va) & 0xffff) == 0x0440)
+
+/*
+ * The audio_pll_clk1 input is hardwired to the 27 MHz bypass clock,
+ * and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
+ */
+#define is_ddr_pll_clk1(va)    (((u32)(va) & 0xffff) == 0x044c)
+#define is_audio_pll_clk1(va)  (((u32)(va) & 0xffff) == 0x04a8)
+
+/* Synthesizer divider register */
+#define SYNTH_LDMDIV1          BIT(8)
+
+/* Synthesizer frequency register */
+#define SYNTH_LDFREQ           BIT(31)
+
+struct fapll_data {
+       struct clk_hw hw;
+       void __iomem *base;
+       const char *name;
+       struct clk *clk_ref;
+       struct clk *clk_bypass;
+       struct clk_onecell_data outputs;
+       bool bypass_bit_inverted;
+};
+
+struct fapll_synth {
+       struct clk_hw hw;
+       struct fapll_data *fd;
+       int index;
+       void __iomem *freq;
+       void __iomem *div;
+       const char *name;
+       struct clk *clk_pll;
+};
+
+static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
+{
+       u32 v = readl_relaxed(fd->base);
+
+       if (fd->bypass_bit_inverted)
+               return !(v & FAPLL_MAIN_BP);
+       else
+               return !!(v & FAPLL_MAIN_BP);
+}
+
+static int ti_fapll_enable(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       v |= FAPLL_MAIN_PLLEN;
+       writel_relaxed(v, fd->base);
+
+       return 0;
+}
+
+static void ti_fapll_disable(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       v &= ~FAPLL_MAIN_PLLEN;
+       writel_relaxed(v, fd->base);
+}
+
+static int ti_fapll_is_enabled(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       return v & FAPLL_MAIN_PLLEN;
+}
+
+static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 fapll_n, fapll_p, v;
+       long long rate;
+
+       if (ti_fapll_clock_is_bypass(fd))
+               return parent_rate;
+
+       rate = parent_rate;
+
+       /* PLL pre-divider is P and multiplier is N */
+       v = readl_relaxed(fd->base);
+       fapll_p = (v >> 8) & 0xff;
+       if (fapll_p)
+               do_div(rate, fapll_p);
+       fapll_n = v >> 16;
+       if (fapll_n)
+               rate *= fapll_n;
+
+       return rate;
+}
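+
+/*
+ * Illustrative example (hypothetical register values): with a 27 MHz
+ * reference and PLL_CTRL reading P = 1 (bits 15:8) and N = 32
+ * (bits 31:16), the code above returns 27 MHz / 1 * 32 = 864 MHz.
+ */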
+
+static u8 ti_fapll_get_parent(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+
+       if (ti_fapll_clock_is_bypass(fd))
+               return 1;
+
+       return 0;
+}
+
+static struct clk_ops ti_fapll_ops = {
+       .enable = ti_fapll_enable,
+       .disable = ti_fapll_disable,
+       .is_enabled = ti_fapll_is_enabled,
+       .recalc_rate = ti_fapll_recalc_rate,
+       .get_parent = ti_fapll_get_parent,
+};
+
+static int ti_fapll_synth_enable(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       v &= ~(1 << synth->index);
+       writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+
+       return 0;
+}
+
+static void ti_fapll_synth_disable(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       v |= 1 << synth->index;
+       writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+}
+
+static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       return !(v & (1 << synth->index));
+}
+
+/*
+ * See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
+ */
+static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 synth_div_m;
+       long long rate;
+
+       /* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
+       if (!synth->div)
+               return 32768;
+
+       /*
+        * PLL in bypass sets the synths in bypass mode too. The PLL rate
+        * can also be set to 27 MHz, so we can't use parent_rate to
+        * check for bypass mode.
+        */
+       if (ti_fapll_clock_is_bypass(synth->fd))
+               return parent_rate;
+
+       rate = parent_rate;
+
+       /*
+        * Synth frequency integer and fractional divider.
+        * Note that the phase output K is 8, so the result needs
+        * to be multiplied by 8.
+        */
+       if (synth->freq) {
+               u32 v, synth_int_div, synth_frac_div, synth_div_freq;
+
+               v = readl_relaxed(synth->freq);
+               synth_int_div = (v >> 24) & 0xf;
+               synth_frac_div = v & 0xffffff;
+               synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
+               rate *= 10000000;
+               do_div(rate, synth_div_freq);
+               rate *= 8;
+       }
+
+       /* Synth post-divider M */
+       synth_div_m = readl_relaxed(synth->div) & 0xff;
+       do_div(rate, synth_div_m);
+
+       return rate;
+}
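+
+/*
+ * Illustrative example (hypothetical values): with an 864 MHz PLL rate,
+ * an integer divider of 4 and a fractional divider of 0 give
+ * synth_div_freq = 4 * 10000000 + 0 = 40000000, so the synthesizer rate
+ * is 864 MHz / 4 * 8 (phase outputs) / 8 (post-divider M = 8) = 216 MHz.
+ */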
+
+static struct clk_ops ti_fapll_synt_ops = {
+       .enable = ti_fapll_synth_enable,
+       .disable = ti_fapll_synth_disable,
+       .is_enabled = ti_fapll_synth_is_enabled,
+       .recalc_rate = ti_fapll_synth_recalc_rate,
+};
+
+static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
+                                               void __iomem *freq,
+                                               void __iomem *div,
+                                               int index,
+                                               const char *name,
+                                               const char *parent,
+                                               struct clk *pll_clk)
+{
+       struct clk_init_data *init;
+       struct fapll_synth *synth;
+
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!init)
+               return ERR_PTR(-ENOMEM);
+
+       init->ops = &ti_fapll_synt_ops;
+       init->name = name;
+       init->parent_names = &parent;
+       init->num_parents = 1;
+
+       synth = kzalloc(sizeof(*synth), GFP_KERNEL);
+       if (!synth)
+               goto free;
+
+       synth->fd = fd;
+       synth->index = index;
+       synth->freq = freq;
+       synth->div = div;
+       synth->name = name;
+       synth->hw.init = init;
+       synth->clk_pll = pll_clk;
+
+       return clk_register(NULL, &synth->hw);
+
+free:
+       kfree(synth);
+       kfree(init);
+
+       return ERR_PTR(-ENOMEM);
+}
+
+static void __init ti_fapll_setup(struct device_node *node)
+{
+       struct fapll_data *fd;
+       struct clk_init_data *init = NULL;
+       const char *parent_name[2];
+       struct clk *pll_clk;
+       int i;
+
+       fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+       if (!fd)
+               return;
+
+       fd->outputs.clks = kzalloc(sizeof(struct clk *) *
+                                  (MAX_FAPLL_OUTPUTS + 1),
+                                  GFP_KERNEL);
+       if (!fd->outputs.clks)
+               goto free;
+
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!init)
+               goto free;
+
+       init->ops = &ti_fapll_ops;
+       init->name = node->name;
+
+       init->num_parents = of_clk_get_parent_count(node);
+       if (init->num_parents != 2) {
+               pr_err("%s must have two parents\n", node->name);
+               goto free;
+       }
+
+       parent_name[0] = of_clk_get_parent_name(node, 0);
+       parent_name[1] = of_clk_get_parent_name(node, 1);
+       init->parent_names = parent_name;
+
+       fd->clk_ref = of_clk_get(node, 0);
+       if (IS_ERR(fd->clk_ref)) {
+               pr_err("%s could not get clk_ref\n", node->name);
+               goto free;
+       }
+
+       fd->clk_bypass = of_clk_get(node, 1);
+       if (IS_ERR(fd->clk_bypass)) {
+               pr_err("%s could not get clk_bypass\n", node->name);
+               goto free;
+       }
+
+       fd->base = of_iomap(node, 0);
+       if (!fd->base) {
+               pr_err("%s could not get IO base\n", node->name);
+               goto free;
+       }
+
+       if (fapll_is_ddr_pll(fd->base))
+               fd->bypass_bit_inverted = true;
+
+       fd->name = node->name;
+       fd->hw.init = init;
+
+       /* Register the parent PLL */
+       pll_clk = clk_register(NULL, &fd->hw);
+       if (IS_ERR(pll_clk))
+               goto unmap;
+
+       fd->outputs.clks[0] = pll_clk;
+       fd->outputs.clk_num++;
+
+       /*
+        * Set up the child synthesizers starting at index 1 as the
+        * PLL output is at index 0. We need to check the clock-indices
+        * for numbering in case there are holes in the synth mapping,
+        * and then probe the synth register to see if it has a FREQ
+        * register available.
+        */
+       for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
+               const char *output_name;
+               void __iomem *freq, *div;
+               struct clk *synth_clk;
+               int output_instance;
+               u32 v;
+
+               if (of_property_read_string_index(node, "clock-output-names",
+                                                 i, &output_name))
+                       continue;
+
+               if (of_property_read_u32_index(node, "clock-indices", i,
+                                              &output_instance))
+                       output_instance = i;
+
+               freq = fd->base + (output_instance * 8);
+               div = freq + 4;
+
+               /* Check for hardwired audio_pll_clk1 */
+               if (is_audio_pll_clk1(freq)) {
+                       freq = 0;
+                       div = 0;
+               } else {
+                       /* Does the synthesizer have a FREQ register? */
+                       v = readl_relaxed(freq);
+                       if (!v)
+                               freq = 0;
+               }
+               synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
+                                                output_name, node->name,
+                                                pll_clk);
+               if (IS_ERR(synth_clk))
+                       continue;
+
+               fd->outputs.clks[output_instance] = synth_clk;
+               fd->outputs.clk_num++;
+
+               clk_register_clkdev(synth_clk, output_name, NULL);
+       }
+
+       /* Register the child synthesizers as the FAPLL outputs */
+       of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
+       /* Add clock alias for the outputs */
+
+       kfree(init);
+
+       return;
+
+unmap:
+       iounmap(fd->base);
+free:
+       if (fd->clk_bypass)
+               clk_put(fd->clk_bypass);
+       if (fd->clk_ref)
+               clk_put(fd->clk_ref);
+       kfree(fd->outputs.clks);
+       kfree(fd);
+       kfree(init);
+}
+
+CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
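+
+/*
+ * For reference, ti_fapll_setup() above consumes a node along these
+ * lines (an illustrative sketch only; node and clock names are made up
+ * and the register values are placeholders). The clocks property lists
+ * the reference clock first and the bypass clock second:
+ *
+ *     main_fapll: main_fapll {
+ *             #clock-cells = <1>;
+ *             compatible = "ti,dm816-fapll-clock";
+ *             reg = <0x400 0x40>;
+ *             clocks = <&sys_clkin>, <&sys_clkin>;
+ *             clock-indices = <1>, <2>, <3>;
+ *             clock-output-names = "main_pll_clk1",
+ *                                  "main_pll_clk2",
+ *                                  "main_pll_clk3";
+ *     };
+ */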
index b326d2797feb23c0ed84fe52be5b9905102f0234..d493307b73f42b0bdef4c36544e5146ce00b977e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
 #undef pr_fmt
@@ -90,63 +92,164 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
        return ret;
 }
 
-static void __init _of_ti_gate_clk_setup(struct device_node *node,
-                                        const struct clk_ops *ops,
-                                        const struct clk_hw_omap_ops *hw_ops)
+static struct clk *_register_gate(struct device *dev, const char *name,
+                                 const char *parent_name, unsigned long flags,
+                                 void __iomem *reg, u8 bit_idx,
+                                 u8 clk_gate_flags, const struct clk_ops *ops,
+                                 const struct clk_hw_omap_ops *hw_ops)
 {
-       struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *clk_name = node->name;
-       const char *parent_name;
-       u32 val;
+       struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
-               return;
+               return ERR_PTR(-ENOMEM);
 
        clk_hw->hw.init = &init;
 
-       init.name = clk_name;
+       init.name = name;
        init.ops = ops;
 
-       if (ops != &omap_gate_clkdm_clk_ops) {
-               clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-               if (!clk_hw->enable_reg)
-                       goto cleanup;
+       clk_hw->enable_reg = reg;
+       clk_hw->enable_bit = bit_idx;
+       clk_hw->ops = hw_ops;
 
-               if (!of_property_read_u32(node, "ti,bit-shift", &val))
-                       clk_hw->enable_bit = val;
+       clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags;
+
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       init.flags = flags;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (IS_ERR(clk))
+               kfree(clk_hw);
+
+       return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_gate(struct ti_clk *setup)
+{
+       const struct clk_ops *ops = &omap_gate_clk_ops;
+       const struct clk_hw_omap_ops *hw_ops = NULL;
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+       u32 flags = 0;
+       u8 clk_gate_flags = 0;
+       struct ti_clk_gate *gate;
+
+       gate = setup->data;
+
+       if (gate->flags & CLKF_INTERFACE)
+               return ti_clk_register_interface(setup);
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       if (gate->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
+               clk_gate_flags |= INVERT_ENABLE;
+
+       if (gate->flags & CLKF_HSDIV) {
+               ops = &omap_gate_clk_hsdiv_restore_ops;
+               hw_ops = &clkhwops_wait;
        }
 
-       clk_hw->ops = hw_ops;
+       if (gate->flags & CLKF_DSS)
+               hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
+
+       if (gate->flags & CLKF_WAIT)
+               hw_ops = &clkhwops_wait;
+
+       if (gate->flags & CLKF_CLKDM)
+               ops = &omap_gate_clkdm_clk_ops;
+
+       if (gate->flags & CLKF_AM35XX)
+               hw_ops = &clkhwops_am35xx_ipss_module_wait;
 
-       clk_hw->flags = MEMMAP_ADDRESSING;
+       reg_setup->index = gate->module;
+       reg_setup->offset = gate->reg;
+
+       return _register_gate(NULL, setup->name, gate->parent, flags,
+                             (void __iomem *)reg, gate->bit_shift,
+                             clk_gate_flags, ops, hw_ops);
+}
+
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
+{
+       struct clk_hw_omap *gate;
+       struct clk_omap_reg *reg;
+       const struct clk_hw_omap_ops *ops = &clkhwops_wait;
+
+       if (!setup)
+               return NULL;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&gate->enable_reg;
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       gate->enable_bit = setup->bit_shift;
+
+       if (setup->flags & CLKF_NO_WAIT)
+               ops = NULL;
+
+       if (setup->flags & CLKF_INTERFACE)
+               ops = &clkhwops_iclk_wait;
+
+       gate->ops = ops;
+       gate->flags = MEMMAP_ADDRESSING;
+
+       return &gate->hw;
+}
+#endif
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+                                        const struct clk_ops *ops,
+                                        const struct clk_hw_omap_ops *hw_ops)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg = NULL;
+       u8 enable_bit = 0;
+       u32 val;
+       u32 flags = 0;
+       u8 clk_gate_flags = 0;
+
+       if (ops != &omap_gate_clkdm_clk_ops) {
+               reg = ti_clk_get_reg_addr(node, 0);
+               if (!reg)
+                       return;
+
+               if (!of_property_read_u32(node, "ti,bit-shift", &val))
+                       enable_bit = val;
+       }
 
        if (of_clk_get_parent_count(node) != 1) {
-               pr_err("%s must have 1 parent\n", clk_name);
-               goto cleanup;
+               pr_err("%s must have 1 parent\n", node->name);
+               return;
        }
 
        parent_name = of_clk_get_parent_name(node, 0);
-       init.parent_names = &parent_name;
-       init.num_parents = 1;
 
        if (of_property_read_bool(node, "ti,set-rate-parent"))
-               init.flags |= CLK_SET_RATE_PARENT;
+               flags |= CLK_SET_RATE_PARENT;
 
        if (of_property_read_bool(node, "ti,set-bit-to-disable"))
-               clk_hw->flags |= INVERT_ENABLE;
+               clk_gate_flags |= INVERT_ENABLE;
 
-       clk = clk_register(NULL, &clk_hw->hw);
+       clk = _register_gate(NULL, node->name, parent_name, flags, reg,
+                            enable_bit, clk_gate_flags, ops, hw_ops);
 
-       if (!IS_ERR(clk)) {
+       if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               return;
-       }
-
-cleanup:
-       kfree(clk_hw);
 }
 
 static void __init
index 9c3e8c4aaa40c0b8a46048bab734286941a77e6b..265d91f071c5e34554cc71c474278997917a7a6f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -31,53 +32,102 @@ static const struct clk_ops ti_interface_clk_ops = {
        .is_enabled     = &omap2_dflt_clk_is_enabled,
 };
 
-static void __init _of_ti_interface_clk_setup(struct device_node *node,
-                                             const struct clk_hw_omap_ops *ops)
+static struct clk *_register_interface(struct device *dev, const char *name,
+                                      const char *parent_name,
+                                      void __iomem *reg, u8 bit_idx,
+                                      const struct clk_hw_omap_ops *ops)
 {
-       struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *parent_name;
-       u32 val;
+       struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
-               return;
+               return ERR_PTR(-ENOMEM);
 
        clk_hw->hw.init = &init;
        clk_hw->ops = ops;
        clk_hw->flags = MEMMAP_ADDRESSING;
+       clk_hw->enable_reg = reg;
+       clk_hw->enable_bit = bit_idx;
 
-       clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-       if (!clk_hw->enable_reg)
-               goto cleanup;
-
-       if (!of_property_read_u32(node, "ti,bit-shift", &val))
-               clk_hw->enable_bit = val;
-
-       init.name = node->name;
+       init.name = name;
        init.ops = &ti_interface_clk_ops;
        init.flags = 0;
 
-       parent_name = of_clk_get_parent_name(node, 0);
-       if (!parent_name) {
-               pr_err("%s must have a parent\n", node->name);
-               goto cleanup;
-       }
-
        init.num_parents = 1;
        init.parent_names = &parent_name;
 
        clk = clk_register(NULL, &clk_hw->hw);
 
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (IS_ERR(clk))
+               kfree(clk_hw);
+       else
                omap2_init_clk_hw_omap_clocks(clk);
+
+       return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_interface(struct ti_clk *setup)
+{
+       const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+       struct ti_clk_gate *gate;
+
+       gate = setup->data;
+       reg_setup = (struct clk_omap_reg *)&reg;
+       reg_setup->index = gate->module;
+       reg_setup->offset = gate->reg;
+
+       if (gate->flags & CLKF_NO_WAIT)
+               ops = &clkhwops_iclk;
+
+       if (gate->flags & CLKF_HSOTGUSB)
+               ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
+
+       if (gate->flags & CLKF_DSS)
+               ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+
+       if (gate->flags & CLKF_SSI)
+               ops = &clkhwops_omap3430es2_iclk_ssi_wait;
+
+       if (gate->flags & CLKF_AM35XX)
+               ops = &clkhwops_am35xx_ipss_wait;
+
+       return _register_interface(NULL, setup->name, gate->parent,
+                                  (void __iomem *)reg, gate->bit_shift, ops);
+}
+#endif
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+                                             const struct clk_hw_omap_ops *ops)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg;
+       u8 enable_bit = 0;
+       u32 val;
+
+       reg = ti_clk_get_reg_addr(node, 0);
+       if (!reg)
+               return;
+
+       if (!of_property_read_u32(node, "ti,bit-shift", &val))
+               enable_bit = val;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       if (!parent_name) {
+               pr_err("%s must have a parent\n", node->name);
                return;
        }
 
-cleanup:
-       kfree(clk_hw);
+       clk = _register_interface(NULL, node->name, parent_name, reg,
+                                 enable_bit, ops);
+
+       if (!IS_ERR(clk))
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
 }
 
 static void __init of_ti_interface_clk_setup(struct device_node *node)
index e9d650e51287d50fd53704636241445667bdcf7c..728e253606bce51a9435e6915ee1a445dcfff606 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -144,6 +145,39 @@ static struct clk *_register_mux(struct device *dev, const char *name,
        return clk;
 }
 
+struct clk *ti_clk_register_mux(struct ti_clk *setup)
+{
+       struct ti_clk_mux *mux;
+       u32 flags;
+       u8 mux_flags = 0;
+       struct clk_omap_reg *reg_setup;
+       u32 reg;
+       u32 mask;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       mux = setup->data;
+       flags = CLK_SET_RATE_NO_REPARENT;
+
+       mask = mux->num_parents;
+       if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE))
+               mask--;
+
+       mask = (1 << fls(mask)) - 1;
+       reg_setup->index = mux->module;
+       reg_setup->offset = mux->reg;
+
+       if (mux->flags & CLKF_INDEX_STARTS_AT_ONE)
+               mux_flags |= CLK_MUX_INDEX_ONE;
+
+       if (mux->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
+                            flags, (void __iomem *)reg, mux->bit_shift, mask,
+                            mux_flags, NULL, NULL);
+}
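+
+/*
+ * Illustrative example: a mux with four parents and zero-based selector
+ * values uses mask = 4 - 1 = 3, i.e. (1 << fls(3)) - 1 = 0x3; with
+ * CLKF_INDEX_STARTS_AT_ONE the selector runs 1..4, so mask = 4 and
+ * (1 << fls(4)) - 1 = 0x7.
+ */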
+
 /**
  * of_mux_clk_setup - Setup function for simple mux rate clock
  * @node: DT node for the clock
@@ -194,8 +228,9 @@ static void of_mux_clk_setup(struct device_node *node)
 
        mask = (1 << fls(mask)) - 1;
 
-       clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
-                           reg, shift, mask, clk_mux_flags, NULL, NULL);
+       clk = _register_mux(NULL, node->name, parent_names, num_parents,
+                           flags, reg, shift, mask, clk_mux_flags, NULL,
+                           NULL);
 
        if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -205,6 +240,37 @@ cleanup:
 }
 CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
 
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
+{
+       struct clk_mux *mux;
+       struct clk_omap_reg *reg;
+       int num_parents;
+
+       if (!setup)
+               return NULL;
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&mux->reg;
+
+       mux->shift = setup->bit_shift;
+
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+               mux->flags |= CLK_MUX_INDEX_ONE;
+
+       num_parents = setup->num_parents;
+
+       mux->mask = num_parents - 1;
+       mux->mask = (1 << fls(mux->mask)) - 1;
+
+       return &mux->hw;
+}
+
 static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
 {
        struct clk_mux *mux;
index bd4769a8448582284b2734b0a77cfa6484306e17..0e950769ed033185cf2ad95587958de586ad1246 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/err.h>
index e2d63bc47436d1ee51014a24dc21ebcc1131ca51..bf63c96acb1a2947ce7e274f2869b51ab8e95550 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/slab.h>
 #include <linux/io.h>
index 9037bebd69f79cd9121d101913bb34d0cffa4420..f870aad57711f6a69d248dc56f0309251ed0da74 100644 (file)
@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
        clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
                        "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
                        26, 0, &armclk_lock);
+       clk_prepare_enable(clks[cpu_2x]);
 
        clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
                        4 + 2 * tmp);
index 1c2506f68122567df92d6197990ee1b015d0984f..68161f7a07d6c8bef677fdd04bb2a1b20715a664 100644 (file)
@@ -63,6 +63,11 @@ config VT8500_TIMER
 config CADENCE_TTC_TIMER
        bool
 
+config ASM9260_TIMER
+       bool
+       select CLKSRC_MMIO
+       select CLKSRC_OF
+
 config CLKSRC_NOMADIK_MTU
        bool
        depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -245,15 +250,4 @@ config CLKSRC_PXA
        help
          This enables OST0 support available on PXA and SA-11x0
          platforms.
-
-config ASM9260_TIMER
-       bool "Alphascale ASM9260 timer driver"
-       depends on GENERIC_CLOCKEVENTS
-       select CLKSRC_MMIO
-       select CLKSRC_OF
-       default y if MACH_ASM9260
-       help
-         This enables build of a clocksource and clockevent driver for
-         the 32-bit System Timer hardware available on a Alphascale ASM9260.
-
 endmenu
index 32a3d25795d3a2b9303db120107d017c13c7165f..68ab42356d0e7a7cd4d97a453465633bba1c5e33 100644 (file)
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
        }
        rate = clk_get_rate(clk);
 
+       mtk_timer_global_reset(evt);
+
        if (request_irq(evt->dev.irq, mtk_timer_interrupt,
                        IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
                pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
 
        evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-       mtk_timer_global_reset(evt);
-
        /* Configure clock source */
        mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
        clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
 
        /* Configure clock event */
        mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
-       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
        clockevents_config_and_register(&evt->dev, rate, 0x3,
                                        0xffffffff);
+
+       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
        return;
 
 err_clk_disable:
index 941f3f344e08ab2ab552638f0e21ab41718a57ad..d9438af2bbd6b7d001bbdf524b7281a7a0c0816e 100644 (file)
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = {
        .dev_id         = &ckevt_pxa_osmr0,
 };
 
-static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
 {
        timer_writel(0, OIER);
        timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
index bba62f9deefbd0f71d59409daf85857cc2caa983..ec57ba2bbd87ac9f2a251e350598fa52019d7815 100644 (file)
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
        clock_event_ddata.base = base;
        clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
 
-       setup_irq(irq, &efm32_clock_event_irq);
-
        clockevents_config_and_register(&clock_event_ddata.evtdev,
                                        DIV_ROUND_CLOSEST(rate, 1024),
                                        0xf, 0xffff);
 
+       setup_irq(irq, &efm32_clock_event_irq);
+
        return 0;
 
 err_get_irq:
index 02268448dc8540a9f53113f28c29e6a292209a5a..5dcbf90b8015ce40787d2acf27a53a142d4aed8d 100644 (file)
@@ -178,10 +178,6 @@ static void __init sun5i_timer_init(struct device_node *node)
 
        ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-       ret = setup_irq(irq, &sun5i_timer_irq);
-       if (ret)
-               pr_warn("failed to setup irq %d\n", irq);
-
        /* Enable timer0 interrupt */
        val = readl(timer_base + TIMER_IRQ_EN_REG);
        writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
@@ -191,6 +187,10 @@ static void __init sun5i_timer_init(struct device_node *node)
 
        clockevents_config_and_register(&sun5i_clockevent, rate,
                                        TIMER_SYNC_TICKS, 0xffffffff);
+
+       ret = setup_irq(irq, &sun5i_timer_irq);
+       if (ret)
+               pr_warn("failed to setup irq %d\n", irq);
 }
 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
                       sun5i_timer_init);
index 72564b701b4a7018643c77de03f58a9092ff5f71..7ea24413cee6855c65daa7954bc4843622402e75 100644 (file)
@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE
 config PPC_CORENET_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
        depends on PPC_E500MC && OF && COMMON_CLK
-       select CLK_PPC_CORENET
+       select CLK_QORIQ
        help
          This adds the CPUFreq driver support for Freescale e500mc,
          e5500 and e6500 series SoCs which are capable of changing
index 5e98c6b1f284b651f0b74d3673c55f997e47f081..82d2fbb20f7eb70d94a9c9c6e1d6ec3708f53ee0 100644 (file)
@@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = {
 
 static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
-       struct device_node *cpus, *np;
+       struct device_node *cpu0;
        int ret = -EINVAL;
 
        exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        if (ret)
                goto err_cpufreq_reg;
 
-       cpus = of_find_node_by_path("/cpus");
-       if (!cpus) {
-               pr_err("failed to find cpus node\n");
+       cpu0 = of_get_cpu_node(0, NULL);
+       if (!cpu0) {
+               pr_err("failed to find cpu0 node\n");
                return 0;
        }
 
-       np = of_get_next_child(cpus, NULL);
-       if (!np) {
-               pr_err("failed to find cpus child node\n");
-               of_node_put(cpus);
-               return 0;
-       }
-
-       if (of_find_property(np, "#cooling-cells", NULL)) {
-               cdev = of_cpufreq_cooling_register(np,
+       if (of_find_property(cpu0, "#cooling-cells", NULL)) {
+               cdev = of_cpufreq_cooling_register(cpu0,
                                                   cpu_present_mask);
                if (IS_ERR(cdev))
                        pr_err("running cpufreq without cooling device: %ld\n",
                               PTR_ERR(cdev));
        }
-       of_node_put(np);
-       of_node_put(cpus);
 
        return 0;
 
index bee5df7794d33d1078116c8ac2f3618075230c8c..7cb4b766cf948d3f3e4b813325eda0aa0f0129aa 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/smp.h>
 #include <sysdev/fsl_soc.h>
 
+#include <asm/smp.h>   /* for get_hard_smp_processor_id() in UP configs */
+
 /**
  * struct cpu_data - per CPU data struct
  * @parent: the parent node of cpu clock
index 2fd53eaaec20bf30ef00d6a19226fba13670501a..d6d425773fa497274301eaa88f247fb8dd770e89 100644 (file)
@@ -263,7 +263,7 @@ out:
 }
 
 #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
 {
        int count, v, i, found;
        struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
        .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
 };
 
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
        struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
        struct cpufreq_frequency_table *pos;
index d00f1cee45094a6c01e004934de6e2928d4e9222..733aa5153e7451f645a65e3452ed44d6d488054a 100644 (file)
@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
        (cfg->info->set_fvco)(cfg);
 }
 
-static inline void s3c_cpufreq_resume_clocks(void)
-{
-       cpu_cur.info->resume_clocks();
-}
-
 static inline void s3c_cpufreq_updateclk(struct clk *clk,
                                         unsigned int freq)
 {
@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
        last_target = ~0;       /* invalidate last_target setting */
 
-       /* first, find out what speed we resumed at. */
-       s3c_cpufreq_resume_clocks();
-
        /* whilst we will be called later on, we try and re-set the
         * cpu frequencies as soon as possible so that we do not end
         * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
 };
 
 
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
        if (!info || !info->name) {
                printk(KERN_ERR "%s: failed to pass valid information\n",
index aedec09579340b2db42095e3acebcbc4776543c8..59372077ec7c1a1b7d64e5ac67880b5e625526b5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/clockchips.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -158,70 +159,83 @@ static int powernv_add_idle_states(void)
        struct device_node *power_mgt;
        int nr_idle_states = 1; /* Snooze */
        int dt_idle_states;
-       const __be32 *idle_state_flags;
-       const __be32 *idle_state_latency;
-       u32 len_flags, flags, latency_ns;
-       int i;
+       u32 *latency_ns, *residency_ns, *flags;
+       int i, rc;
 
        /* Currently we have snooze statically defined */
 
        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
                pr_warn("opal: PowerMgmt Node not found\n");
-               return nr_idle_states;
+               goto out;
        }
 
-       idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
-       if (!idle_state_flags) {
-               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
-               return nr_idle_states;
+       /* Read values of any property to determine the num of idle states */
+       dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
+       if (dt_idle_states < 0) {
+               pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+               goto out;
        }
 
-       idle_state_latency = of_get_property(power_mgt,
-                       "ibm,cpu-idle-state-latencies-ns", NULL);
-       if (!idle_state_latency) {
-               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
-               return nr_idle_states;
+       flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+       if (of_property_read_u32_array(power_mgt,
+                       "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+               pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
+               goto out_free_flags;
        }
 
-       dt_idle_states = len_flags / sizeof(u32);
+       latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
+       rc = of_property_read_u32_array(power_mgt,
+               "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
+       if (rc) {
+               pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+               goto out_free_latency;
+       }
 
-       for (i = 0; i < dt_idle_states; i++) {
+       residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+       rc = of_property_read_u32_array(power_mgt,
+               "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
 
-               flags = be32_to_cpu(idle_state_flags[i]);
+       for (i = 0; i < dt_idle_states; i++) {
 
-               /* Cpuidle accepts exit_latency in us and we estimate
-                * target residency to be 10x exit_latency
+               /*
+                * Cpuidle accepts exit_latency and target_residency in us.
+                * Use default target_residency values if f/w does not expose it.
                 */
-               latency_ns = be32_to_cpu(idle_state_latency[i]);
-               if (flags & OPAL_PM_NAP_ENABLED) {
+               if (flags[i] & OPAL_PM_NAP_ENABLED) {
                        /* Add NAP state */
                        strcpy(powernv_states[nr_idle_states].name, "Nap");
                        strcpy(powernv_states[nr_idle_states].desc, "Nap");
                        powernv_states[nr_idle_states].flags = 0;
-                       powernv_states[nr_idle_states].exit_latency =
-                                       ((unsigned int)latency_ns) / 1000;
-                       powernv_states[nr_idle_states].target_residency =
-                                       ((unsigned int)latency_ns / 100);
+                       powernv_states[nr_idle_states].target_residency = 100;
                        powernv_states[nr_idle_states].enter = &nap_loop;
-                       nr_idle_states++;
-               }
-
-               if (flags & OPAL_PM_SLEEP_ENABLED ||
-                       flags & OPAL_PM_SLEEP_ENABLED_ER1) {
+               } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+                       flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
                        strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
                        powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
-                       powernv_states[nr_idle_states].exit_latency =
-                                       ((unsigned int)latency_ns) / 1000;
-                       powernv_states[nr_idle_states].target_residency =
-                                       ((unsigned int)latency_ns / 100);
+                       powernv_states[nr_idle_states].target_residency = 300000;
                        powernv_states[nr_idle_states].enter = &fastsleep_loop;
-                       nr_idle_states++;
                }
+
+               powernv_states[nr_idle_states].exit_latency =
+                               ((unsigned int)latency_ns[i]) / 1000;
+
+               if (!rc) {
+                       powernv_states[nr_idle_states].target_residency =
+                               ((unsigned int)residency_ns[i]) / 1000;
+               }
+
+               nr_idle_states++;
        }
 
+       kfree(residency_ns);
+out_free_latency:
+       kfree(latency_ns);
+out_free_flags:
+       kfree(flags);
+out:
        return nr_idle_states;
 }
 
index 4d534582514e014b5fdb3fc5e0b9db7e52c7b306..080bd2dbde4ba5408504a451e9454b51497202e3 100644 (file)
@@ -44,6 +44,12 @@ void disable_cpuidle(void)
        off = 1;
 }
 
+bool cpuidle_not_available(struct cpuidle_driver *drv,
+                          struct cpuidle_device *dev)
+{
+       return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
        return -ENODEV;
 }
 
-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-                                     struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+                             struct cpuidle_device *dev, bool freeze)
 {
        unsigned int latency_req = 0;
        int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
        return ret;
 }
 
+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+                              struct cpuidle_device *dev)
+{
+       return find_deepest_state(drv, dev, false);
+}
+
 static void enter_freeze_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
 {
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
 
 /**
  * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  *
  * If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick.  Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
  */
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-       struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int index;
 
        /*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
-       index = cpuidle_find_deepest_state(drv, dev, true);
-       if (index >= 0) {
-               enter_freeze_proper(drv, dev, index);
-               return;
-       }
-
-       /*
-        * It is not safe to freeze the tick, find the deepest state available
-        * at all and try to enter it normally.
-        */
-       index = cpuidle_find_deepest_state(drv, dev, false);
+       index = find_deepest_state(drv, dev, true);
        if (index >= 0)
-               cpuidle_enter(drv, dev, index);
-       else
-               arch_cpu_idle();
+               enter_freeze_proper(drv, dev, index);
 
-       /* Interrupts are enabled again here. */
-       local_irq_disable();
+       return index;
 }
 
 /**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-       if (off || !initialized)
-               return -ENODEV;
-
-       if (!drv || !dev || !dev->enabled)
-               return -EBUSY;
-
        return cpuidle_curr_governor->select(drv, dev);
 }
 
index e5541117b3e915de7f08ecc38a58ec9f72bf2c12..50ef8bd8708ba69d93808510c9a8be473d377d7c 100644 (file)
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
        if (WARN_ON(timeout < 0))
                return -EINVAL;
 
+       if (timeout == 0)
+               return fence_is_signaled(fence);
+
        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
        trace_fence_wait_end(fence);
index 3c97c8fa8d02687290e7340f82666f3466fbda2e..39920d77f288d7802c054130acf72caa8f40465d 100644 (file)
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
        unsigned seq, shared_count, i = 0;
        long ret = timeout;
 
+       if (!timeout)
+               return reservation_object_test_signaled_rcu(obj, wait_all);
+
 retry:
        fence = NULL;
        shared_count = 0;
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
        int ret = 1;
 
        if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-               int ret;
-
                fence = fence_get_rcu(lfence);
                if (!fence)
                        return -1;
index 09e2825a547a2098cc28a0e3e20079c33c52fe09..d9891d3461f6fbd42f5cb85041147513ca07ba73 100644 (file)
@@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
        struct at_xdmac_desc    *first = NULL, *prev = NULL;
        unsigned int            periods = buf_len / period_len;
        int                     i;
-       u32                     cfg;
 
        dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
                __func__, &buf_addr, buf_len, period_len,
@@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                if (direction == DMA_DEV_TO_MEM) {
                        desc->lld.mbr_sa = atchan->per_src_addr;
                        desc->lld.mbr_da = buf_addr + i * period_len;
-                       cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                } else {
                        desc->lld.mbr_sa = buf_addr + i * period_len;
                        desc->lld.mbr_da = atchan->per_dst_addr;
-                       cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
                }
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
                        | AT_XDMAC_MBR_UBC_NDE
-                       | period_len >> at_xdmac_get_dwidth(cfg);
+                       | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 
                dev_dbg(chan2dev(chan),
                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
index 455b7a4f1e87fe890019eb6acd459e8d7c000667..a8ad05291b274498b55deef4bba88a2dbf1ffbd3 100644 (file)
@@ -626,7 +626,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
        dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
 
        /* Check if we have any interrupt from the DMAC */
-       if (!status)
+       if (!status || !dw->in_use)
                return IRQ_NONE;
 
        /*
index 77a6dcf25b98b818b9d83ecbbf0d99edc3783056..194ec20c940841c9b0de473ac43417f4235c0482 100644 (file)
@@ -230,6 +230,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+       case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                return true;
        default:
                return false;
index 8926f271904e45dcf1f4ed7532afe6fc3cbdb323..eb410044e1af5415f4aa1aad48f7564588be1bb1 100644 (file)
@@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
 
        while (dint) {
                i = __ffs(dint);
+               /* only handle interrupts belonging to pdma driver*/
+               if (i >= pdev->dma_channels)
+                       break;
                dint &= (dint - 1);
                phy = &pdev->phy[i];
                ret = mmp_pdma_chan_handler(irq, phy);
@@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op)
        struct resource *iores;
        int i, ret, irq = 0;
        int dma_channels = 0, irq_num = 0;
+       const enum dma_slave_buswidth widths =
+               DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+               DMA_SLAVE_BUSWIDTH_4_BYTES;
 
        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
@@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op)
        pdev->device.device_config = mmp_pdma_config;
        pdev->device.device_terminate_all = mmp_pdma_terminate_all;
        pdev->device.copy_align = PDMA_ALIGNMENT;
+       pdev->device.src_addr_widths = widths;
+       pdev->device.dst_addr_widths = widths;
+       pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+       pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
        if (pdev->dev->coherent_dma_mask)
                dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
index 70c2fa9963cd4d942afa0b8a99f42c4d2ede784b..b6f4e1fc9c784cc0a3fce3d76877774760cd8c59 100644 (file)
@@ -110,7 +110,7 @@ struct mmp_tdma_chan {
        struct tasklet_struct           tasklet;
 
        struct mmp_tdma_desc            *desc_arr;
-       phys_addr_t                     desc_arr_phys;
+       dma_addr_t                      desc_arr_phys;
        int                             desc_num;
        enum dma_transfer_direction     dir;
        dma_addr_t                      dev_addr;
@@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
 static int mmp_tdma_disable_chan(struct dma_chan *chan)
 {
        struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+       u32 tdcr;
 
-       writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
-                                       tdmac->reg_base + TDCR);
+       tdcr = readl(tdmac->reg_base + TDCR);
+       tdcr |= TDCR_ABR;
+       tdcr &= ~TDCR_CHANEN;
+       writel(tdcr, tdmac->reg_base + TDCR);
 
        tdmac->status = DMA_COMPLETE;
 
@@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
        return -EAGAIN;
 }
 
+static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
+{
+       size_t reg;
+
+       if (tdmac->idx == 0) {
+               reg = __raw_readl(tdmac->reg_base + TDSAR);
+               reg -= tdmac->desc_arr[0].src_addr;
+       } else if (tdmac->idx == 1) {
+               reg = __raw_readl(tdmac->reg_base + TDDAR);
+               reg -= tdmac->desc_arr[0].dst_addr;
+       } else
+               return -EINVAL;
+
+       return reg;
+}
+
 static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
 {
        struct mmp_tdma_chan *tdmac = dev_id;
 
        if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
-               tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
                tasklet_schedule(&tdmac->tasklet);
                return IRQ_HANDLED;
        } else
@@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
        int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
        gpool = tdmac->pool;
-       if (tdmac->desc_arr)
+       if (gpool && tdmac->desc_arr)
                gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                                size);
        tdmac->desc_arr = NULL;
@@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
 {
        struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 
+       tdmac->pos = mmp_tdma_get_pos(tdmac);
        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
                         tdmac->buf_len - tdmac->pos);
 
@@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        int i, ret;
        int irq = 0, irq_num = 0;
        int chan_num = TDMA_CHANNEL_NUM;
-       struct gen_pool *pool;
+       struct gen_pool *pool = NULL;
 
        of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
        if (of_id)
index d7a33b3ac46603883e61577f80e29076c0b71221..9c914d62590626fb8f407d7f0bed63b27c37a4f1 100644 (file)
@@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
        [BAM_P_IRQ_STTS]        = { 0x1010, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_CLR]         = { 0x1014, 0x1000, 0x00, 0x00 },
        [BAM_P_IRQ_EN]          = { 0x1018, 0x1000, 0x00, 0x00 },
-       [BAM_P_EVNT_DEST_ADDR]  = { 0x102C, 0x00, 0x1000, 0x00 },
-       [BAM_P_EVNT_REG]        = { 0x1018, 0x00, 0x1000, 0x00 },
-       [BAM_P_SW_OFSTS]        = { 0x1000, 0x00, 0x1000, 0x00 },
+       [BAM_P_EVNT_DEST_ADDR]  = { 0x182C, 0x00, 0x1000, 0x00 },
+       [BAM_P_EVNT_REG]        = { 0x1818, 0x00, 0x1000, 0x00 },
+       [BAM_P_SW_OFSTS]        = { 0x1800, 0x00, 0x1000, 0x00 },
        [BAM_P_DATA_FIFO_ADDR]  = { 0x1824, 0x00, 0x1000, 0x00 },
        [BAM_P_DESC_FIFO_ADDR]  = { 0x181C, 0x00, 0x1000, 0x00 },
        [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
@@ -1143,6 +1143,10 @@ static int bam_dma_probe(struct platform_device *pdev)
        dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
 
        /* initialize dmaengine apis */
+       bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
        bdev->common.device_alloc_chan_resources = bam_alloc_chan;
        bdev->common.device_free_chan_resources = bam_free_chan;
        bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
index b2431aa300331270fa949ea5c14c41602a6c8e69..9f1d4c7dbab8389039e4ae1da636ca13521314ee 100644 (file)
@@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
        }
 }
 
-static void sh_dmae_shutdown(struct platform_device *pdev)
-{
-       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
-       sh_dmae_ctl_stop(shdev);
-}
-
 #ifdef CONFIG_PM
 static int sh_dmae_runtime_suspend(struct device *dev)
 {
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+       sh_dmae_ctl_stop(shdev);
        return 0;
 }
 
@@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int sh_dmae_suspend(struct device *dev)
 {
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+       sh_dmae_ctl_stop(shdev);
        return 0;
 }
 
@@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver sh_dmae_driver = {
-       .driver         = {
+       .driver         = {
                .pm     = &sh_dmae_pm,
                .name   = SH_DMAE_DRV_NAME,
                .of_match_table = sh_dmae_of_match,
        },
        .remove         = sh_dmae_remove,
-       .shutdown       = sh_dmae_shutdown,
 };
 
 static int __init sh_dmae_init(void)
index c5f7b4e9eb6c6e490454958473893820bccaeb10..69fac068669fde566f41013cefbdf48db023466c 100644 (file)
@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
  *     We have to be cautious here. We have seen BIOSes with DMI pointers
  *     pointing to completely the wrong place for example
  */
-static void dmi_table(u8 *buf, int len, int num,
+static void dmi_table(u8 *buf, u32 len, int num,
                      void (*decode)(const struct dmi_header *, void *),
                      void *private_data)
 {
@@ -92,12 +92,6 @@ static void dmi_table(u8 *buf, int len, int num,
        while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
                const struct dmi_header *dm = (const struct dmi_header *)data;
 
-               /*
-                * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
-                */
-               if (dm->type == DMI_ENTRY_END_OF_TABLE)
-                       break;
-
                /*
                 *  We want to know the total length (formatted area and
                 *  strings) before decoding to make sure we won't run off the
@@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
                        data++;
                if (data - buf < len - 1)
                        decode(dm, private_data);
+
+               /*
+                * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+                */
+               if (dm->type == DMI_ENTRY_END_OF_TABLE)
+                       break;
+
                data += 2;
                i++;
        }
 }
 
 static phys_addr_t dmi_base;
-static u16 dmi_len;
+static u32 dmi_len;
 static u16 dmi_num;
 
 static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
index af5d63c7cc53ded6b5ab4130b2b9279a5d52e988..f07d4a67fa76b3a3cb542e31a24a093c6f7aff97 100644 (file)
@@ -75,29 +75,25 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
        unsigned long key;
        u32 desc_version;
 
-       *map_size = 0;
-       *desc_size = 0;
-       key = 0;
-       status = efi_call_early(get_memory_map, map_size, NULL,
-                               &key, desc_size, &desc_version);
-       if (status != EFI_BUFFER_TOO_SMALL)
-               return EFI_LOAD_ERROR;
-
+       *map_size = sizeof(*m) * 32;
+again:
        /*
         * Add an additional efi_memory_desc_t because we're doing an
         * allocation which may be in a new descriptor region.
         */
-       *map_size += *desc_size;
+       *map_size += sizeof(*m);
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                *map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
+       *desc_size = 0;
+       key = 0;
        status = efi_call_early(get_memory_map, map_size, m,
                                &key, desc_size, &desc_version);
        if (status == EFI_BUFFER_TOO_SMALL) {
                efi_call_early(free_pool, m);
-               return EFI_LOAD_ERROR;
+               goto again;
        }
 
        if (status != EFI_SUCCESS)
@@ -183,12 +179,12 @@ again:
                start = desc->phys_addr;
                end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
 
-               if ((start + size) > end || (start + size) > max)
-                       continue;
-
-               if (end - size > max)
+               if (end > max)
                        end = max;
 
+               if ((start + size) > end)
+                       continue;
+
                if (round_down(end - size, align) < start)
                        continue;
 
index 472fb5b8779f3723f969998b54716b3081901649..9cdbc0c9cb2da87abc65dda442fc75a1612cc51e 100644 (file)
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
        struct gpio_chip gpio_chip;
 };
 
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
 static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
        int val;
 
        val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
                              int value)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
        if (value)
                tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
                                int value)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
        /* Set the initial value */
        tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 
 static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-       struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+       struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+       struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
        return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
                                                                GPIO_CFG_MASK);
index 8cad8e400b44d674ad144e817daa7b63139f2041..4650bf830d6b6306f96e309d4f2782da8a859575 100644 (file)
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
 
        ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
        if (ret < 0) {
-               /* We've found the gpio chip, but the translation failed.
-                * Return true to stop looking and return the translation
-                * error via out_gpio
+               /* We've found a gpio chip, but the translation failed.
+                * Store translation error in out_gpio.
+                * Return false to keep looking, as more than one gpio chip
+                * could be registered per of-node.
                 */
                gg_data->out_gpio = ERR_PTR(ret);
-               return true;
+               return false;
         }
 
        gg_data->out_gpio = gpiochip_get_desc(gc, ret);
index b3589d0e39b9c2027a4e680625663dfdcbe3cc63..910ff8ab9c9cfb018c6a18e8ae3126767b6e2580 100644 (file)
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
        return KFD_MQD_TYPE_CP;
 }
 
-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+unsigned int get_first_pipe(struct device_queue_manager *dqm)
 {
-       BUG_ON(!dqm);
+       BUG_ON(!dqm || !dqm->dev);
        return dqm->dev->shared_resources.first_compute_pipe;
 }
 
+unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+       BUG_ON(!dqm || !dqm->dev);
+       return dqm->dev->shared_resources.compute_pipe_count;
+}
+
 static inline unsigned int get_pipes_num_cpsch(void)
 {
        return PIPE_PER_ME_CP_SCHEDULING;
index d64f86cda34f5a155e176526d5f3a3463b96c826..488f51d19427a8273b672df18962a4dafe9feaf5 100644 (file)
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd);
 int init_pipelines(struct device_queue_manager *dqm,
                unsigned int pipes_num, unsigned int first_pipe);
+unsigned int get_first_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
 extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
        return (pdd->lds_base >> 60) & 0x0E;
 }
 
-extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
-{
-       BUG_ON(!dqm || !dqm->dev);
-       return dqm->dev->shared_resources.compute_pipe_count;
-}
-
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
index 6b072466e2a6f628c8a1ff4a026cf5788af65c43..5469efe0523e8d85b11ad33729bfed9036051dfa 100644 (file)
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
 
 static int initialize_cpsch_cik(struct device_queue_manager *dqm)
 {
-       return init_pipelines(dqm, get_pipes_num(dqm), 0);
+       return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 }
index 0409b907de5d5cc771543d17cc50ed20bad3e3ba..b3e3068c6ec07f8caa8bac6a4edc68e75741190d 100644 (file)
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
                     (adj->crtc_hdisplay - 1) |
                     ((adj->crtc_vdisplay - 1) << 16));
 
-       cfg = ATMEL_HLCDC_CLKPOL;
+       cfg = 0;
 
        prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
        mode_rate = mode->crtc_clock * 1000;
index 7320a6c6613f174c1776f75c6dcc832cf91f1ad8..c1cb17493e0d4e212821c832009012c2384c7143 100644 (file)
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
        pm_runtime_enable(dev->dev);
 
-       pm_runtime_put_sync(dev->dev);
-
        ret = atmel_hlcdc_dc_modeset_init(dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize mode setting\n");
index 063d2a7b941fcaa4f5b527afd41a193fd382d64d..e79bd9ba474b3c181140b0d9f519a96066443acd 100644 (file)
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
 
        /* Disable the layer */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
-                    ATMEL_HLCDC_LAYER_RST);
+                    ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
+                    ATMEL_HLCDC_LAYER_UPDATE);
 
        /* Clear all pending interrupts */
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
index 6b00173d1be4fd6c96f749a803dd23ab5495de88..f6d04c7b5115a965bce68ad265080a46fcad3a6b 100644 (file)
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-                                                       struct drm_mode_fb_cmd2 *r,
-                                                       struct drm_file *file_priv);
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+                           struct drm_mode_fb_cmd2 *r,
+                           struct drm_file *file_priv);
 
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)                         \
@@ -2127,7 +2128,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
        mutex_lock(&dev->mode_config.mutex);
-       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
        connector = drm_connector_find(dev, out_resp->connector_id);
        if (!connector) {
@@ -2157,6 +2157,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        out_resp->mm_height = connector->display_info.height_mm;
        out_resp->subpixel = connector->display_info.subpixel_order;
        out_resp->connection = connector->status;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        encoder = drm_connector_get_encoder(connector);
        if (encoder)
                out_resp->encoder_id = encoder->base.id;
@@ -2907,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
         */
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (req->handle) {
-                       fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+                       fb = internal_framebuffer_create(dev, &fbreq, file_priv);
                        if (IS_ERR(fb)) {
                                DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
                                return PTR_ERR(fb);
                        }
-
-                       drm_framebuffer_reference(fb);
                } else {
                        fb = NULL;
                }
@@ -3266,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
        return 0;
 }
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-                                                       struct drm_mode_fb_cmd2 *r,
-                                                       struct drm_file *file_priv)
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+                           struct drm_mode_fb_cmd2 *r,
+                           struct drm_file *file_priv)
 {
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_framebuffer *fb;
@@ -3300,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
                return fb;
        }
 
-       mutex_lock(&file_priv->fbs_lock);
-       r->fb_id = fb->base.id;
-       list_add(&fb->filp_head, &file_priv->fbs);
-       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-       mutex_unlock(&file_priv->fbs_lock);
-
        return fb;
 }
 
@@ -3327,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
 int drm_mode_addfb2(struct drm_device *dev,
                    void *data, struct drm_file *file_priv)
 {
+       struct drm_mode_fb_cmd2 *r = data;
        struct drm_framebuffer *fb;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       fb = add_framebuffer_internal(dev, data, file_priv);
+       fb = internal_framebuffer_create(dev, r, file_priv);
        if (IS_ERR(fb))
                return PTR_ERR(fb);
 
+       /* Transfer ownership to the filp for reaping on close */
+
+       DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+       mutex_lock(&file_priv->fbs_lock);
+       r->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+       mutex_unlock(&file_priv->fbs_lock);
+
        return 0;
 }
 
index 9a5b68717ec8c31fbd27bfa313833b568444fce9..379ab45557568c6e21615526a96a60e145ed1ee5 100644 (file)
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_sideband_msg_tx *txmsg)
 {
        bool ret;
-       mutex_lock(&mgr->qlock);
+
+       /*
+        * All updates to txmsg->state are protected by mgr->qlock, and the two
+        * cases we check here are terminal states. For those the barriers
+        * provided by the wake_up/wait_event pair are enough.
+        */
        ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
               txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-       mutex_unlock(&mgr->qlock);
        return ret;
 }
 
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
-/* must be called holding qlock */
 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 {
        struct drm_dp_sideband_msg_tx *txmsg;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&mgr->qlock));
+
        /* construct a chunk from the first msg in the tx_msg queue */
        if (list_empty(&mgr->tx_msg_downq)) {
                mgr->tx_down_in_progress = false;
index 04a209e2b66d7d61428398bb5b58b2222ec2a712..1134526286c819c87bc523a1b8852dc485804046 100644 (file)
  */
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                               unsigned long size,
+                                               u64 size,
                                                unsigned alignment,
                                                unsigned long color,
                                                enum drm_mm_search_flags flags);
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-                                               unsigned long size,
+                                               u64 size,
                                                unsigned alignment,
                                                unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
+                                               u64 start,
+                                               u64 end,
                                                enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
-                                unsigned long size, unsigned alignment,
+                                u64 size, unsigned alignment,
                                 unsigned long color,
                                 enum drm_mm_allocator_flags flags)
 {
        struct drm_mm *mm = hole_node->mm;
-       unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-       unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-       unsigned long adj_start = hole_start;
-       unsigned long adj_end = hole_end;
+       u64 hole_start = drm_mm_hole_node_start(hole_node);
+       u64 hole_end = drm_mm_hole_node_end(hole_node);
+       u64 adj_start = hole_start;
+       u64 adj_end = hole_end;
 
        BUG_ON(node->allocated);
 
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                adj_start = adj_end - size;
 
        if (alignment) {
-               unsigned tmp = adj_start % alignment;
-               if (tmp) {
+               u64 tmp = adj_start;
+               unsigned rem;
+
+               rem = do_div(tmp, alignment);
+               if (rem) {
                        if (flags & DRM_MM_CREATE_TOP)
-                               adj_start -= tmp;
+                               adj_start -= rem;
                        else
-                               adj_start += alignment - tmp;
+                               adj_start += alignment - rem;
                }
        }
 
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
        struct drm_mm_node *hole;
-       unsigned long end = node->start + node->size;
-       unsigned long hole_start;
-       unsigned long hole_end;
+       u64 end = node->start + node->size;
+       u64 hole_start;
+       u64 hole_end;
 
        BUG_ON(node == NULL);
 
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-                              unsigned long size, unsigned alignment,
+                              u64 size, unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
                               enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
-                                      unsigned long size, unsigned alignment,
+                                      u64 size, unsigned alignment,
                                       unsigned long color,
-                                      unsigned long start, unsigned long end,
+                                      u64 start, u64 end,
                                       enum drm_mm_allocator_flags flags)
 {
        struct drm_mm *mm = hole_node->mm;
-       unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-       unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-       unsigned long adj_start = hole_start;
-       unsigned long adj_end = hole_end;
+       u64 hole_start = drm_mm_hole_node_start(hole_node);
+       u64 hole_end = drm_mm_hole_node_end(hole_node);
+       u64 adj_start = hole_start;
+       u64 adj_end = hole_end;
 
        BUG_ON(!hole_node->hole_follows || node->allocated);
 
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
        if (alignment) {
-               unsigned tmp = adj_start % alignment;
-               if (tmp) {
+               u64 tmp = adj_start;
+               unsigned rem;
+
+               rem = do_div(tmp, alignment);
+               if (rem) {
                        if (flags & DRM_MM_CREATE_TOP)
-                               adj_start -= tmp;
+                               adj_start -= rem;
                        else
-                               adj_start += alignment - tmp;
+                               adj_start += alignment - rem;
                }
        }
 
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-                                       unsigned long size, unsigned alignment,
+                                       u64 size, unsigned alignment,
                                        unsigned long color,
-                                       unsigned long start, unsigned long end,
+                                       u64 start, u64 end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags)
 {
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-static int check_free_hole(unsigned long start, unsigned long end,
-                          unsigned long size, unsigned alignment)
+static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
 {
        if (end - start < size)
                return 0;
 
        if (alignment) {
-               unsigned tmp = start % alignment;
-               if (tmp)
-                       start += alignment - tmp;
+               u64 tmp = start;
+               unsigned rem;
+
+               rem = do_div(tmp, alignment);
+               if (rem)
+                       start += alignment - rem;
        }
 
        return end >= start + size;
 }
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                                     unsigned long size,
+                                                     u64 size,
                                                      unsigned alignment,
                                                      unsigned long color,
                                                      enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
-       unsigned long adj_start;
-       unsigned long adj_end;
-       unsigned long best_size;
+       u64 adj_start;
+       u64 adj_end;
+       u64 best_size;
 
        BUG_ON(mm->scanned_blocks);
 
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 
        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
-               unsigned long hole_size = adj_end - adj_start;
+               u64 hole_size = adj_end - adj_start;
 
                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 }
 
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-                                                       unsigned long size,
+                                                       u64 size,
                                                        unsigned alignment,
                                                        unsigned long color,
-                                                       unsigned long start,
-                                                       unsigned long end,
+                                                       u64 start,
+                                                       u64 end,
                                                        enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
-       unsigned long adj_start;
-       unsigned long adj_end;
-       unsigned long best_size;
+       u64 adj_start;
+       u64 adj_end;
+       u64 best_size;
 
        BUG_ON(mm->scanned_blocks);
 
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 
        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
-               unsigned long hole_size = adj_end - adj_start;
+               u64 hole_size = adj_end - adj_start;
 
                if (adj_start < start)
                        adj_start = start;
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan(struct drm_mm *mm,
-                     unsigned long size,
+                     u64 size,
                      unsigned alignment,
                      unsigned long color)
 {
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
-                                unsigned long size,
+                                u64 size,
                                 unsigned alignment,
                                 unsigned long color,
-                                unsigned long start,
-                                unsigned long end)
+                                u64 start,
+                                u64 end)
 {
        mm->scan_color = color;
        mm->scan_alignment = alignment;
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
-       unsigned long hole_start, hole_end;
-       unsigned long adj_start, adj_end;
+       u64 hole_start, hole_end;
+       u64 adj_start, adj_end;
 
        mm->scanned_blocks++;
 
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
  *
  * Note that @mm must be cleared to 0 before calling this function.
  */
-void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 {
        INIT_LIST_HEAD(&mm->hole_stack);
        mm->scanned_blocks = 0;
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
-static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
-                                      const char *prefix)
+static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
+                                    const char *prefix)
 {
-       unsigned long hole_start, hole_end, hole_size;
+       u64 hole_start, hole_end, hole_size;
 
        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
-               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
-                       prefix, hole_start, hole_end,
-                       hole_size);
+               pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
+                        hole_end, hole_size);
                return hole_size;
        }
 
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
        struct drm_mm_node *entry;
-       unsigned long total_used = 0, total_free = 0, total = 0;
+       u64 total_used = 0, total_free = 0, total = 0;
 
        total_free += drm_mm_debug_hole(&mm->head_node, prefix);
 
        drm_mm_for_each_node(entry, mm) {
-               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
-                       prefix, entry->start, entry->start + entry->size,
-                       entry->size);
+               pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
+                        entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_debug_hole(entry, prefix);
        }
        total = total_free + total_used;
 
-       printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
-               total_used, total_free);
+       pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
+                total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-       unsigned long hole_start, hole_end, hole_size;
+       u64 hole_start, hole_end, hole_size;
 
        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
-               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-                               hole_start, hole_end, hole_size);
+               seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
+                          hole_end, hole_size);
                return hole_size;
        }
 
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
        struct drm_mm_node *entry;
-       unsigned long total_used = 0, total_free = 0, total = 0;
+       u64 total_used = 0, total_free = 0, total = 0;
 
        total_free += drm_mm_dump_hole(m, &mm->head_node);
 
        drm_mm_for_each_node(entry, mm) {
-               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
-                               entry->start, entry->start + entry->size,
-                               entry->size);
+               seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
+                          entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;
 
-       seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
+       seq_printf(m, "total: %llu, used %llu free %llu\n", total,
+                  total_used, total_free);
        return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
index 96e811fe24ca79cd1b6438591cf1eb05379f0edd..e8b18e542da4311169a4b0ec6c6f45fe73a711cd 100644 (file)
@@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                        seq_puts(m, " (pp");
                else
                        seq_puts(m, " (g");
-               seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+               seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
                           vma->node.start, vma->node.size,
                           vma->ggtt_view.type);
        }
        if (obj->stolen)
-               seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+               seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
        if (obj->pin_mappable || obj->fault_mappable) {
                char s[3], *t = s;
                if (obj->pin_mappable)
index 8039cec71fc24a3750812dc519b08bee6a824f41..cc6ea53d2b81951553d4b135a1760cb127e574c2 100644 (file)
@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
        return 0;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev)
+static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        int ret;
@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
        }
 
        pci_disable_device(drm_dev->pdev);
-       pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+       /*
+        * During hibernation on some GEN4 platforms the BIOS may try to access
+        * the device even though it's already in D3 and hang the machine. So
+        * leave the device in D0 on those platforms and hope the BIOS will
+        * power down the device properly. Platforms where this was seen:
+        * Lenovo Thinkpad X301, X61s
+        */
+       if (!(hibernation &&
+             drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+             INTEL_INFO(dev_priv)->gen == 4))
+               pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
        return 0;
 }
@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
        if (error)
                return error;
 
-       return i915_drm_suspend_late(dev);
+       return i915_drm_suspend_late(dev, false);
 }
 
 static int i915_drm_resume(struct drm_device *dev)
@@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev)
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend_late(drm_dev);
+       return i915_drm_suspend_late(drm_dev, false);
+}
+
+static int i915_pm_poweroff_late(struct device *dev)
+{
+       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
+
+       return i915_drm_suspend_late(drm_dev, true);
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = {
        .thaw_early = i915_pm_resume_early,
        .thaw = i915_pm_resume,
        .poweroff = i915_pm_suspend,
-       .poweroff_late = i915_pm_suspend_late,
+       .poweroff_late = i915_pm_poweroff_late,
        .restore_early = i915_pm_resume_early,
        .restore = i915_pm_resume,
 
index f2a825e39646427b7e4617627bd8ab3c3b9acb4d..8727086cf48ccce9e6548df8cf4e1d0df59012e7 100644 (file)
@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
  * number comparisons on buffer last_read|write_seqno. It also allows an
  * emission time to be associated with the request for tracking how far ahead
  * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init().
  */
 struct drm_i915_gem_request {
        struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
        /** Position in the ringbuffer of the end of the whole request */
        u32 tail;
 
-       /** Context related to this request */
+       /**
+        * Context related to this request
+        * Contexts are refcounted, so when this request is associated with a
+        * context, we must increment the context's refcount, to guarantee that
+        * it persists while any request is linked to it. Requests themselves
+        * are also refcounted, so the request will only be freed when the last
+        * reference to it is dismissed, and the code in
+        * i915_gem_request_free() will then decrement the refcount on the
+        * context.
+        */
        struct intel_context *ctx;
 
        /** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
                                 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
+                                (INTEL_DEVID(dev) & 0xf) == 0xb ||     \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
index c26d36cc4b313ac4d03ade4167739d7fd7be6e9c..5b205863b6596d7fa72e6f465a017d9bcb204e78 100644 (file)
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                if (submit_req->ctx != ring->default_context)
                        intel_lr_context_unpin(ring, submit_req->ctx);
 
-               i915_gem_context_unreference(submit_req->ctx);
-               kfree(submit_req);
+               i915_gem_request_unreference(submit_req);
        }
 
        /*
@@ -2937,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        req = obj->last_read_req;
 
        /* Do this after OLR check to make sure we make forward progress polling
-        * on this IOCTL with a timeout <=0 (like busy ioctl)
+        * on this IOCTL with a timeout == 0 (like busy ioctl)
         */
-       if (args->timeout_ns <= 0) {
+       if (args->timeout_ns == 0) {
                ret = -ETIME;
                goto out;
        }
@@ -2949,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+       ret = __i915_wait_request(req, reset_counter, true,
+                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                  file->driver_priv);
        mutex_lock(&dev->struct_mutex);
        i915_gem_request_unreference(req);
@@ -4793,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
 
+       /* Double layer security blanket, see i915_gem_init() */
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
@@ -4825,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                ret = ring->init_hw(ring);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4842,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev)
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
 
-               return ret;
+               goto out;
        }
 
+out:
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
 }
 
@@ -4878,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev)
                dev_priv->gt.stop_ring = intel_logical_ring_stop;
        }
 
+       /* This is just a security blanket to placate dragons.
+        * On some systems, we very sporadically observe that the first TLBs
+        * used by the CS may be stale, despite us poking the TLB reset. If
+        * we hold the forcewake during initialisation these problems
+        * just magically go away.
+        */
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
        ret = i915_gem_init_userptr(dev);
        if (ret)
                goto out_unlock;
@@ -4904,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev)
        }
 
 out_unlock:
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
index 746f77fb57a314d5b94542b12b67b7a16bfb50eb..dccdc8aad2e24c2351766a1ec591f5a57dd3940b 100644 (file)
@@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
        ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
-       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+       DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
                         ppgtt->node.size >> 20,
                         ppgtt->node.start / PAGE_SIZE);
 
@@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
-                                 unsigned long *start,
-                                 unsigned long *end)
+                                 u64 *start,
+                                 u64 *end)
 {
        if (node->color != color)
                *start += 4096;
index a2045848bd1a3d5d37c299d34b7e19dce50173d7..9c6f93ec886b7023b0ce8fa2753bacaaf041aaff 100644 (file)
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                        stolen_offset, gtt_offset, size);
 
        /* KISS and expect everything to be page-aligned */
-       BUG_ON(stolen_offset & 4095);
-       BUG_ON(size & 4095);
-
-       if (WARN_ON(size == 0))
+       if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+           WARN_ON(stolen_offset & 4095))
                return NULL;
 
        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
index 7a24bd1a51f648b340ce15d5ee98771cd1fd77d9..6377b22269ad1e7157058baf1447cb548362306c 100644 (file)
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       mutex_lock(&dev->struct_mutex);
        if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
-               drm_gem_object_unreference_unlocked(&obj->base);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err;
        }
 
        if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                }
        }
 
-       mutex_lock(&dev->struct_mutex);
        if (args->tiling_mode != obj->tiling_mode ||
            args->stride != obj->stride) {
                /* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                obj->bit_17 = NULL;
        }
 
+err:
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 
index 4145d95902f54fbd9fb4f92668fe10fde2b330a0..ede5bbbd8a08a175873e24215a754b5be28ef23d 100644 (file)
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        while (true) {
                /* Find, clear, then process each source of interrupt */
 
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        u32 master_ctl, iir;
        irqreturn_t ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        for (;;) {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        enum pipe pipe;
        u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        if (IS_GEN9(dev))
                aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
        iir = I915_READ(IIR);
 
        for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
        dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
        dev_priv->pm.irqs_enabled = false;
+       synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
index 3d220a67f8656ed9173b88672cd114893070aec0..9943c20a741d46bc8f40452bdf87dcfa15d50d8c 100644 (file)
@@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-       u32 base = plane_config->base;
+       u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+       u32 size_aligned = round_up(plane_config->base + plane_config->size,
+                                   PAGE_SIZE);
+
+       size_aligned -= base_aligned;
 
        if (plane_config->size == 0)
                return false;
 
-       obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
-                                                            plane_config->size);
+       obj = i915_gem_object_create_stolen_for_preallocated(dev,
+                                                            base_aligned,
+                                                            base_aligned,
+                                                            size_aligned);
        if (!obj)
                return false;
 
@@ -2725,10 +2731,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        case DRM_FORMAT_XRGB8888:
                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
                break;
+       case DRM_FORMAT_ARGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
        case DRM_FORMAT_XBGR8888:
                plane_ctl |= PLANE_CTL_ORDER_RGBX;
                plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
                break;
+       case DRM_FORMAT_ABGR8888:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
        case DRM_FORMAT_XRGB2101010:
                plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
                break;
@@ -6627,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);
 
-       plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+       plane_config->size = fb->pitches[0] * aligned_height;
 
        DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);
 
-       plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+       plane_config->size = fb->pitches[0] * aligned_height;
 
        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
        aligned_height = intel_fb_align_height(dev, fb->height,
                                               plane_config->tiling);
 
-       plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+       plane_config->size = fb->pitches[0] * aligned_height;
 
        DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8713,7 @@ retry:
                        old->release_fb->funcs->destroy(old->release_fb);
                goto fail;
        }
+       crtc->primary->crtc = crtc;
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -9700,7 +9716,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       WARN_ON(!in_irq());
+       WARN_ON(!in_interrupt());
 
        if (crtc == NULL)
                return;
@@ -12182,9 +12198,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
                return -ENOMEM;
        }
 
-       if (fb == crtc->cursor->fb)
-               return 0;
-
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
        if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13109,9 @@ static struct intel_quirk intel_quirks[] = {
 
        /* HP Chromebook 14 (Celeron 2955U) */
        { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+       /* Dell Chromebook 11 */
+       { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
index 04e248dd2259715426e8ad82b817b59d3634b24a..54daa66c697077f5b0609ddb5eec0d7989db780a 100644 (file)
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static bool
-__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
-                                     enum pipe pipe)
-{
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
 /**
  * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
  * @dev_priv: i915 device instance
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+       /* We may be called too early in init, thanks BIOS! */
+       if (crtc == NULL)
+               return;
+
        /* GMCH can't disable fifo underruns, filter them. */
        if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
-           !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+           to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
                return;
 
        if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
index 0f358c5999ec8e0c8771a3d932447d281748a8b1..e8d3da9f337388e5a1647766a61281e4d25aa22d 100644 (file)
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                 * If there isn't a request associated with this submission,
                 * create one as a temporary holder.
                 */
-               WARN(1, "execlist context submission without request");
                request = kzalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;
                request->ring = ring;
                request->ctx = to;
+               kref_init(&request->ref);
+               request->uniq = dev_priv->request_uniq++;
+               i915_gem_context_reference(request->ctx);
        } else {
+               i915_gem_request_reference(request);
                WARN_ON(to != request->ctx);
        }
        request->tail = tail;
-       i915_gem_request_reference(request);
-       i915_gem_context_reference(request->ctx);
 
        intel_runtime_pm_get(dev_priv);
 
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
                if (ctx_obj && (ctx != ring->default_context))
                        intel_lr_context_unpin(ring, ctx);
                intel_runtime_pm_put(dev_priv);
-               i915_gem_context_unreference(ctx);
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
index c47a3baa53d5963cdfc1663320ac2e88398f276d..4e8fb891d4eac88a4bbc3f68dcd01749948d13d6 100644 (file)
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 
                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case of MT access is
-                * not working
+                * not working. At this stage we don't know which flavour of
+                * ivb this is, so it is better to also reset the gen6 fw
+                * registers before the ecobus check.
                 */
+
+               __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+               __raw_posting_read(dev_priv, ECOBUS);
+
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
index 121d30ca2d4448f379c743ac781bb9910ccd2d44..87fe8ed92ebeb8bf996fb95b79731dbebdbed2dd 100644 (file)
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
                118800000, { 0x091c, 0x091c, 0x06dc },
        }, {
                216000000, { 0x06dc, 0x0b5c, 0x091c },
-       }
+       }, {
+               ~0UL, { 0x0000, 0x0000, 0x0000 },
+       },
 };
 
 static const struct dw_hdmi_sym_term imx_sym_term[] = {
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
        .destroy = drm_encoder_cleanup,
 };
 
+static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
+                                                 struct drm_display_mode *mode)
+{
+       if (mode->clock < 13500)
+               return MODE_CLOCK_LOW;
+       if (mode->clock > 266000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
+                                                  struct drm_display_mode *mode)
+{
+       if (mode->clock < 13500)
+               return MODE_CLOCK_LOW;
+       if (mode->clock > 270000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
 static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
-       .mpll_cfg = imx_mpll_cfg,
-       .cur_ctr  = imx_cur_ctr,
-       .sym_term = imx_sym_term,
-       .dev_type = IMX6Q_HDMI,
+       .mpll_cfg   = imx_mpll_cfg,
+       .cur_ctr    = imx_cur_ctr,
+       .sym_term   = imx_sym_term,
+       .dev_type   = IMX6Q_HDMI,
+       .mode_valid = imx6q_hdmi_mode_valid,
 };
 
 static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
        .cur_ctr  = imx_cur_ctr,
        .sym_term = imx_sym_term,
        .dev_type = IMX6DL_HDMI,
+       .mode_valid = imx6dl_hdmi_mode_valid,
 };
 
 static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
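Both mode_valid callbacks above are the same pixel-clock range check with different upper bounds (266 MHz for the i.MX6Q path, 270 MHz for the i.MX6DL path; drm_display_mode::clock is in kHz). A minimal sketch of that check as a shared helper — the helper name and the idea of factoring it out are illustrative only, not part of the driver:

	static enum drm_mode_status clock_in_range(int clock_khz, int min_khz, int max_khz)
	{
		if (clock_khz < min_khz)
			return MODE_CLOCK_LOW;	/* pixel clock below what the PHY can lock to */
		if (clock_khz > max_khz)
			return MODE_CLOCK_HIGH;	/* pixel clock above the PHY limit */
		return MODE_OK;
	}

	/* e.g. the imx6q callback is equivalent to clock_in_range(mode->clock, 13500, 266000) */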
index 1b86aac0b341c7083b15e03163338989a1543592..2d6dc94e1e64e973f61a1294c24bba6d2d788ae2 100644 (file)
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
 {
        struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
-       struct drm_display_mode *mode = &encoder->crtc->hwmode;
        u32 pixel_fmt;
-       unsigned long serial_clk;
-       unsigned long di_clk = mode->clock * 1000;
-       int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
-
-       if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
-               /* dual channel LVDS mode */
-               serial_clk = 3500UL * mode->clock;
-               imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
-               imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
-       } else {
-               serial_clk = 7000UL * mode->clock;
-               imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
-                               di_clk);
-       }
 
        switch (imx_ldb_ch->chno) {
        case 0:
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
        int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+       unsigned long serial_clk;
+       unsigned long di_clk = mode->clock * 1000;
+       int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
 
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
                         "%s: mode exceeds 85 MHz pixel clock\n", __func__);
        }
 
+       if (dual) {
+               serial_clk = 3500UL * mode->clock;
+               imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
+               imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
+       } else {
+               serial_clk = 7000UL * mode->clock;
+               imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
+                                 di_clk);
+       }
+
        /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
        if (imx_ldb_ch == &ldb->channel[0]) {
                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
index 5e83e007080f7ae757b3f6c567cd791a595bd9d1..900dda6a8e71b5501293b8d4be06ddb8288dee27 100644 (file)
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
        }
 
        panel_node = of_parse_phandle(np, "fsl,panel", 0);
-       if (panel_node)
+       if (panel_node) {
                imxpd->panel = of_drm_find_panel(panel_node);
+               if (!imxpd->panel)
+                       return -EPROBE_DEFER;
+       }
 
        imxpd->dev = dev;
 
index 8edd531cb62166ad1291be18ffc26ba033cbc71d..7369ee7f0c5544a6c44b9850e4ded92217da2596 100644 (file)
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
 void mdp4_irq_preinstall(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       mdp4_enable(mdp4_kms);
        mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+       mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+       mdp4_disable(mdp4_kms);
 }
 
 int mdp4_irq_postinstall(struct msm_kms *kms)
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms)
 void mdp4_irq_uninstall(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       mdp4_enable(mdp4_kms);
        mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+       mdp4_disable(mdp4_kms);
 }
 
 irqreturn_t mdp4_irq(struct msm_kms *kms)
index 09b4a25eb553fa9cad7f68ad8a61afa13a8d90a0..c276624290afedb3f8de9fe9db2ed9271cab5ff4 100644 (file)
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (   8253 bytes, from 2014-12-08 16:13:00)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml            (  27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2014-06-02 18:31:15)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2015-01-23 16:20:19)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx)
                case 2: return (mdp5_cfg->lm.base[2]);
                case 3: return (mdp5_cfg->lm.base[3]);
                case 4: return (mdp5_cfg->lm.base[4]);
+               case 5: return (mdp5_cfg->lm.base[5]);
                default: return INVALID_IDX(idx);
        }
 }
index 46fac545dc2bb8f84e8d6bc241d88931d926b23e..2f2863cf8b45f21e692e3e50f09c88275d83da63 100644 (file)
@@ -62,8 +62,8 @@ struct mdp5_crtc {
 
                /* current cursor being scanned out: */
                struct drm_gem_object *scanout_bo;
-               uint32_t width;
-               uint32_t height;
+               uint32_t width, height;
+               uint32_t x, y;
        } cursor;
 };
 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
        struct drm_plane *plane;
        uint32_t flush_mask = 0;
 
-       /* we could have already released CTL in the disable path: */
-       if (!mdp5_crtc->ctl)
+       /* this should not happen: */
+       if (WARN_ON(!mdp5_crtc->ctl))
                return;
 
        drm_atomic_crtc_for_each_plane(plane, crtc) {
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                mdp5_plane_complete_flip(plane);
        }
+
+       if (mdp5_crtc->ctl && !crtc->state->enable) {
+               mdp5_ctl_release(mdp5_crtc->ctl);
+               mdp5_crtc->ctl = NULL;
+       }
 }
 
 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
+       /*
+        * If no CTL has been allocated in mdp5_crtc_atomic_check(),
+        * it means we are trying to flush a CRTC whose state is disabled:
+        * nothing else needs to be done.
+        */
+       if (unlikely(!mdp5_crtc->ctl))
+               return;
+
        blend_setup(crtc);
        crtc_flush_all(crtc);
        request_pending(crtc, PENDING_FLIP);
-
-       if (mdp5_crtc->ctl && !crtc->state->enable) {
-               mdp5_ctl_release(mdp5_crtc->ctl);
-               mdp5_crtc->ctl = NULL;
-       }
 }
 
 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
        return -EINVAL;
 }
 
+static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
+{
+       struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+       uint32_t xres = crtc->mode.hdisplay;
+       uint32_t yres = crtc->mode.vdisplay;
+
+       /*
+        * Cursor Region Of Interest (ROI) is a plane read from cursor
+        * buffer to render. The ROI region is determined by the visibility of
+        * the cursor point. In the default Cursor image the cursor point will
+        * be at the top left of the cursor image, unless it is specified
+        * otherwise using hotspot feature.
+        *
+        * If the cursor point reaches the right (xres - x < cursor.width) or
+        * bottom (yres - y < cursor.height) boundary of the screen, then ROI
+        * width and ROI height need to be evaluated to crop the cursor image
+        * accordingly.
+        * (xres-x) will be new cursor width when x > (xres - cursor.width)
+        * (yres-y) will be new cursor height when y > (yres - cursor.height)
+        */
+       *roi_w = min(mdp5_crtc->cursor.width, xres -
+                       mdp5_crtc->cursor.x);
+       *roi_h = min(mdp5_crtc->cursor.height, yres -
+                       mdp5_crtc->cursor.y);
+}
+
 static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
                struct drm_file *file, uint32_t handle,
                uint32_t width, uint32_t height)
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        unsigned int depth;
        enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+       uint32_t roi_w, roi_h;
        unsigned long flags;
 
        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        old_bo = mdp5_crtc->cursor.scanout_bo;
 
+       mdp5_crtc->cursor.scanout_bo = cursor_bo;
+       mdp5_crtc->cursor.width = width;
+       mdp5_crtc->cursor.height = height;
+
+       get_roi(crtc, &roi_w, &roi_h);
+
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
-                       MDP5_LM_CURSOR_SIZE_ROI_H(height) |
-                       MDP5_LM_CURSOR_SIZE_ROI_W(width));
+                       MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+                       MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
 
-
        blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
-       blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
        blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
 
-       mdp5_crtc->cursor.scanout_bo = cursor_bo;
-       mdp5_crtc->cursor.width = width;
-       mdp5_crtc->cursor.height = height;
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
        ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
-       uint32_t xres = crtc->mode.hdisplay;
-       uint32_t yres = crtc->mode.vdisplay;
        uint32_t roi_w;
        uint32_t roi_h;
        unsigned long flags;
 
-       x = (x > 0) ? x : 0;
-       y = (y > 0) ? y : 0;
+       /* In case the CRTC is disabled, just drop the cursor update */
+       if (unlikely(!crtc->state->enable))
+               return 0;
 
-       /*
-        * Cursor Region Of Interest (ROI) is a plane read from cursor
-        * buffer to render. The ROI region is determined by the visiblity of
-        * the cursor point. In the default Cursor image the cursor point will
-        * be at the top left of the cursor image, unless it is specified
-        * otherwise using hotspot feature.
-        *
-        * If the cursor point reaches the right (xres - x < cursor.width) or
-        * bottom (yres - y < cursor.height) boundary of the screen, then ROI
-        * width and ROI height need to be evaluated to crop the cursor image
-        * accordingly.
-        * (xres-x) will be new cursor width when x > (xres - cursor.width)
-        * (yres-y) will be new cursor height when y > (yres - cursor.height)
-        */
-       roi_w = min(mdp5_crtc->cursor.width, xres - x);
-       roi_h = min(mdp5_crtc->cursor.height, yres - y);
+       mdp5_crtc->cursor.x = x = max(x, 0);
+       mdp5_crtc->cursor.y = y = max(y, 0);
+
+       get_roi(crtc, &roi_w, &roi_h);
 
        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .mode_fixup = mdp5_crtc_mode_fixup,
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
-       .prepare = mdp5_crtc_disable,
-       .commit = mdp5_crtc_enable,
+       .disable = mdp5_crtc_disable,
+       .enable = mdp5_crtc_enable,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
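The ROI cropping described in the get_roi() comment above is simply a clamp of the cursor rectangle against the right and bottom edges of the active mode. A worked sketch with illustrative numbers, assuming the cursor position has already been clamped to be non-negative and on-screen (as mdp5_crtc_cursor_move() does) and relying on the kernel's min() macro:

	/*
	 * 64x64 cursor on a 1920x1080 mode, cursor at (1900, 500):
	 *   roi_w = min(64, 1920 - 1900) = 20   -> only 20 visible columns remain
	 *   roi_h = min(64, 1080 -  500) = 64   -> full cursor height still visible
	 */
	static void crop_cursor_roi(uint32_t xres, uint32_t yres,
				    uint32_t x, uint32_t y,
				    uint32_t cur_w, uint32_t cur_h,
				    uint32_t *roi_w, uint32_t *roi_h)
	{
		*roi_w = min(cur_w, xres - x);	/* crop against the right edge */
		*roi_h = min(cur_h, yres - y);	/* crop against the bottom edge */
	}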
index d6a14bb99988199db0993dd654255eecf2269539..af0e02fa4f4821ac7a71a4b641fde76db366a8cd 100644 (file)
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
        mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-       mdp5_encoder->enabled = false;
+       mdp5_encoder->enabled = true;
 }
 
 static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
        .mode_fixup = mdp5_encoder_mode_fixup,
        .mode_set = mdp5_encoder_mode_set,
-       .prepare = mdp5_encoder_disable,
-       .commit = mdp5_encoder_enable,
+       .disable = mdp5_encoder_disable,
+       .enable = mdp5_encoder_enable,
 };
 
 /* initialize encoder */
index 70ac81edd40f3bb7d6de68553b7be350005312ec..a9407105b9b799bea9fc787a923e7dc914bee30a 100644 (file)
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
 void mdp5_irq_preinstall(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       mdp5_enable(mdp5_kms);
        mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+       mdp5_disable(mdp5_kms);
 }
 
 int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
 void mdp5_irq_uninstall(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       mdp5_enable(mdp5_kms);
        mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+       mdp5_disable(mdp5_kms);
 }
 
 static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
index 871aa2108dc694795e74f2b308031c849ad1a966..18fd643b6e6915c3a2e13c149640d9917459eb12 100644 (file)
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev,
         * mark our set of crtc's as busy:
         */
        ret = start_atomic(dev->dev_private, c->crtc_mask);
-       if (ret)
+       if (ret) {
+               kfree(c);
                return ret;
+       }
 
        /*
         * This is the point of no return - everything below never fails except
index 79924e4b1b495577f26822f8f03801b67e4497a6..6751553abe4afe4bc408cd08863a7981b858bece 100644 (file)
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
        nouveau_fbcon_zfill(dev, fbcon);
 
        /* To allow resizing without swapping buffers */
-       NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+       NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
                nouveau_fb->base.width, nouveau_fb->base.height,
                nvbo->bo.offset, nvbo);
 
index ed644a4f6f57c4254349c3881a16955cd42cbc05..86807ee91bd13a7640f5b925aa4b5a4c0d9d615d 100644 (file)
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
               (x << 16) | y);
        viewport_w = crtc->mode.hdisplay;
        viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+       if ((rdev->family >= CHIP_BONAIRE) &&
+           (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
+               viewport_h *= 2;
        WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);
 
index 5bf825dfaa098ec6b6ad8f479856ce676f51ba21..8d74de82456e880cb01d7e8132cf6c1caf5fa864 100644 (file)
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
+               /* The atom implementation only supports writes with a max payload of
+                * 12 bytes since it uses 4 bits for the total count (header + payload)
+                * in the parameter space.  The atom interface supports 16 byte
+                * payloads for reads. The hw itself supports up to 16 bytes of payload.
+                */
+               if (WARN_ON_ONCE(msg->size > 12))
+                       return -E2BIG;
                /* tx_size needs to be 4 even for bare address packets since the atom
                 * table needs the info in tx_buf[3].
                 */
index 7c9df1eac065948df99491b3819427e32ad425d7..c39c1d0d9d4e328d8df7f19caf2b52c68b1988b3 100644 (file)
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                dig_connector = radeon_connector->con_priv;
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-                       if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+                       if (radeon_audio != 0 &&
+                           drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+                           ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
                                return ATOM_ENCODER_MODE_DP_AUDIO;
                        return ATOM_ENCODER_MODE_DP;
                } else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                }
                break;
        case DRM_MODE_CONNECTOR_eDP:
-               if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+               if (radeon_audio != 0 &&
+                   drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+                   ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
                        return ATOM_ENCODER_MODE_DP_AUDIO;
                return ATOM_ENCODER_MODE_DP;
        case DRM_MODE_CONNECTOR_DVIA:
@@ -1622,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        struct radeon_connector *radeon_connector = NULL;
        struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
        bool travis_quirk = false;
-       int encoder_mode;
 
        if (connector) {
                radeon_connector = to_radeon_connector(connector);
@@ -1718,11 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                }
                break;
        }
-
-       encoder_mode = atombios_get_encoder_mode(encoder);
-       if (radeon_audio != 0 &&
-               (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
-               radeon_audio_dpms(encoder, mode);
 }
 
 static void
@@ -1731,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       int encoder_mode = atombios_get_encoder_mode(encoder);
 
        DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
                  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
                  radeon_encoder->active_device);
+
+       if (connector && (radeon_audio != 0) &&
+           ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+            (ENCODER_MODE_IS_DP(encoder_mode) &&
+             drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+               radeon_audio_dpms(encoder, mode);
+
        switch (radeon_encoder->encoder_id) {
        case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -2136,6 +2143,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        int encoder_mode;
 
        radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2163,10 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                /* handled in dpms */
-               encoder_mode = atombios_get_encoder_mode(encoder);
-               if (radeon_audio != 0 &&
-                       (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
-                       radeon_audio_mode_set(encoder, adjusted_mode);
                break;
        case ENCODER_OBJECT_ID_INTERNAL_DDI:
        case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2188,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+       encoder_mode = atombios_get_encoder_mode(encoder);
+       if (connector && (radeon_audio != 0) &&
+           ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+            (ENCODER_MODE_IS_DP(encoder_mode) &&
+             drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+               radeon_audio_mode_set(encoder, adjusted_mode);
 }
 
 static bool
index e6a4ba236c703dc812d8bc57035408cb9ac5821f..3e670d344a2047151e289ab7dce348001ddf4280 100644 (file)
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 0x1);
+       WREG32(SRBM_INT_ACK, 0x1);
 
        WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
        WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
        /* grbm */
        WREG32(GRBM_INT_CNTL, 0);
+       /* SRBM */
+       WREG32(SRBM_INT_CNTL, 0);
        /* vline/vblank, etc. */
        WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -7551,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev)
        WREG32(DC_HPD5_INT_CONTROL, hpd5);
        WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
+       /* posting read */
+       RREG32(SRBM_STATUS);
+
        return 0;
 }
 
@@ -8046,6 +8053,10 @@ restart_ih:
                                break;
                        }
                        break;
+               case 96:
+                       DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+                       WREG32(SRBM_INT_ACK, 0x1);
+                       break;
                case 124: /* UVD */
                        DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
                        radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
index 03003f8a6de63ba00c741824c053070a009cd319..c648e1996dabac449dfb838e018cad85b2d3bb61 100644 (file)
 #define                SOFT_RESET_ORB                          (1 << 23)
 #define                SOFT_RESET_VCE                          (1 << 24)
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 #define VM_L2_CNTL                                     0x1400
 #define                ENABLE_L2_CACHE                                 (1 << 0)
 #define                ENABLE_L2_FRAGMENT_PROCESSING                   (1 << 1)
index 192c8038915187df6714fbd6d653f57dba449c13..3adc2afe32aa6be372abcdd925001bbc79176745 100644 (file)
@@ -26,6 +26,9 @@
 #include "radeon_audio.h"
 #include "sid.h"
 
+#define DCE8_DCCG_AUDIO_DTO1_PHASE     0x05b8
+#define DCE8_DCCG_AUDIO_DTO1_MODULE    0x05bc
+
 u32 dce6_endpoint_rreg(struct radeon_device *rdev,
                              u32 block_offset, u32 reg)
 {
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev,
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
        struct radeon_crtc *crtc, unsigned int clock)
 {
-    /* Two dtos; generally use dto0 for HDMI */
+       /* Two dtos; generally use dto0 for HDMI */
        u32 value = 0;
 
-    if (crtc)
+       if (crtc)
                value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
 
        WREG32(DCCG_AUDIO_DTO_SOURCE, value);
 
-    /* Express [24MHz / target pixel clock] as an exact rational
-     * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
-     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
-     */
-    WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
-    WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+       /* Express [24MHz / target pixel clock] as an exact rational
+        * number (a ratio of two integers).  DCCG_AUDIO_DTOx_PHASE
+        * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+        */
+       WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
+       WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
        struct radeon_crtc *crtc, unsigned int clock)
 {
-    /* Two dtos; generally use dto1 for DP */
+       /* Two dtos; generally use dto1 for DP */
        u32 value = 0;
        value |= DCCG_AUDIO_DTO_SEL;
 
-    if (crtc)
+       if (crtc)
                value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
 
        WREG32(DCCG_AUDIO_DTO_SOURCE, value);
 
-    /* Express [24MHz / target pixel clock] as an exact rational
-     * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
-     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
-     */
-    WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
-    WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+       /* Express [24MHz / target pixel clock] as an exact rational
+        * number (a ratio of two integers).  DCCG_AUDIO_DTOx_PHASE
+        * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+        */
+       if (ASIC_IS_DCE8(rdev)) {
+               WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+               WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+       } else {
+               WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+               WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+       }
 }
 
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       uint32_t offset;
 
        if (!dig || !dig->afmt)
                return;
 
-       offset = dig->afmt->offset;
-
        if (enable) {
-        if (dig->afmt->enabled)
-            return;
-
-               WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
-               WREG32(EVERGREEN_DP_SEC_CNTL + offset,
-                       EVERGREEN_DP_SEC_ASP_ENABLE |           /* Audio packet transmission */
-                       EVERGREEN_DP_SEC_ATP_ENABLE |           /* Audio timestamp packet transmission */
-                       EVERGREEN_DP_SEC_AIP_ENABLE |           /* Audio infoframe packet transmission */
-                       EVERGREEN_DP_SEC_STREAM_ENABLE);        /* Master enable for secondary stream engine */
-               radeon_audio_enable(rdev, dig->afmt->pin, true);
+               WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+                      EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+               WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
+                      EVERGREEN_DP_SEC_ASP_ENABLE |            /* Audio packet transmission */
+                      EVERGREEN_DP_SEC_ATP_ENABLE |            /* Audio timestamp packet transmission */
+                      EVERGREEN_DP_SEC_AIP_ENABLE |            /* Audio infoframe packet transmission */
+                      EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
        } else {
-               if (!dig->afmt->enabled)
-                       return;
-
-               WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
-               radeon_audio_enable(rdev, dig->afmt->pin, false);
+               WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
        }
 
        dig->afmt->enabled = enable;
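As the comment in dce6_dp_audio_set_dto() puts it, the DTO is programmed with the ratio 24 MHz / pixel clock: PHASE is the numerator and MODULE the denominator, both in kHz. A worked example with an illustrative 1080p pixel clock, using the DCCG_AUDIO_DTO1_* registers written above (the DCE8_ variants are selected the same way on DCE8 parts):

	/* Worked example (pixel clock value is illustrative): */
	unsigned int clock = 148500;			/* 148.5 MHz pixel clock, in kHz */
	WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);		/* numerator:   24 MHz reference, in kHz */
	WREG32(DCCG_AUDIO_DTO1_MODULE, clock);		/* denominator: target pixel clock, in kHz */
	/* ratio = 24000 / 148500 ~ 0.1616, i.e. the DTO scales the 148.5 MHz
	 * pixel clock back down to the 24 MHz audio reference. */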
index 78600f534c804b745b99f7aea8688381b4204182..973df064c14feb193a81c99109665c2186d03dc6 100644 (file)
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 0x1);
+       WREG32(SRBM_INT_ACK, 0x1);
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
        tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
        WREG32(DMA_CNTL, tmp);
        WREG32(GRBM_INT_CNTL, 0);
+       WREG32(SRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
        if (rdev->num_crtc >= 4) {
@@ -4590,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
        WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
        WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
 
+       /* posting read */
+       RREG32(SRBM_STATUS);
+
        return 0;
 }
 
@@ -5066,6 +5072,10 @@ restart_ih:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
                                break;
                        }
+               case 96:
+                       DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+                       WREG32(SRBM_INT_ACK, 0x1);
+                       break;
                case 124: /* UVD */
                        DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
                        radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
index 1d9aebc79595d050dd52d6e28da01939b908bc16..c18d4ecbd95d02baa907d4d15b6da43f282f4e67 100644 (file)
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce4_dp_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                          struct radeon_crtc *crtc, unsigned int clock)
 {
        u32 value;
 
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
        WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
-       WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10);
+       WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
 }
 
 void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
-               HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
-               HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
-
        WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
                AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
 
-       WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
-               HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
-       WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
-               HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
-               HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
        WREG32(AFMT_60958_0 + offset,
                AFMT_60958_CS_CHANNEL_NUMBER_L(1));
 
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
        if (!dig || !dig->afmt)
                return;
 
-       /* Silent, r600_hdmi_enable will raise WARN for us */
-       if (enable && dig->afmt->enabled)
-               return;
-       if (!enable && !dig->afmt->enabled)
-               return;
+       if (enable) {
+               WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
+                      HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+               WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+                      HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+                      HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
 
-       if (!enable && dig->afmt->pin) {
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
-               dig->afmt->pin = NULL;
+               WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+                      HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+                      HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+       } else {
+               WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
        }
 
        dig->afmt->enabled = enable;
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
                  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
 }
 
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       uint32_t offset;
 
        if (!dig || !dig->afmt)
                return;
 
-       offset = dig->afmt->offset;
-
        if (enable) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                struct radeon_connector_atom_dig *dig_connector;
                uint32_t val;
 
-               if (dig->afmt->enabled)
-                       return;
-
-               WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+               WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+                      EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
 
                if (radeon_connector->con_priv) {
                        dig_connector = radeon_connector->con_priv;
-                       val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
+                       val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
                        val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
 
                        if (dig_connector->dp_clock == 162000)
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
                        else
                                val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
 
-                       WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
+                       WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
                }
 
-               WREG32(EVERGREEN_DP_SEC_CNTL + offset,
+               WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
                        EVERGREEN_DP_SEC_ASP_ENABLE |           /* Audio packet transmission */
                        EVERGREEN_DP_SEC_ATP_ENABLE |           /* Audio timestamp packet transmission */
                        EVERGREEN_DP_SEC_AIP_ENABLE |           /* Audio infoframe packet transmission */
                        EVERGREEN_DP_SEC_STREAM_ENABLE);        /* Master enable for secondary stream engine */
-               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
        } else {
-               if (!dig->afmt->enabled)
-                       return;
-
-               WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
+               WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
        }
 
        dig->afmt->enabled = enable;
index ee83d2a88750aafb865c30dd89e42c2041700a61..a8d1d5240fcb3088d1ea391ebcc8955c46f8c237 100644 (file)
 #define                SOFT_RESET_REGBB                        (1 << 22)
 #define                SOFT_RESET_ORB                          (1 << 23)
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 /* display watermarks */
 #define        DC_LB_MEMORY_SPLIT                                0x6b0c
 #define        PRIORITY_A_CNT                                    0x6b18
index 24242a7f0ac3d728c4c69366f8af1077ecc25190..dab00812abaabeeeee6295041e730143c99fecba 100644 (file)
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 0x1);
+       WREG32(SRBM_INT_ACK, 0x1);
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        if ((rdev->config.cayman.max_backends_per_se == 1) &&
            (rdev->flags & RADEON_IS_IGP)) {
-               if ((disabled_rb_mask & 3) == 1) {
-                       /* RB0 disabled, RB1 enabled */
-                       tmp = 0x11111111;
-               } else {
+               if ((disabled_rb_mask & 3) == 2) {
                        /* RB1 disabled, RB0 enabled */
                        tmp = 0x00000000;
+               } else {
+                       /* RB0 disabled, RB1 enabled */
+                       tmp = 0x11111111;
                }
        } else {
                tmp = gb_addr_config & NUM_PIPES_MASK;
index ad7125486894d18ae90b0bc507d248d92baaf3e6..6b44580440d09a10053abcb53bf6f6c048d4e063 100644 (file)
 #define                SOFT_RESET_REGBB                        (1 << 22)
 #define                SOFT_RESET_ORB                          (1 << 23)
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 #define        SRBM_STATUS2                                    0x0EC4
 #define                DMA_BUSY                                (1 << 5)
 #define                DMA1_BUSY                               (1 << 6)
index 279801ca5110aff68d80ea452751d7f9b0bf748f..04f2514f756453bfdc7275a52c65549e631cfc6c 100644 (file)
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
                tmp |= RADEON_FP2_DETECT_MASK;
        }
        WREG32(RADEON_GEN_INT_CNTL, tmp);
+
+       /* read back to post the write */
+       RREG32(RADEON_GEN_INT_CNTL);
+
        return 0;
 }
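The read-back added above is the usual MMIO write-posting idiom: on a posted bus, the WREG32() may still be sitting in a bridge buffer when the function returns, and reading any register on the same device forces the pending writes out to the hardware first. In sketch form, using the same register as the hunk above:

	WREG32(RADEON_GEN_INT_CNTL, tmp);	/* program the interrupt enable bits */
	RREG32(RADEON_GEN_INT_CNTL);		/* posting read: flush the posted write
						 * before anyone relies on its effect */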
 
index 07a71a2488c93404f0803e3fbefe4c6204a4b5da..2fcad344492f526a98b46b34d2979cd9dd5a872e 100644 (file)
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev)
                WREG32(RV770_CG_THERMAL_INT, thermal_int);
        }
 
+       /* posting read */
+       RREG32(R_000E50_SRBM_STATUS);
+
        return 0;
 }
 
index 843b65f46ece168a8694a8a86a974befda12290b..fa2154493cf1537dee269149b1924468a6035325 100644 (file)
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        radeon_crtc = to_radeon_crtc(crtc);
                        if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                               vrefresh = radeon_crtc->hw_mode.vrefresh;
+                               vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
                                break;
                        }
                }
index 62c91ed669ce24bdf4f96ea3eb0c7c446e457b1b..dd6606b8e23ca9a3bfd3f7be414b17d01af2aacb 100644 (file)
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
        if (!dig || !dig->afmt)
                return;
 
-       /* Silent, r600_hdmi_enable will raise WARN for us */
-       if (enable && dig->afmt->enabled)
-               return;
-       if (!enable && !dig->afmt->enabled)
-               return;
-
-       if (!enable && dig->afmt->pin) {
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
-               dig->afmt->pin = NULL;
-       }
-
        /* Older chipsets require setting HDMI and routing manually */
        if (!ASIC_IS_DCE3(rdev)) {
                if (enable)
index a3ceef6d9632509b5d1d4cb760539af02a033efe..b21ef69a34ac2a2703f7adad0ee20a15f07d3509 100644 (file)
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
 void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
 
 static const u32 pin_offsets[7] =
 {
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = {
        .set_avi_packet = evergreen_set_avi_packet,
        .set_audio_packet = dce4_set_audio_packet,
        .mode_set = radeon_audio_dp_mode_set,
-       .dpms = evergreen_enable_dp_audio_packets,
+       .dpms = evergreen_dp_enable,
 };
 
 static struct radeon_audio_funcs dce6_hdmi_funcs = {
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
        .set_avi_packet = evergreen_set_avi_packet,
        .set_audio_packet = dce4_set_audio_packet,
        .mode_set = radeon_audio_dp_mode_set,
-       .dpms = dce6_enable_dp_audio_packets,
+       .dpms = dce6_dp_enable,
 };
 
 static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev,
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
-       enum drm_connector_status status)
+                        enum drm_connector_status status)
 {
        struct radeon_device *rdev;
        struct radeon_encoder *radeon_encoder;
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector,
                else
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
 
-               radeon_audio_write_speaker_allocation(connector->encoder);
-               radeon_audio_write_sad_regs(connector->encoder);
-               if (connector->encoder->crtc)
-                       radeon_audio_write_latency_fields(connector->encoder,
-                               &connector->encoder->crtc->mode);
+               dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
                radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
        } else {
                radeon_audio_enable(rdev, dig->afmt->pin, 0);
+               dig->afmt->pin = NULL;
        }
 }
 
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
  * update the info frames with the data from the current display mode
  */
 static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                      struct drm_display_mode *mode)
 {
-       struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
        if (!dig || !dig->afmt)
                return;
 
-       /* disable audio prior to setting up hw */
-       dig->afmt->pin = radeon_audio_get_pin(encoder);
-       radeon_audio_enable(rdev, dig->afmt->pin, 0);
+       radeon_audio_set_mute(encoder, true);
 
+       radeon_audio_write_speaker_allocation(encoder);
+       radeon_audio_write_sad_regs(encoder);
+       radeon_audio_write_latency_fields(encoder, mode);
        radeon_audio_set_dto(encoder, mode->clock);
        radeon_audio_set_vbi_packet(encoder);
        radeon_hdmi_set_color_depth(encoder);
-       radeon_audio_set_mute(encoder, false);
        radeon_audio_update_acr(encoder, mode->clock);
        radeon_audio_set_audio_packet(encoder);
        radeon_audio_select_pin(encoder);
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
        if (radeon_audio_set_avi_packet(encoder, mode) < 0)
                return;
 
-       /* enable audio after to setting up hw */
-       radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+       radeon_audio_set_mute(encoder, false);
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       struct radeon_connector_atom_dig *dig_connector =
+               radeon_connector->con_priv;
 
        if (!dig || !dig->afmt)
                return;
 
-       /* disable audio prior to setting up hw */
-       dig->afmt->pin = radeon_audio_get_pin(encoder);
-       radeon_audio_enable(rdev, dig->afmt->pin, 0);
-
-       radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+       radeon_audio_write_speaker_allocation(encoder);
+       radeon_audio_write_sad_regs(encoder);
+       radeon_audio_write_latency_fields(encoder, mode);
+       if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+               radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+       else
+               radeon_audio_set_dto(encoder, dig_connector->dp_clock);
        radeon_audio_set_audio_packet(encoder);
        radeon_audio_select_pin(encoder);
 
        if (radeon_audio_set_avi_packet(encoder, mode) < 0)
                return;
-
-       /* enable audio after to setting up hw */
-       radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
index c830863bc98aa0cb55e84aedfbbc76606945ee01..4d0f96cc3da4488b0a324058533d26500011293f 100644 (file)
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
        u32 ring = RADEON_CS_RING_GFX;
        s32 priority = 0;
 
+       INIT_LIST_HEAD(&p->validated);
+
        if (!cs->num_chunks) {
                return 0;
        }
+
        /* get chunks */
-       INIT_LIST_HEAD(&p->validated);
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->const_ib.sa_bo = NULL;
@@ -715,6 +717,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
+       int ret = 0, i;
 
        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +746,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto dump_ib;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto dump_ib;
        }
        return 0;
+
+dump_ib:
+       for (i = 0; i < ib_chunk->length_dw; i++) {
+               if (i == idx)
+                       printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+               else
+                       printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+       }
+       return ret;
 }
 
 /**
index 6b670b0bc47bb9dca0238ef35dcff1cc2f686c34..3a297037cc176250fff7b1dae4a2566a8fd3c1ca 100644 (file)
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
                    (rdev->pdev->subsystem_vendor == 0x1734) &&
                    (rdev->pdev->subsystem_device == 0x1107))
                        use_bl = false;
+/* Older PPC macs use the on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
                /* disable native backlight control on older asics */
                else if (rdev->family < CHIP_R600)
                        use_bl = false;
+#endif
                else
                        use_bl = true;
        }
index d13d1b5a859f5b4d6aa69adc18cf85a2375398d1..df09ca7c488949896f68ff1d78ad7e152f0fd9a5 100644 (file)
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+struct radeon_wait_cb {
+       struct fence_cb base;
+       struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+       struct radeon_wait_cb *wait =
+               container_of(cb, struct radeon_wait_cb, base);
+
+       wake_up_process(wait->task);
+}
+
 static signed long radeon_fence_default_wait(struct fence *f, bool intr,
                                             signed long t)
 {
        struct radeon_fence *fence = to_radeon_fence(f);
        struct radeon_device *rdev = fence->rdev;
-       bool signaled;
+       struct radeon_wait_cb cb;
 
-       fence_enable_sw_signaling(&fence->base);
+       cb.task = current;
 
-       /*
-        * This function has to return -EDEADLK, but cannot hold
-        * exclusive_lock during the wait because some callers
-        * may already hold it. This means checking needs_reset without
-        * lock, and not fiddling with any gpu internals.
-        *
-        * The callback installed with fence_enable_sw_signaling will
-        * run before our wait_event_*timeout call, so we will see
-        * both the signaled fence and the changes to needs_reset.
-        */
+       if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+               return t;
+
+       while (t > 0) {
+               if (intr)
+                       set_current_state(TASK_INTERRUPTIBLE);
+               else
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+
+               /*
+                * radeon_test_signaled must be called after
+                * set_current_state to prevent a race with wake_up_process
+                */
+               if (radeon_test_signaled(fence))
+                       break;
+
+               if (rdev->needs_reset) {
+                       t = -EDEADLK;
+                       break;
+               }
+
+               t = schedule_timeout(t);
+
+               if (t > 0 && intr && signal_pending(current))
+                       t = -ERESTARTSYS;
+       }
+
+       __set_current_state(TASK_RUNNING);
+       fence_remove_callback(f, &cb.base);
 
-       if (intr)
-               t = wait_event_interruptible_timeout(rdev->fence_queue,
-                       ((signaled = radeon_test_signaled(fence)) ||
-                        rdev->needs_reset), t);
-       else
-               t = wait_event_timeout(rdev->fence_queue,
-                       ((signaled = radeon_test_signaled(fence)) ||
-                        rdev->needs_reset), t);
-
-       if (t > 0 && !signaled)
-               return -EDEADLK;
        return t;
 }
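
The rework above replaces the wait_event_*timeout() calls on the shared fence_queue with a per-waiter fence callback that calls wake_up_process(). A minimal sketch of the underlying sleep/wakeup idiom, using a hypothetical condition_is_true() helper in place of radeon_test_signaled(), looks like this; the task state has to be set before the condition check so a concurrent wake_up_process() cannot be lost:

    static signed long wait_for_condition(signed long timeout)
    {
            while (timeout > 0) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (condition_is_true())        /* hypothetical helper */
                            break;
                    timeout = schedule_timeout(timeout);
            }
            __set_current_state(TASK_RUNNING);
            return timeout;
    }

A wakeup arriving after set_current_state() marks the task runnable again, so schedule_timeout() returns promptly instead of waiting out the full timeout.
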
 
index 9f758d39420dd4affddb42116a09e695a17b6abe..33cf4108386dbba4ef70a0e372eb992d1ff7e4d3 100644 (file)
@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
                        single_display = false;
        }
 
+       /* 120 Hz modes tend to be problematic even when they are
+        * under the vblank limit.
+        */
+       if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
+               single_display = false;
+
        /* certain older asics have a separate 3D performance state,
         * so try that first if the user selected performance
         */
index d81182ad53ec6920b4f7027595882cebf26cdcfc..97a904835759f7876b2b69be104493cf302554a7 100644 (file)
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev)
        WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
        if (ASIC_IS_DCE2(rdev))
                WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+
+       /* posting read */
+       RREG32(R_000040_GEN_INT_CNTL);
+
        return 0;
 }
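
The added RREG32() is a posting read: MMIO writes can be buffered (posted) on the bus, so reading a register back from the same device forces the earlier writes to actually reach the hardware before the function returns and interrupts are expected to be live. A minimal sketch of the idiom, using the register macros already present in this driver (the write shown is illustrative, not the exact code):

    WREG32(R_000040_GEN_INT_CNTL, tmp); /* write may still be posted */
    RREG32(R_000040_GEN_INT_CNTL);      /* read back: flushes the posted write */
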
 
index 73107fe9e46f7de25d1d22b3ad3ba0b28f40c8f0..a7fb2735d4a929b7a20127bce4d7e3c759de2449 100644 (file)
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        }
 
        WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 1);
+       WREG32(SRBM_INT_ACK, 1);
 
        evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
                        dev_err(rdev->dev, "Packet0 not allowed!\n");
-                       for (i = 0; i < ib->length_dw; i++) {
-                               if (i == idx)
-                                       printk("\t0x%08x <---\n", ib->ptr[i]);
-                               else
-                                       printk("\t0x%08x\n", ib->ptr[i]);
-                       }
                        ret = -EINVAL;
                        break;
                case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
                        ret = -EINVAL;
                        break;
                }
-               if (ret)
+               if (ret) {
+                       for (i = 0; i < ib->length_dw; i++) {
+                               if (i == idx)
+                                       printk("\t0x%08x <---\n", ib->ptr[i]);
+                               else
+                                       printk("\t0x%08x\n", ib->ptr[i]);
+                       }
                        break;
+               }
        } while (idx < ib->length_dw);
 
        return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
        WREG32(GRBM_INT_CNTL, 0);
+       WREG32(SRBM_INT_CNTL, 0);
        if (rdev->num_crtc >= 2) {
                WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
                WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6199,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev)
 
        WREG32(CG_THERMAL_INT, thermal_int);
 
+       /* posting read */
+       RREG32(SRBM_STATUS);
+
        return 0;
 }
 
@@ -6609,6 +6616,10 @@ restart_ih:
                                break;
                        }
                        break;
+               case 96:
+                       DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+                       WREG32(SRBM_INT_ACK, 0x1);
+                       break;
                case 124: /* UVD */
                        DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
                        radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
@@ -7119,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
        WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
        if (!vclk || !dclk) {
-               /* keep the Bypass mode, put PLL to sleep */
-               WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+               /* keep the Bypass mode */
                return 0;
        }
 
@@ -7136,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
        /* set VCO_MODE to 1 */
        WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-       /* toggle UPLL_SLEEP to 1 then back to 0 */
-       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+       /* disable sleep mode */
        WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
        /* deassert UPLL_RESET */
index cbd91d226f3ce232b5933686ad41cae3ace64ebe..99a9835c9f615919c063af1ce0ca0e596aed8c15 100644 (file)
 #define        CC_SYS_RB_BACKEND_DISABLE                       0xe80
 #define        GC_USER_SYS_RB_BACKEND_DISABLE                  0xe84
 
+#define SRBM_READ_ERROR                                        0xE98
+#define SRBM_INT_CNTL                                  0xEA0
+#define SRBM_INT_ACK                                   0xEA8
+
 #define        SRBM_STATUS2                                    0x0EC4
 #define                DMA_BUSY                                (1 << 5)
 #define                DMA1_BUSY                               (1 << 6)
 
 #define DCCG_AUDIO_DTO0_PHASE                           0x05b0
 #define DCCG_AUDIO_DTO0_MODULE                          0x05b4
-#define DCCG_AUDIO_DTO1_PHASE                           0x05b8
-#define DCCG_AUDIO_DTO1_MODULE                          0x05bc
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
 
 #define AFMT_AUDIO_SRC_CONTROL                          0x713c
 #define                AFMT_AUDIO_SRC_SELECT(x)                (((x) & 7) << 0)
index 3aaa84ae26811fb8c731a89e76b91297b1ff3bf3..1a52522f5da76790dae32fe1a423bd75eaddd1e4 100644 (file)
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
        crtc->state = NULL;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (state)
+       if (state) {
                crtc->state = &state->base;
+               crtc->state->crtc = crtc;
+       }
 }
 
 static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
                return NULL;
 
        copy->base.mode_changed = false;
+       copy->base.active_changed = false;
        copy->base.planes_changed = false;
        copy->base.event = NULL;
 
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
        /* program display mode */
        tegra_dc_set_timings(dc, mode);
 
-       if (dc->soc->supports_border_color)
-               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
        /* interlacing isn't supported yet, so disable it */
        if (dc->soc->supports_interlacing) {
                value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
 
 static void tegra_crtc_prepare(struct drm_crtc *crtc)
 {
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-       unsigned int syncpt;
-       unsigned long value;
-
        drm_crtc_vblank_off(crtc);
-
-       if (dc->pipe)
-               syncpt = SYNCPT_VBLANK1;
-       else
-               syncpt = SYNCPT_VBLANK0;
-
-       /* initialize display controller */
-       tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-       tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
-       /* initialize timer */
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
-               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
-               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
-       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 }
 
 static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_plane *primary = NULL;
        struct drm_plane *cursor = NULL;
+       unsigned int syncpt;
+       u32 value;
        int err;
 
        if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
                goto cleanup;
        }
 
+       /* initialize display controller */
+       if (dc->pipe)
+               syncpt = SYNCPT_VBLANK1;
+       else
+               syncpt = SYNCPT_VBLANK0;
+
+       tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+       tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+       /* initialize timer */
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+       if (dc->soc->supports_border_color)
+               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
        return 0;
 
 cleanup:
index 7e06657ae58bc1eb2202e7da6c193eb4c8ca862b..7eaaee74a039f36c180629b2233f8455d88b1b6a 100644 (file)
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
        h_back_porch = mode->htotal - mode->hsync_end;
        h_front_porch = mode->hsync_start - mode->hdisplay;
 
+       err = clk_set_rate(hdmi->clk, pclk);
+       if (err < 0) {
+               dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
+                       err);
+       }
+
+       DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
+
        /* power up sequence */
        value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
        value &= ~SOR_PLL_PDBG;
index d395b0bef73b0ce8afffa1d40fb9ea47022ac7fd..8d9b7de2561339b03e2b191e45454eadfd454668 100644 (file)
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
        pr_err("    has_type: %d\n", man->has_type);
        pr_err("    use_type: %d\n", man->use_type);
        pr_err("    flags: 0x%08X\n", man->flags);
-       pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
+       pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
        pr_err("    size: %llu\n", man->size);
        pr_err("    available_caching: 0x%08X\n", man->available_caching);
        pr_err("    default_caching: 0x%08X\n", man->default_caching);
index 6c6b655defcf4eac679913e70896810208dfd6ce..e13b9cbc304e9d17a0d5ff55179b2d8b7f93ca7b 100644 (file)
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_err1;
        }
 
-       ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-                            (dev_priv->vram_size >> PAGE_SHIFT));
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed initializing memory manager for VRAM.\n");
-               goto out_err2;
-       }
-
-       dev_priv->has_gmr = true;
-       if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-           refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-                                        VMW_PL_GMR) != 0) {
-               DRM_INFO("No GMR memory available. "
-                        "Graphics memory resources are very limited.\n");
-               dev_priv->has_gmr = false;
-       }
-
-       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-               dev_priv->has_mob = true;
-               if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-                                  VMW_PL_MOB) != 0) {
-                       DRM_INFO("No MOB memory available. "
-                                "3D will be disabled.\n");
-                       dev_priv->has_mob = false;
-               }
-       }
-
        dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                               dev_priv->mmio_size);
 
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_no_fman;
        }
 
+
+       ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+                            (dev_priv->vram_size >> PAGE_SHIFT));
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+               goto out_no_vram;
+       }
+
+       dev_priv->has_gmr = true;
+       if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+           refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+                                        VMW_PL_GMR) != 0) {
+               DRM_INFO("No GMR memory available. "
+                        "Graphics memory resources are very limited.\n");
+               dev_priv->has_gmr = false;
+       }
+
+       if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+               dev_priv->has_mob = true;
+               if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+                                  VMW_PL_MOB) != 0) {
+                       DRM_INFO("No MOB memory available. "
+                                "3D will be disabled.\n");
+                       dev_priv->has_mob = false;
+               }
+       }
+
        vmw_kms_save_vga(dev_priv);
 
        /* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
        vmw_kms_close(dev_priv);
 out_no_kms:
        vmw_kms_restore_vga(dev_priv);
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+       if (dev_priv->has_gmr)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
        vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
        iounmap(dev_priv->mmio_virt);
 out_err3:
        arch_phys_wc_del(dev_priv->mmio_mtrr);
-       if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-       if (dev_priv->has_gmr)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
-out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
        vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
+
+       if (dev_priv->has_mob)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+       if (dev_priv->has_gmr)
+               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
-       if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-       if (dev_priv->has_gmr)
-               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
 
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
 
+       pci_disable_device(pdev);
        drm_put_dev(dev);
 }
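
The vmwgfx_drv.c hunks above follow the usual goto-unwind convention: resources are released in the reverse order of their setup, so once the TTM memory managers are initialized after the fence manager, their cleanup has to move to a label that unwinds before the fence-manager takedown. A minimal sketch of the pattern, with hypothetical init_a()/init_b() helpers:

    static int load_example(void)
    {
            int ret;

            ret = init_a();                 /* hypothetical */
            if (ret)
                    return ret;

            ret = init_b();                 /* hypothetical */
            if (ret)
                    goto err_a;

            return 0;

    err_a:
            cleanup_a();                    /* undone in reverse order of setup */
            return ret;
    }
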
 
index 33176d05db3542903f1c919b59f68cad2da22044..654c8daeb5ab3d0dd84a2ed1d32af633d6955ac9 100644 (file)
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_no_reloc;
        }
        bo = &vmw_bo->base;
 
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
-       vmw_bo_p = NULL;
+       *vmw_bo_p = NULL;
        return ret;
 }
 
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_no_reloc;
        }
        bo = &vmw_bo->base;
 
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
-       vmw_bo_p = NULL;
+       *vmw_bo_p = NULL;
        return ret;
 }
 
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                                  NULL, arg->command_size, arg->throttle_us,
                                  (void __user *)(unsigned long)arg->fence_rep,
                                  NULL);
-
+       ttm_read_unlock(&dev_priv->reservation_sem);
        if (unlikely(ret != 0))
-               goto out_unlock;
+               return ret;
 
        vmw_kms_cursor_post_execbuf(dev_priv);
 
-out_unlock:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-       return ret;
+       return 0;
 }
index 8725b79e7847d68239a25413c482883e44024704..07cda8cbbddbcb5e6f56127c57dac0e683541fb9 100644 (file)
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        int i;
        struct drm_mode_config *mode_config = &dev->mode_config;
 
-       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-       if (unlikely(ret != 0))
-               return ret;
-
        if (!arg->num_outputs) {
                struct drm_vmw_rect def_rect = {0, 0, 800, 600};
                vmw_du_update_layout(dev_priv, 1, &def_rect);
-               goto out_unlock;
+               return 0;
        }
 
        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
        rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
                        GFP_KERNEL);
-       if (unlikely(!rects)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (unlikely(!rects))
+               return -ENOMEM;
 
        user_rects = (void __user *)(unsigned long)arg->rects;
        ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 
 out_free:
        kfree(rects);
-out_unlock:
-       ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
 }
index b61d6be97602222d3ae51b39d939d00cf8f5731b..3ddfb3d0b64d266cb95345ccb68c1a991521cc1d 100644 (file)
@@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di,
 
                clkrate = clk_get_rate(di->clk_ipu);
                div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
+               if (div == 0)
+                       div = 1;
                rate = clkrate / div;
 
                error = rate / (sig->mode.pixelclock / 1000);
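
DIV_ROUND_CLOSEST() returns 0 whenever the requested pixel clock is more than twice the IPU clock, and the following clkrate / div would then divide by zero. A minimal sketch of the guarded computation, with hypothetical example rates:

    unsigned long clkrate = 264000000;     /* hypothetical IPU clock, 264 MHz */
    unsigned long pixelclock = 600000000;  /* hypothetical requested 600 MHz */
    unsigned long div, rate;

    div = DIV_ROUND_CLOSEST(clkrate, pixelclock);   /* rounds to 0 here */
    if (div == 0)
            div = 1;                        /* avoid the divide-by-zero below */
    rate = clkrate / div;                   /* falls back to clkrate itself */
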
index db4fb6e1cc5b3ca83d14d3f1e0e52e66fe75979a..56ce8c2b5530db20d851b585fa0c4f88ef81afa6 100644 (file)
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #endif
 #if IS_ENABLED(CONFIG_HID_SAITEK)
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
@@ -1957,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
index 46edb4d3ed28efc61582811ea6b5fdeac2a2808a..9c4786759f16f4f397a839214600524b9826254c 100644 (file)
 #define USB_VENDOR_ID_LOGITECH         0x046d
 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
 #define USB_DEVICE_ID_LOGITECH_T651    0xb00c
+#define USB_DEVICE_ID_LOGITECH_C077    0xc007
 #define USB_DEVICE_ID_LOGITECH_RECEIVER        0xc101
 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
 #define USB_DEVICE_ID_MS_LK6K          0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT       0x0701
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB      0x0713
+#define USB_DEVICE_ID_MS_NE7K          0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K      0x0730
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500    0x076c
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
 #define USB_VENDOR_ID_SAITEK           0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
+#define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
 
 #define USB_VENDOR_ID_TIVO             0x150a
 #define USB_DEVICE_ID_TIVO_SLIDE_BT    0x1200
 #define USB_DEVICE_ID_TIVO_SLIDE       0x1201
+#define USB_DEVICE_ID_TIVO_SLIDE_PRO   0x1203
 
 #define USB_VENDOR_ID_TOPSEED          0x0766
 #define USB_DEVICE_ID_TOPSEED_CYBERLINK        0x0204
index fbaea6eb882e21afb6cba576279834a099780434..af935eb198c93549867c4e50bb48a997e5cd3f2b 100644 (file)
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
                .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
                .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
+               .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
                .driver_data = MS_ERGONOMY | MS_RDESC },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
index 5632c54eadf0206faee31afefc99235763c37e14..a014f21275d8bfada33701b4bef5013f3b81eb2a 100644 (file)
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id saitek_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
                .driver_data = SAITEK_FIX_PS1000 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 6a58b6c723aa215408051e2b3c967205569a7b94..e54ce1097e2cc57f5049cf852016087a32534c47 100644 (file)
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                        (collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
                                callback->hsdev->end_collection_index)) {
                        *priv = callback->priv;
                        *hsdev = callback->hsdev;
-                       spin_unlock(&pdata->dyn_callback_lock);
+                       spin_unlock_irqrestore(&pdata->dyn_callback_lock,
+                                              flags);
                        return callback->usage_callback;
                }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return NULL;
 }
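
The callback lookup may be reached from atomic or interrupt context, so the plain spin_lock() is replaced with the irqsave variant, which also disables local interrupts and restores the caller's previous interrupt state on unlock. A minimal sketch of the idiom with a hypothetical shared structure:

    struct shared_state {                   /* hypothetical */
            spinlock_t lock;
            int counter;
    };

    static void bump_counter(struct shared_state *s)
    {
            unsigned long flags;

            spin_lock_irqsave(&s->lock, flags);     /* safe in any context */
            s->counter++;
            spin_unlock_irqrestore(&s->lock, flags);
    }
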
index 31e9d25611064d0500eba47ea0b710beac31bb0b..1896c019e302934aa13c6f9ac434c51637f450ce 100644 (file)
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
 #define DS4_REPORT_0x81_SIZE 7
 #define SIXAXIS_REPORT_0xF2_SIZE 18
 
-static spinlock_t sony_dev_list_lock;
+static DEFINE_SPINLOCK(sony_dev_list_lock);
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
 
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return -ENOMEM;
        }
 
+       spin_lock_init(&sc->lock);
+
        sc->quirks = quirks;
        hid_set_drvdata(hdev, sc);
        sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
 {
        dbg_hid("Sony:%s\n", __func__);
 
-       ida_destroy(&sony_device_id_allocator);
        hid_unregister_driver(&sony_driver);
+       ida_destroy(&sony_device_id_allocator);
 }
 module_init(sony_init);
 module_exit(sony_exit);
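
Two independent fixes above: the file-scope lock is now initialized at compile time with DEFINE_SPINLOCK() instead of being a bare, uninitialized spinlock_t (and the per-device sc->lock gets an explicit spin_lock_init() in probe), and module exit now unregisters the driver before destroying the IDA so no remove path can touch the allocator after it is gone. A minimal sketch of the two lock-initialization styles:

    static DEFINE_SPINLOCK(global_lock);    /* static lock: ready at compile time */

    struct my_dev {                         /* hypothetical per-device state */
            spinlock_t lock;
    };

    static void my_dev_setup(struct my_dev *dev)
    {
            spin_lock_init(&dev->lock);     /* dynamic lock: initialized at runtime */
    }
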
index d790d8d71f7fc6829f405b7665cc37f0af1bf5a8..d9869692745399b0406dc62b66c1269295ee9835 100644 (file)
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = {
        /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, tivo_devices);
index d43e967e75339ec7972e734e284c4356e31a4e38..36053f33d6d93e97009b0d6ba3f4aa5416be8fea 100644 (file)
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
        int ret, ret_size;
-       int size = ihid->bufsize;
+       int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+       if (size > ihid->bufsize)
+               size = ihid->bufsize;
 
        ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
        if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
        dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
 
        ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
-                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                        client->name, ihid);
        if (ret < 0) {
                dev_warn(&client->dev,
index 9be99a67bfe2ec9f042e765904913628f8c2b654..a821277534611708973ccf080ad78f4136ef0df5 100644 (file)
@@ -78,6 +78,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
index 1a6507999a6534f0b851e209bb702696d1f50e58..bbe32d66e5000157b4d3670c23350ff3fa1a0104 100644 (file)
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
           (features->type == CINTIQ && !(data[1] & 0x40)))
                return 1;
 
-       if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
+       if (wacom->shared) {
                wacom->shared->stylus_in_proximity = true;
 
+               if (wacom->shared->touch_down)
+                       return 1;
+       }
+
        /* in Range while exiting */
        if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
                input_report_key(input, BTN_TOUCH, 0);
@@ -778,6 +782,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
                        input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
                        input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+                       if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
                } else if (features->type == CINTIQ_HYBRID) {
                        /*
                         * Do not send hardware buttons under Android. They
@@ -1038,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
        struct input_dev *input = wacom->input;
        unsigned char *data = wacom->data;
        int i;
-       int current_num_contacts = 0;
+       int current_num_contacts = data[61];
        int contacts_to_send = 0;
        int num_contacts_left = 4; /* maximum contacts per packet */
        int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
        int y_offset = 2;
+       static int contact_with_no_pen_down_count = 0;
 
        if (wacom->features.type == WACOM_27QHDT) {
                current_num_contacts = data[63];
                num_contacts_left = 10;
                byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
                y_offset = 0;
-       } else {
-               current_num_contacts = data[61];
        }
 
        /*
         * First packet resets the counter since only the first
         * packet in series will have non-zero current_num_contacts.
         */
-       if (current_num_contacts)
+       if (current_num_contacts) {
                wacom->num_contacts_left = current_num_contacts;
+               contact_with_no_pen_down_count = 0;
+       }
 
        contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
 
@@ -1091,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
                                input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
                                input_report_abs(input, ABS_MT_ORIENTATION, w > h);
                        }
+                       contact_with_no_pen_down_count++;
                }
        }
        input_mt_report_pointer_emulation(input, true);
 
        wacom->num_contacts_left -= contacts_to_send;
-       if (wacom->num_contacts_left <= 0)
+       if (wacom->num_contacts_left <= 0) {
                wacom->num_contacts_left = 0;
-
-       wacom->shared->touch_down = (wacom->num_contacts_left > 0);
+               wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
+       }
        return 1;
 }
 
@@ -1111,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
        int current_num_contacts = data[2];
        int contacts_to_send = 0;
        int x_offset = 0;
+       static int contact_with_no_pen_down_count = 0;
 
        /* MTTPC does not support Height and Width */
        if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
@@ -1120,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
         * First packet resets the counter since only the first
         * packet in series will have non-zero current_num_contacts.
         */
-       if (current_num_contacts)
+       if (current_num_contacts) {
                wacom->num_contacts_left = current_num_contacts;
+               contact_with_no_pen_down_count = 0;
+       }
 
        /* There are at most 5 contacts per packet */
        contacts_to_send = min(5, wacom->num_contacts_left);
@@ -1142,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
                        int y = get_unaligned_le16(&data[offset + x_offset + 9]);
                        input_report_abs(input, ABS_MT_POSITION_X, x);
                        input_report_abs(input, ABS_MT_POSITION_Y, y);
+                       contact_with_no_pen_down_count++;
                }
        }
        input_mt_report_pointer_emulation(input, true);
 
        wacom->num_contacts_left -= contacts_to_send;
-       if (wacom->num_contacts_left < 0)
+       if (wacom->num_contacts_left <= 0) {
                wacom->num_contacts_left = 0;
-
-       wacom->shared->touch_down = (wacom->num_contacts_left > 0);
+               wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
+       }
        return 1;
 }
 
@@ -1188,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
 {
        unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
-       bool prox;
+       bool prox = !wacom->shared->stylus_in_proximity;
        int x = 0, y = 0;
 
        if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
                return 0;
 
-       if (!wacom->shared->stylus_in_proximity) {
-               if (len == WACOM_PKGLEN_TPC1FG) {
-                       prox = data[0] & 0x01;
-                       x = get_unaligned_le16(&data[1]);
-                       y = get_unaligned_le16(&data[3]);
-               } else if (len == WACOM_PKGLEN_TPC1FG_B) {
-                       prox = data[2] & 0x01;
-                       x = get_unaligned_le16(&data[3]);
-                       y = get_unaligned_le16(&data[5]);
-               } else {
-                       prox = data[1] & 0x01;
-                       x = le16_to_cpup((__le16 *)&data[2]);
-                       y = le16_to_cpup((__le16 *)&data[4]);
-               }
-       } else
-               /* force touch out when pen is in prox */
-               prox = 0;
+       if (len == WACOM_PKGLEN_TPC1FG) {
+               prox = prox && (data[0] & 0x01);
+               x = get_unaligned_le16(&data[1]);
+               y = get_unaligned_le16(&data[3]);
+       } else if (len == WACOM_PKGLEN_TPC1FG_B) {
+               prox = prox && (data[2] & 0x01);
+               x = get_unaligned_le16(&data[3]);
+               y = get_unaligned_le16(&data[5]);
+       } else {
+               prox = prox && (data[1] & 0x01);
+               x = le16_to_cpup((__le16 *)&data[2]);
+               y = le16_to_cpup((__le16 *)&data[4]);
+       }
 
        if (prox) {
                input_report_abs(input, ABS_X, x);
@@ -1608,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
        struct input_dev *pad_input = wacom->pad_input;
        unsigned char *data = wacom->data;
        int i;
+       int contact_with_no_pen_down_count = 0;
 
        if (data[0] != 0x02)
            return 0;
@@ -1635,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
                        }
                        input_report_abs(input, ABS_MT_POSITION_X, x);
                        input_report_abs(input, ABS_MT_POSITION_Y, y);
+                       contact_with_no_pen_down_count++;
                }
        }
 
@@ -1644,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
        input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
        input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
        input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
+       wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
 
        return 1;
 }
 
-static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count)
 {
        struct wacom_features *features = &wacom->features;
        struct input_dev *input = wacom->input;
@@ -1656,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
        int slot = input_mt_get_slot_by_key(input, data[0]);
 
        if (slot < 0)
-               return;
+               return 0;
 
        touch = touch && !wacom->shared->stylus_in_proximity;
 
@@ -1688,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
                input_report_abs(input, ABS_MT_POSITION_Y, y);
                input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
                input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
+               last_touch_count++;
        }
+       return last_touch_count;
 }
 
 static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1713,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
        unsigned char *data = wacom->data;
        int count = data[1] & 0x07;
        int i;
+       int contact_with_no_pen_down_count = 0;
 
        if (data[0] != 0x02)
            return 0;
@@ -1723,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
                int msg_id = data[offset];
 
                if (msg_id >= 2 && msg_id <= 17)
-                       wacom_bpt3_touch_msg(wacom, data + offset);
+                       contact_with_no_pen_down_count = 
+                           wacom_bpt3_touch_msg(wacom, data + offset,
+                                                contact_with_no_pen_down_count);
                else if (msg_id == 128)
                        wacom_bpt3_button_msg(wacom, data + offset);
 
        }
        input_mt_report_pointer_emulation(input, true);
+       wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
 
        return 1;
 }
@@ -1754,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
                return 0;
        }
 
+       if (wacom->shared->touch_down)
+               return 0;
+
        prox = (data[1] & 0x20) == 0x20;
 
        /*
@@ -2725,9 +2748,9 @@ static const struct wacom_features wacom_features_0xF6 =
          .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
          .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
-       { "Wacom Cintiq 27QHD", 119740, 67520, 2047,
-         63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
-         WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+       { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+         WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
        { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
          WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
index bce4e9ff21bff76606484f0a04ad1bf52b1ffee2..6c99ee7bafa3fdf47d6479b7198697ec835a23e8 100644 (file)
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client,
                                                    &ads2830_regmap_config);
        }
 
+       if (IS_ERR(data->regmap))
+               return PTR_ERR(data->regmap);
+
        data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
        if (!diff_input)
                data->cmd_byte |= ADS7828_CMD_SD_SE;
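
devm_regmap_init_i2c() follows the ERR_PTR convention: it returns either a usable regmap or an encoded errno, never NULL, so the result must be tested with IS_ERR() before use, which is what the added check does. A minimal sketch of the convention (the regmap config name here is assumed, not the driver's):

    data->regmap = devm_regmap_init_i2c(client, &example_regmap_config);
    if (IS_ERR(data->regmap))
            return PTR_ERR(data->regmap);   /* e.g. -ENOMEM propagated out of probe */
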
index ab838d9e28b6389dc6d97dc633ea6259d2126ca3..22da9c2ffa2250cad9a7172bdbc94890d659a744 100644 (file)
@@ -79,7 +79,7 @@ config I2C_AMD8111
 
 config I2C_HIX5HD2
        tristate "Hix5hd2 high-speed I2C driver"
-       depends on ARCH_HIX5HD2
+       depends on ARCH_HIX5HD2 || COMPILE_TEST
        help
          Say Y here to include support for high-speed I2C controller in the
          Hisilicon based hix5hd2 SoCs.
@@ -372,6 +372,16 @@ config I2C_BCM2835
          This support is also available as a module.  If so, the module
          will be called i2c-bcm2835.
 
+config I2C_BCM_IPROC
+       tristate "Broadcom iProc I2C controller"
+       depends on ARCH_BCM_IPROC || COMPILE_TEST
+       default ARCH_BCM_IPROC
+       help
+         If you say yes to this option, support will be included for the
+         Broadcom iProc I2C controller.
+
+         If you don't know what to do here, say N.
+
 config I2C_BCM_KONA
        tristate "BCM Kona I2C adapter"
        depends on ARCH_BCM_MOBILE
@@ -465,6 +475,16 @@ config I2C_DESIGNWARE_PCI
          This driver can also be built as a module.  If so, the module
          will be called i2c-designware-pci.
 
+config I2C_DESIGNWARE_BAYTRAIL
+       bool "Intel Baytrail I2C semaphore support"
+       depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
+       help
+         This driver enables managed host access to the PMIC I2C bus on select
+         Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
+         the host to request uninterrupted access to the PMIC's I2C bus from
+         the platform firmware controlling it. You should say Y if running on
+         a BayTrail system using the AXP288.
+
 config I2C_EFM32
        tristate "EFM32 I2C controller"
        depends on ARCH_EFM32 || COMPILE_TEST
index 56388f658d2f2567cbcf4b38433212c94d0d0faf..3638feb6677e1d6d7991b6d0d831ebc1b99c2e11 100644 (file)
@@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91)                += i2c-at91.o
 obj-$(CONFIG_I2C_AU1550)       += i2c-au1550.o
 obj-$(CONFIG_I2C_AXXIA)                += i2c-axxia.o
 obj-$(CONFIG_I2C_BCM2835)      += i2c-bcm2835.o
+obj-$(CONFIG_I2C_BCM_IPROC)    += i2c-bcm-iproc.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CADENCE)      += i2c-cadence.o
 obj-$(CONFIG_I2C_CBUS_GPIO)    += i2c-cbus-gpio.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_I2C_DAVINCI)     += i2c-davinci.o
 obj-$(CONFIG_I2C_DESIGNWARE_CORE)      += i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)  += i2c-designware-platform.o
 i2c-designware-platform-objs := i2c-designware-platdrv.o
+i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)       += i2c-designware-pci.o
 i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EFM32)                += i2c-efm32.o
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
new file mode 100644 (file)
index 0000000..d3c8915
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CFG_OFFSET                   0x00
+#define CFG_RESET_SHIFT              31
+#define CFG_EN_SHIFT                 30
+#define CFG_M_RETRY_CNT_SHIFT        16
+#define CFG_M_RETRY_CNT_MASK         0x0f
+
+#define TIM_CFG_OFFSET               0x04
+#define TIM_CFG_MODE_400_SHIFT       31
+
+#define M_FIFO_CTRL_OFFSET           0x0c
+#define M_FIFO_RX_FLUSH_SHIFT        31
+#define M_FIFO_TX_FLUSH_SHIFT        30
+#define M_FIFO_RX_CNT_SHIFT          16
+#define M_FIFO_RX_CNT_MASK           0x7f
+#define M_FIFO_RX_THLD_SHIFT         8
+#define M_FIFO_RX_THLD_MASK          0x3f
+
+#define M_CMD_OFFSET                 0x30
+#define M_CMD_START_BUSY_SHIFT       31
+#define M_CMD_STATUS_SHIFT           25
+#define M_CMD_STATUS_MASK            0x07
+#define M_CMD_STATUS_SUCCESS         0x0
+#define M_CMD_STATUS_LOST_ARB        0x1
+#define M_CMD_STATUS_NACK_ADDR       0x2
+#define M_CMD_STATUS_NACK_DATA       0x3
+#define M_CMD_STATUS_TIMEOUT         0x4
+#define M_CMD_PROTOCOL_SHIFT         9
+#define M_CMD_PROTOCOL_MASK          0xf
+#define M_CMD_PROTOCOL_BLK_WR        0x7
+#define M_CMD_PROTOCOL_BLK_RD        0x8
+#define M_CMD_PEC_SHIFT              8
+#define M_CMD_RD_CNT_SHIFT           0
+#define M_CMD_RD_CNT_MASK            0xff
+
+#define IE_OFFSET                    0x38
+#define IE_M_RX_FIFO_FULL_SHIFT      31
+#define IE_M_RX_THLD_SHIFT           30
+#define IE_M_START_BUSY_SHIFT        28
+
+#define IS_OFFSET                    0x3c
+#define IS_M_RX_FIFO_FULL_SHIFT      31
+#define IS_M_RX_THLD_SHIFT           30
+#define IS_M_START_BUSY_SHIFT        28
+
+#define M_TX_OFFSET                  0x40
+#define M_TX_WR_STATUS_SHIFT         31
+#define M_TX_DATA_SHIFT              0
+#define M_TX_DATA_MASK               0xff
+
+#define M_RX_OFFSET                  0x44
+#define M_RX_STATUS_SHIFT            30
+#define M_RX_STATUS_MASK             0x03
+#define M_RX_PEC_ERR_SHIFT           29
+#define M_RX_DATA_SHIFT              0
+#define M_RX_DATA_MASK               0xff
+
+#define I2C_TIMEOUT_MESC             100
+#define M_TX_RX_FIFO_SIZE            64
+
+enum bus_speed_index {
+       I2C_SPD_100K = 0,
+       I2C_SPD_400K,
+};
+
+struct bcm_iproc_i2c_dev {
+       struct device *device;
+       int irq;
+
+       void __iomem *base;
+
+       struct i2c_adapter adapter;
+
+       struct completion done;
+       int xfer_is_done;
+};
+
+/*
+ * Can be expanded in the future if more interrupt status bits are utilized
+ */
+#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
+
+static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = data;
+       u32 status = readl(iproc_i2c->base + IS_OFFSET);
+
+       status &= ISR_MASK;
+
+       if (!status)
+               return IRQ_NONE;
+
+       writel(status, iproc_i2c->base + IS_OFFSET);
+       iproc_i2c->xfer_is_done = 1;
+       complete_all(&iproc_i2c->done);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                     struct i2c_msg *msg)
+{
+       u32 val;
+
+       val = readl(iproc_i2c->base + M_CMD_OFFSET);
+       val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
+
+       switch (val) {
+       case M_CMD_STATUS_SUCCESS:
+               return 0;
+
+       case M_CMD_STATUS_LOST_ARB:
+               dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+               return -EAGAIN;
+
+       case M_CMD_STATUS_NACK_ADDR:
+               dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+               return -ENXIO;
+
+       case M_CMD_STATUS_NACK_DATA:
+               dev_dbg(iproc_i2c->device, "NAK data\n");
+               return -ENXIO;
+
+       case M_CMD_STATUS_TIMEOUT:
+               dev_dbg(iproc_i2c->device, "bus timeout\n");
+               return -ETIMEDOUT;
+
+       default:
+               dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+               return -EIO;
+       }
+}
+
+static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                        struct i2c_msg *msg)
+{
+       int ret, i;
+       u8 addr;
+       u32 val;
+       unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
+
+       /* need to reserve one byte in the FIFO for the slave address */
+       if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
+               dev_err(iproc_i2c->device,
+                       "only support data length up to %u bytes\n",
+                       M_TX_RX_FIFO_SIZE - 1);
+               return -EOPNOTSUPP;
+       }
+
+       /* check if bus is busy */
+       if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
+              BIT(M_CMD_START_BUSY_SHIFT))) {
+               dev_warn(iproc_i2c->device, "bus is busy\n");
+               return -EBUSY;
+       }
+
+       /* format and load slave address into the TX FIFO */
+       addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
+       writel(addr, iproc_i2c->base + M_TX_OFFSET);
+
+       /* for a write transaction, load data into the TX FIFO */
+       if (!(msg->flags & I2C_M_RD)) {
+               for (i = 0; i < msg->len; i++) {
+                       val = msg->buf[i];
+
+                       /* mark the last byte */
+                       if (i == msg->len - 1)
+                               val |= 1 << M_TX_WR_STATUS_SHIFT;
+
+                       writel(val, iproc_i2c->base + M_TX_OFFSET);
+               }
+       }
+
+       /* mark as incomplete before starting the transaction */
+       reinit_completion(&iproc_i2c->done);
+       iproc_i2c->xfer_is_done = 0;
+
+       /*
+        * Enable the "start busy" interrupt, which will be triggered after the
+        * transaction is done, i.e., the internal start_busy bit, transitions
+        * transaction is done, i.e., the internal start_busy bit transitions
+        */
+       writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
+
+       /*
+        * Now we can activate the transfer. For a read operation, specify the
+        * number of bytes to read
+        */
+       val = 1 << M_CMD_START_BUSY_SHIFT;
+       if (msg->flags & I2C_M_RD) {
+               val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
+                      (msg->len << M_CMD_RD_CNT_SHIFT);
+       } else {
+               val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
+       }
+       writel(val, iproc_i2c->base + M_CMD_OFFSET);
+
+       time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
+
+       /* disable all interrupts */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+       /* read it back to flush the write */
+       readl(iproc_i2c->base + IE_OFFSET);
+
+       /* make sure the interrupt handler isn't running */
+       synchronize_irq(iproc_i2c->irq);
+
+       if (!time_left && !iproc_i2c->xfer_is_done) {
+               dev_err(iproc_i2c->device, "transaction timed out\n");
+
+               /* flush FIFOs */
+               val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+                     (1 << M_FIFO_TX_FLUSH_SHIFT);
+               writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+               return -ETIMEDOUT;
+       }
+
+       ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
+       if (ret) {
+               /* flush both TX/RX FIFOs */
+               val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+                     (1 << M_FIFO_TX_FLUSH_SHIFT);
+               writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+               return ret;
+       }
+
+       /*
+        * For a read operation, we now need to load the data from FIFO
+        * into the memory buffer
+        */
+       if (msg->flags & I2C_M_RD) {
+               for (i = 0; i < msg->len; i++) {
+                       msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
+                                     M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+               }
+       }
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
+                             struct i2c_msg msgs[], int num)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
+       int ret, i;
+
+       /* go through all messages */
+       for (i = 0; i < num; i++) {
+               ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
+               if (ret) {
+                       dev_dbg(iproc_i2c->device, "xfer failed\n");
+                       return ret;
+               }
+       }
+
+       return num;
+}
+
+static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm bcm_iproc_algo = {
+       .master_xfer = bcm_iproc_i2c_xfer,
+       .functionality = bcm_iproc_i2c_functionality,
+};
+
+static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+       unsigned int bus_speed;
+       u32 val;
+       int ret = of_property_read_u32(iproc_i2c->device->of_node,
+                                      "clock-frequency", &bus_speed);
+       if (ret < 0) {
+               dev_info(iproc_i2c->device,
+                       "unable to interpret clock-frequency DT property\n");
+               bus_speed = 100000;
+       }
+
+       if (bus_speed < 100000) {
+               dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
+                       bus_speed);
+               dev_err(iproc_i2c->device,
+                       "valid speeds are 100 kHz and 400 kHz\n");
+               return -EINVAL;
+       } else if (bus_speed < 400000) {
+               bus_speed = 100000;
+       } else {
+               bus_speed = 400000;
+       }
+
+       val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
+       val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+       val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+       writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+
+       dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+       u32 val;
+
+       /* put controller in reset */
+       val = readl(iproc_i2c->base + CFG_OFFSET);
+       val |= 1 << CFG_RESET_SHIFT;
+       val &= ~(1 << CFG_EN_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+
+       /* wait 100 usec per spec */
+       udelay(100);
+
+       /* bring controller out of reset */
+       val &= ~(1 << CFG_RESET_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+
+       /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
+       val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
+       writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+
+       /* disable all interrupts */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+
+       /* clear all pending interrupts */
+       writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+
+       return 0;
+}
+
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                        bool enable)
+{
+       u32 val;
+
+       val = readl(iproc_i2c->base + CFG_OFFSET);
+       if (enable)
+               val |= BIT(CFG_EN_SHIFT);
+       else
+               val &= ~BIT(CFG_EN_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+}
+
+static int bcm_iproc_i2c_probe(struct platform_device *pdev)
+{
+       int irq, ret = 0;
+       struct bcm_iproc_i2c_dev *iproc_i2c;
+       struct i2c_adapter *adap;
+       struct resource *res;
+
+       iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
+                                GFP_KERNEL);
+       if (!iproc_i2c)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, iproc_i2c);
+       iproc_i2c->device = &pdev->dev;
+       init_completion(&iproc_i2c->done);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
+       if (IS_ERR(iproc_i2c->base))
+               return PTR_ERR(iproc_i2c->base);
+
+       ret = bcm_iproc_i2c_init(iproc_i2c);
+       if (ret)
+               return ret;
+
+       ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
+       if (ret)
+               return ret;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(iproc_i2c->device, "no irq resource\n");
+               return irq < 0 ? irq : -ENODEV;
+       }
+       iproc_i2c->irq = irq;
+
+       ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
+                              pdev->name, iproc_i2c);
+       if (ret < 0) {
+               dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
+               return ret;
+       }
+
+       bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+
+       adap = &iproc_i2c->adapter;
+       i2c_set_adapdata(adap, iproc_i2c);
+       strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
+       adap->algo = &bcm_iproc_algo;
+       adap->dev.parent = &pdev->dev;
+       adap->dev.of_node = pdev->dev.of_node;
+
+       ret = i2c_add_adapter(adap);
+       if (ret) {
+               dev_err(iproc_i2c->device, "failed to add adapter\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_remove(struct platform_device *pdev)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
+
+       /* make sure there's no pending interrupt when we remove the adapter */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+       readl(iproc_i2c->base + IE_OFFSET);
+       synchronize_irq(iproc_i2c->irq);
+
+       i2c_del_adapter(&iproc_i2c->adapter);
+       bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+
+       return 0;
+}
+
+static const struct of_device_id bcm_iproc_i2c_of_match[] = {
+       { .compatible = "brcm,iproc-i2c" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
+
+static struct platform_driver bcm_iproc_i2c_driver = {
+       .driver = {
+               .name = "bcm-iproc-i2c",
+               .of_match_table = bcm_iproc_i2c_of_match,
+       },
+       .probe = bcm_iproc_i2c_probe,
+       .remove = bcm_iproc_i2c_remove,
+};
+module_platform_driver(bcm_iproc_i2c_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iProc I2C Driver");
+MODULE_LICENSE("GPL v2");
index 626f74ecd4be4ad40a0b45b2c6730c0afd4b0528..7d7a14cdadfb187d5fd4f95918146c8c6555443a 100644 (file)
  * @suspended:         Flag holding the device's PM status
  * @send_count:                Number of bytes still expected to send
  * @recv_count:                Number of bytes still expected to receive
+ * @curr_recv_count:   Number of bytes to be received in current transfer
  * @irq:               IRQ number
  * @input_clk:         Input clock to I2C controller
  * @i2c_clk:           Maximum I2C clock speed
@@ -146,6 +147,7 @@ struct cdns_i2c {
        u8 suspended;
        unsigned int send_count;
        unsigned int recv_count;
+       unsigned int curr_recv_count;
        int irq;
        unsigned long input_clk;
        unsigned int i2c_clk;
@@ -182,14 +184,15 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
  */
 static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 {
-       unsigned int isr_status, avail_bytes;
-       unsigned int bytes_to_recv, bytes_to_send;
+       unsigned int isr_status, avail_bytes, updatetx;
+       unsigned int bytes_to_send;
        struct cdns_i2c *id = ptr;
        /* Signal completion only after everything is updated */
        int done_flag = 0;
        irqreturn_t status = IRQ_NONE;
 
        isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+       cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
 
        /* Handling nack and arbitration lost interrupt */
        if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -197,89 +200,112 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
                status = IRQ_HANDLED;
        }
 
-       /* Handling Data interrupt */
-       if ((isr_status & CDNS_I2C_IXR_DATA) &&
-                       (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) {
-               /* Always read data interrupt threshold bytes */
-               bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH;
-               id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH;
-               avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-
-               /*
-                * if the tranfer size register value is zero, then
-                * check for the remaining bytes and update the
-                * transfer size register.
-                */
-               if (!avail_bytes) {
-                       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
-                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                       else
-                               cdns_i2c_writereg(id->recv_count,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-               }
+       /*
+        * Check if transfer size register needs to be updated again for a
+        * large data receive operation.
+        */
+       updatetx = 0;
+       if (id->recv_count > id->curr_recv_count)
+               updatetx = 1;
+
+       /* When receiving, handle data interrupt and completion interrupt */
+       if (id->p_recv_buf &&
+           ((isr_status & CDNS_I2C_IXR_COMP) ||
+            (isr_status & CDNS_I2C_IXR_DATA))) {
+               /* Read data if receive data valid is set */
+               while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
+                      CDNS_I2C_SR_RXDV) {
+                       /*
+                        * Clear hold bit that was set for FIFO control if
+                        * RX data left is less than FIFO depth, unless
+                        * repeated start is selected.
+                        */
+                       if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
+                           !id->bus_hold_flag)
+                               cdns_i2c_clear_bus_hold(id);
 
-               /* Process the data received */
-               while (bytes_to_recv--)
                        *(id->p_recv_buf)++ =
                                cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+                       id->recv_count--;
+                       id->curr_recv_count--;
 
-               if (!id->bus_hold_flag &&
-                               (id->recv_count <= CDNS_I2C_FIFO_DEPTH))
-                       cdns_i2c_clear_bus_hold(id);
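+                       /*
+                        * Stop draining once only FIFO depth + 1 bytes of this
+                        * chunk remain, so the transfer size register can be
+                        * reloaded below before the controller NACKs the slave.
+                        */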
+                       if (updatetx &&
+                           (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
+                               break;
+               }
 
-               status = IRQ_HANDLED;
-       }
+               /*
+                * The controller sends NACK to the slave when transfer size
+                * register reaches zero without considering the HOLD bit.
+                * This workaround is implemented for large data transfers to
+                * maintain transfer size non-zero while performing a large
+                * receive operation.
+                */
+               if (updatetx &&
+                   (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
+                       /* wait while fifo is full */
+                       while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
+                              (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
+                               ;
 
-       /* Handling Transfer Complete interrupt */
-       if (isr_status & CDNS_I2C_IXR_COMP) {
-               if (!id->p_recv_buf) {
                        /*
-                        * If the device is sending data If there is further
-                        * data to be sent. Calculate the available space
-                        * in FIFO and fill the FIFO with that many bytes.
+                        * Check number of bytes to be received against maximum
+                        * transfer size and update register accordingly.
                         */
-                       if (id->send_count) {
-                               avail_bytes = CDNS_I2C_FIFO_DEPTH -
-                                   cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-                               if (id->send_count > avail_bytes)
-                                       bytes_to_send = avail_bytes;
-                               else
-                                       bytes_to_send = id->send_count;
-
-                               while (bytes_to_send--) {
-                                       cdns_i2c_writereg(
-                                               (*(id->p_send_buf)++),
-                                                CDNS_I2C_DATA_OFFSET);
-                                       id->send_count--;
-                               }
+                       if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) >
+                           CDNS_I2C_TRANSFER_SIZE) {
+                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+                                                 CDNS_I2C_XFER_SIZE_OFFSET);
+                               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
+                                                     CDNS_I2C_FIFO_DEPTH;
                        } else {
-                               /*
-                                * Signal the completion of transaction and
-                                * clear the hold bus bit if there are no
-                                * further messages to be processed.
-                                */
-                               done_flag = 1;
+                               cdns_i2c_writereg(id->recv_count -
+                                                 CDNS_I2C_FIFO_DEPTH,
+                                                 CDNS_I2C_XFER_SIZE_OFFSET);
+                               id->curr_recv_count = id->recv_count;
                        }
-                       if (!id->send_count && !id->bus_hold_flag)
-                               cdns_i2c_clear_bus_hold(id);
-               } else {
+               }
+
+               /* Clear hold (if not repeated start) and signal completion */
+               if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) {
                        if (!id->bus_hold_flag)
                                cdns_i2c_clear_bus_hold(id);
+                       done_flag = 1;
+               }
+
+               status = IRQ_HANDLED;
+       }
+
+       /* When sending, handle transfer complete interrupt */
+       if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) {
+               /*
+                * If there is more data to be sent, calculate the
+                * space available in FIFO and fill with that many bytes.
+                */
+               if (id->send_count) {
+                       avail_bytes = CDNS_I2C_FIFO_DEPTH -
+                           cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+                       if (id->send_count > avail_bytes)
+                               bytes_to_send = avail_bytes;
+                       else
+                               bytes_to_send = id->send_count;
+
+                       while (bytes_to_send--) {
+                               cdns_i2c_writereg(
+                                       (*(id->p_send_buf)++),
+                                        CDNS_I2C_DATA_OFFSET);
+                               id->send_count--;
+                       }
+               } else {
                        /*
-                        * If the device is receiving data, then signal
-                        * the completion of transaction and read the data
-                        * present in the FIFO. Signal the completion of
-                        * transaction.
+                        * Signal the completion of transaction and
+                        * clear the hold bus bit if there are no
+                        * further messages to be processed.
                         */
-                       while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
-                                       CDNS_I2C_SR_RXDV) {
-                               *(id->p_recv_buf)++ =
-                                       cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
-                               id->recv_count--;
-                       }
                        done_flag = 1;
                }
+               if (!id->send_count && !id->bus_hold_flag)
+                       cdns_i2c_clear_bus_hold(id);
 
                status = IRQ_HANDLED;
        }
@@ -289,8 +315,6 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
        if (id->err_status)
                status = IRQ_HANDLED;
 
-       cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
-
        if (done_flag)
                complete(&id->xfer_done);
 
@@ -316,6 +340,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
        if (id->p_msg->flags & I2C_M_RECV_LEN)
                id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
 
+       id->curr_recv_count = id->recv_count;
+
        /*
         * Check for the message size against FIFO depth and set the
         * 'hold bus' bit if it is greater than FIFO depth.
@@ -335,11 +361,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
         * receive if it is less than transfer size and transfer size if
         * it is more. Enable the interrupts.
         */
-       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
+       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
                cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
                                  CDNS_I2C_XFER_SIZE_OFFSET);
-       else
+               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+       } else {
                cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
+       }
+
        /* Clear the bus hold flag if bytes to receive is less than FIFO size */
        if (!id->bus_hold_flag &&
                ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
@@ -516,6 +545,20 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * processed with a repeated start.
         */
        if (num > 1) {
+               /*
+                * This controller does not give completion interrupt after a
+                * master receive message if HOLD bit is set (repeated start),
+                * resulting in SW timeout. Hence, if a receive message is
+                * followed by any other message, an error is returned
+                * indicating that this sequence is not supported.
+                */
+               for (count = 0; count < num - 1; count++) {
+                       if (msgs[count].flags & I2C_M_RD) {
+                               dev_warn(adap->dev.parent,
+                                        "Can't do repeated start after a receive message\n");
+                               return -EOPNOTSUPP;
+                       }
+               }
                id->bus_hold_flag = 1;
                reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
                reg |= CDNS_I2C_CR_HOLD;
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
new file mode 100644 (file)
index 0000000..7d7ae97
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Intel BayTrail PMIC I2C bus semaphore implementation
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include <asm/iosf_mbi.h>
+
+#include "i2c-designware-core.h"
+
+#define SEMAPHORE_TIMEOUT      100     /* timeout in milliseconds */
+#define PUNIT_SEMAPHORE                0x7
+#define PUNIT_SEMAPHORE_BIT    BIT(0)
+#define PUNIT_SEMAPHORE_ACQUIRE        BIT(1)
+
+/* jiffies timestamp of the last successful PUNIT semaphore acquisition */
+static unsigned long acquired;
+
+static int get_sem(struct device *dev, u32 *sem)
+{
+       u32 data;
+       int ret;
+
+       ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
+                               &data);
+       if (ret) {
+               dev_err(dev, "iosf failed to read punit semaphore\n");
+               return ret;
+       }
+
+       *sem = data & PUNIT_SEMAPHORE_BIT;
+
+       return 0;
+}
+
+static void reset_semaphore(struct device *dev)
+{
+       u32 data;
+
+       if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+                               PUNIT_SEMAPHORE, &data)) {
+               dev_err(dev, "iosf failed to reset punit semaphore during read\n");
+               return;
+       }
+
+       data &= ~PUNIT_SEMAPHORE_BIT;
+       if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+                               PUNIT_SEMAPHORE, data))
+               dev_err(dev, "iosf failed to reset punit semaphore during write\n");
+}
+
+static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
+{
+       u32 sem;
+       int ret;
+       unsigned long start, end;
+
+       might_sleep();
+
+       if (!dev || !dev->dev)
+               return -ENODEV;
+
+       if (!dev->release_lock)
+               return 0;
+
+       /* host driver writes to side band semaphore register */
+       ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+                               PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE);
+       if (ret) {
+               dev_err(dev->dev, "iosf punit semaphore request failed\n");
+               return ret;
+       }
+
+       /* host driver waits for bit 0 to be set in semaphore register */
+       start = jiffies;
+       end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
+       do {
+               ret = get_sem(dev->dev, &sem);
+               if (!ret && sem) {
+                       acquired = jiffies;
+                       dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
+                               jiffies_to_msecs(jiffies - start));
+                       return 0;
+               }
+
+               usleep_range(1000, 2000);
+       } while (time_before(jiffies, end));
+
+       dev_err(dev->dev, "punit semaphore timed out, resetting\n");
+       reset_semaphore(dev->dev);
+
+       ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+                               PUNIT_SEMAPHORE, &sem);
+       if (ret)
+               dev_err(dev->dev, "iosf failed to read punit semaphore\n");
+       else
+               dev_err(dev->dev, "PUNIT SEM: %u\n", sem);
+
+       WARN_ON(1);
+
+       return -ETIMEDOUT;
+}
+
+static void baytrail_i2c_release(struct dw_i2c_dev *dev)
+{
+       if (!dev || !dev->dev)
+               return;
+
+       if (!dev->acquire_lock)
+               return;
+
+       reset_semaphore(dev->dev);
+       dev_dbg(dev->dev, "punit semaphore held for %ums\n",
+               jiffies_to_msecs(jiffies - acquired));
+}
+
+int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
+{
+       acpi_status status;
+       unsigned long long shared_host = 0;
+       acpi_handle handle;
+
+       if (!dev || !dev->dev)
+               return 0;
+
+       handle = ACPI_HANDLE(dev->dev);
+       if (!handle)
+               return 0;
+
+       status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+       if (ACPI_FAILURE(status))
+               return 0;
+
+       if (shared_host) {
+               dev_info(dev->dev, "I2C bus managed by PUNIT\n");
+               dev->acquire_lock = baytrail_i2c_acquire;
+               dev->release_lock = baytrail_i2c_release;
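+               /* PUNIT firmware may access the bus at any time, so runtime PM must stay off */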
+               dev->pm_runtime_disabled = true;
+       }
+
+       if (!iosf_mbi_available())
+               return -EPROBE_DEFER;
+
+       return 0;
+}
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
+MODULE_LICENSE("GPL v2");
index 23628b7bfb8d8df208c6e434efb95e887dfad6e6..6e25c010e69037a544ad04f82df46e27e80ca9af 100644 (file)
@@ -170,10 +170,10 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
        u32 value;
 
        if (dev->accessor_flags & ACCESS_16BIT)
-               value = readw(dev->base + offset) |
-                       (readw(dev->base + offset + 2) << 16);
+               value = readw_relaxed(dev->base + offset) |
+                       (readw_relaxed(dev->base + offset + 2) << 16);
        else
-               value = readl(dev->base + offset);
+               value = readl_relaxed(dev->base + offset);
 
        if (dev->accessor_flags & ACCESS_SWAP)
                return swab32(value);
@@ -187,10 +187,10 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
                b = swab32(b);
 
        if (dev->accessor_flags & ACCESS_16BIT) {
-               writew((u16)b, dev->base + offset);
-               writew((u16)(b >> 16), dev->base + offset + 2);
+               writew_relaxed((u16)b, dev->base + offset);
+               writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
        } else {
-               writel(b, dev->base + offset);
+               writel_relaxed(b, dev->base + offset);
        }
 }
 
@@ -285,6 +285,15 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        u32 hcnt, lcnt;
        u32 reg;
        u32 sda_falling_time, scl_falling_time;
+       int ret;
+
+       if (dev->acquire_lock) {
+               ret = dev->acquire_lock(dev);
+               if (ret) {
+                       dev_err(dev->dev, "couldn't acquire bus ownership\n");
+                       return ret;
+               }
+       }
 
        input_clock_khz = dev->get_clk_rate_khz(dev);
 
@@ -298,6 +307,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        } else if (reg != DW_IC_COMP_TYPE_VALUE) {
                dev_err(dev->dev, "Unknown Synopsys component type: "
                        "0x%08x\n", reg);
+               if (dev->release_lock)
+                       dev->release_lock(dev);
                return -ENODEV;
        }
 
@@ -309,40 +320,39 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
        scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
 
-       /* Standard-mode */
-       hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-                               4000,   /* tHD;STA = tHIGH = 4.0 us */
-                               sda_falling_time,
-                               0,      /* 0: DW default, 1: Ideal */
-                               0);     /* No offset */
-       lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-                               4700,   /* tLOW = 4.7 us */
-                               scl_falling_time,
-                               0);     /* No offset */
-
-       /* Allow platforms to specify the ideal HCNT and LCNT values */
+       /* Set SCL timing parameters for standard-mode */
        if (dev->ss_hcnt && dev->ss_lcnt) {
                hcnt = dev->ss_hcnt;
                lcnt = dev->ss_lcnt;
+       } else {
+               hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+                                       4000,   /* tHD;STA = tHIGH = 4.0 us */
+                                       sda_falling_time,
+                                       0,      /* 0: DW default, 1: Ideal */
+                                       0);     /* No offset */
+               lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+                                       4700,   /* tLOW = 4.7 us */
+                                       scl_falling_time,
+                                       0);     /* No offset */
        }
        dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
        dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
        dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
-       /* Fast-mode */
-       hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-                               600,    /* tHD;STA = tHIGH = 0.6 us */
-                               sda_falling_time,
-                               0,      /* 0: DW default, 1: Ideal */
-                               0);     /* No offset */
-       lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-                               1300,   /* tLOW = 1.3 us */
-                               scl_falling_time,
-                               0);     /* No offset */
-
+       /* Set SCL timing parameters for fast-mode */
        if (dev->fs_hcnt && dev->fs_lcnt) {
                hcnt = dev->fs_hcnt;
                lcnt = dev->fs_lcnt;
+       } else {
+               hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+                                       600,    /* tHD;STA = tHIGH = 0.6 us */
+                                       sda_falling_time,
+                                       0,      /* 0: DW default, 1: Ideal */
+                                       0);     /* No offset */
+               lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+                                       1300,   /* tLOW = 1.3 us */
+                                       scl_falling_time,
+                                       0);     /* No offset */
        }
        dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
        dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
@@ -364,6 +374,9 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 
        /* configure the i2c master */
        dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
+       if (dev->release_lock)
+               dev->release_lock(dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -627,6 +640,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        dev->abort_source = 0;
        dev->rx_outstanding = 0;
 
+       if (dev->acquire_lock) {
+               ret = dev->acquire_lock(dev);
+               if (ret) {
+                       dev_err(dev->dev, "couldn't acquire bus ownership\n");
+                       goto done_nolock;
+               }
+       }
+
        ret = i2c_dw_wait_bus_not_busy(dev);
        if (ret < 0)
                goto done;
@@ -672,6 +693,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        ret = -EIO;
 
 done:
+       if (dev->release_lock)
+               dev->release_lock(dev);
+
+done_nolock:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
        mutex_unlock(&dev->lock);
index 5a410ef17abd40c0ab7c7ece3ab3fa539adf1f95..9630222abf32197f48d580e43a14b049821f570f 100644 (file)
@@ -61,6 +61,9 @@
  * @ss_lcnt: standard speed LCNT value
  * @fs_hcnt: fast speed HCNT value
  * @fs_lcnt: fast speed LCNT value
+ * @acquire_lock: function to acquire a hardware lock on the bus
+ * @release_lock: function to release a hardware lock on the bus
+ * @pm_runtime_disabled: true if pm runtime is disabled
  *
  * HCNT and LCNT parameters can be used if the platform knows more accurate
  * values than the one computed based only on the input clock frequency.
@@ -101,6 +104,9 @@ struct dw_i2c_dev {
        u16                     ss_lcnt;
        u16                     fs_hcnt;
        u16                     fs_lcnt;
+       int                     (*acquire_lock)(struct dw_i2c_dev *dev);
+       void                    (*release_lock)(struct dw_i2c_dev *dev);
+       bool                    pm_runtime_disabled;
 };
 
 #define ACCESS_SWAP            0x00000001
@@ -119,3 +125,9 @@ extern void i2c_dw_disable(struct dw_i2c_dev *dev);
 extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
 extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
+
+#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
+extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+#else
+static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+#endif
index acb40f95db78f512c561f6d4d2b5b67479844811..6643d2dc0b250ddbf022c669db4fd2b4b4f848e7 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Texas Instruments.
  * Copyright (C) 2007 MontaVista Software Inc.
  * Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011 Intel corporation.
+ * Copyright (C) 2011, 2015 Intel Corporation.
  *
  * ----------------------------------------------------------------------------
  *
 #define DRIVER_NAME "i2c-designware-pci"
 
 enum dw_pci_ctl_id_t {
-       moorestown_0,
-       moorestown_1,
-       moorestown_2,
-
        medfield_0,
        medfield_1,
        medfield_2,
@@ -101,28 +97,7 @@ static struct dw_scl_sda_cfg hsw_config = {
        .sda_hold = 0x9,
 };
 
-static struct  dw_pci_controller  dw_pci_controllers[] = {
-       [moorestown_0] = {
-               .bus_num     = 0,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
-       [moorestown_1] = {
-               .bus_num     = 1,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
-       [moorestown_2] = {
-               .bus_num     = 2,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
+static struct dw_pci_controller dw_pci_controllers[] = {
        [medfield_0] = {
                .bus_num     = 0,
                .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
@@ -170,7 +145,6 @@ static struct  dw_pci_controller  dw_pci_controllers[] = {
                .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
                .tx_fifo_depth = 32,
                .rx_fifo_depth = 32,
-               .clk_khz = 100000,
                .functionality = I2C_FUNC_10BIT_ADDR,
                .scl_sda_cfg = &byt_config,
        },
@@ -179,7 +153,6 @@ static struct  dw_pci_controller  dw_pci_controllers[] = {
                .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
                .tx_fifo_depth = 32,
                .rx_fifo_depth = 32,
-               .clk_khz = 100000,
                .functionality = I2C_FUNC_10BIT_ADDR,
                .scl_sda_cfg = &hsw_config,
        },
@@ -259,7 +232,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
        dev->functionality = controller->functionality |
                                DW_DEFAULT_FUNCTIONALITY;
 
-       dev->master_cfg =  controller->bus_cfg;
+       dev->master_cfg = controller->bus_cfg;
        if (controller->scl_sda_cfg) {
                cfg = controller->scl_sda_cfg;
                dev->ss_hcnt = cfg->ss_hcnt;
@@ -325,12 +298,8 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
 MODULE_ALIAS("i2c_designware-pci");
 
 static const struct pci_device_id i2_designware_pci_ids[] = {
-       /* Moorestown */
-       { PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
-       { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
-       { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
        /* Medfield */
-       { PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
+       { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
        { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
        { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
        { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
@@ -348,7 +317,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x9c61), haswell },
        { PCI_VDEVICE(INTEL, 0x9c62), haswell },
        /* Braswell / Cherrytrail */
-       { PCI_VDEVICE(INTEL, 0x22C1), baytrail,},
+       { PCI_VDEVICE(INTEL, 0x22C1), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C2), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C3), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C4), baytrail },
index 2b463c313e4e03305282b76c5fdefa8f90a48874..c270f5f9a8f9af3d3712bbd0f99874708875aa18 100644 (file)
@@ -195,6 +195,10 @@ static int dw_i2c_probe(struct platform_device *pdev)
                        clk_freq = pdata->i2c_scl_freq;
        }
 
+       r = i2c_dw_eval_lock_support(dev);
+       if (r)
+               return r;
+
        dev->functionality =
                I2C_FUNC_I2C |
                I2C_FUNC_10BIT_ADDR |
@@ -257,10 +261,14 @@ static int dw_i2c_probe(struct platform_device *pdev)
                return r;
        }
 
-       pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
+       if (dev->pm_runtime_disabled) {
+               pm_runtime_forbid(&pdev->dev);
+       } else {
+               pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+               pm_runtime_use_autosuspend(&pdev->dev);
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        return 0;
 }
@@ -310,7 +318,9 @@ static int dw_i2c_resume(struct device *dev)
        struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
 
        clk_prepare_enable(i_dev->clk);
-       i2c_dw_init(i_dev);
+
+       if (!i_dev->pm_runtime_disabled)
+               i2c_dw_init(i_dev);
 
        return 0;
 }
index 7f3a9fe9bf4e729a2c446ba905a117a325f17621..d7b26fc6f432005bb02a2394fd4bfb6720271ab1 100644 (file)
@@ -201,7 +201,7 @@ struct imx_i2c_struct {
        void __iomem            *base;
        wait_queue_head_t       queue;
        unsigned long           i2csr;
-       unsigned int            disable_delay;
+       unsigned int            disable_delay;
        int                     stopped;
        unsigned int            ifdr; /* IMX_I2C_IFDR */
        unsigned int            cur_clk;
@@ -295,7 +295,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
        dma->chan_tx = dma_request_slave_channel(dev, "tx");
        if (!dma->chan_tx) {
                dev_dbg(dev, "can't request DMA tx channel\n");
-               ret = -ENODEV;
                goto fail_al;
        }
 
@@ -313,7 +312,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
        dma->chan_rx = dma_request_slave_channel(dev, "rx");
        if (!dma->chan_rx) {
                dev_dbg(dev, "can't request DMA rx channel\n");
-               ret = -ENODEV;
                goto fail_tx;
        }
 
@@ -481,8 +479,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
        i2c_clk_rate = clk_get_rate(i2c_imx->clk);
        if (i2c_imx->cur_clk == i2c_clk_rate)
                return;
-       else
-               i2c_imx->cur_clk = i2c_clk_rate;
+
+       i2c_imx->cur_clk = i2c_clk_rate;
 
        div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
        if (div < i2c_clk_div[0].div)
@@ -490,7 +488,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
        else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
                i = i2c_imx->hwdata->ndivs - 1;
        else
-               for (i = 0; i2c_clk_div[i].div < div; i++);
+               for (i = 0; i2c_clk_div[i].div < div; i++)
+                       ;
 
        /* Store divider value */
        i2c_imx->ifdr = i2c_clk_div[i].val;
@@ -628,9 +627,9 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
        result = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
-       if (result <= 0) {
+       if (result == 0) {
                dmaengine_terminate_all(dma->chan_using);
-               return result ?: -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        /* Waiting for transfer complete. */
@@ -686,9 +685,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        result = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
-       if (result <= 0) {
+       if (result == 0) {
                dmaengine_terminate_all(dma->chan_using);
-               return result ?: -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        /* waiting for transfer complete. */
@@ -822,6 +821,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
        /* read data */
        for (i = 0; i < msgs->len; i++) {
                u8 len = 0;
+
                result = i2c_imx_trx_complete(i2c_imx);
                if (result)
                        return result;
@@ -917,15 +917,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
                /* write/read data */
 #ifdef CONFIG_I2C_DEBUG_BUS
                temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-               dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
-                       "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
+               dev_dbg(&i2c_imx->adapter.dev,
+                       "<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n",
+                       __func__,
                        (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
                        (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
                        (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
                temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
                dev_dbg(&i2c_imx->adapter.dev,
-                       "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
-                       "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
+                       "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n",
+                       __func__,
                        (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0),
                        (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0),
                        (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
@@ -1004,7 +1005,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
        i2c_imx->adapter.owner          = THIS_MODULE;
        i2c_imx->adapter.algo           = &i2c_imx_algo;
        i2c_imx->adapter.dev.parent     = &pdev->dev;
-       i2c_imx->adapter.nr             = pdev->id;
+       i2c_imx->adapter.nr             = pdev->id;
        i2c_imx->adapter.dev.of_node    = pdev->dev.of_node;
        i2c_imx->base                   = base;
 
@@ -1063,7 +1064,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
                i2c_imx->adapter.name);
        dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 
-       /* Init DMA config if support*/
+       /* Init DMA config if supported */
        i2c_imx_dma_request(i2c_imx, phy_addr);
 
        return 0;   /* Return OK */
index 7249b5b1e5d091bbd9d906d4bc16cd89fbd36d82..abf5db7e441ebab65fc7c8ad99b5f9bca6218b15 100644 (file)
@@ -12,6 +12,7 @@
  * kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -35,7 +36,9 @@ struct ocores_i2c {
        int pos;
        int nmsgs;
        int state; /* see STATE_ */
-       int clock_khz;
+       struct clk *clk;
+       int ip_clock_khz;
+       int bus_clock_khz;
        void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
        u8 (*getreg)(struct ocores_i2c *i2c, int reg);
 };
@@ -215,21 +218,34 @@ static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                return -ETIMEDOUT;
 }
 
-static void ocores_init(struct ocores_i2c *i2c)
+static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
 {
        int prescale;
+       int diff;
        u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
 
        /* make sure the device is disabled */
        oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
-       prescale = (i2c->clock_khz / (5*100)) - 1;
+       prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1;
+       prescale = clamp(prescale, 0, 0xffff);
+
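+       /* error out if the achievable bus clock deviates from the requested rate by more than 10% */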
+       diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz;
+       if (abs(diff) > i2c->bus_clock_khz / 10) {
+               dev_err(dev,
+                       "Unsupported clock settings: core: %d kHz, bus: %d kHz\n",
+                       i2c->ip_clock_khz, i2c->bus_clock_khz);
+               return -EINVAL;
+       }
+
        oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff);
        oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
 
        /* Init the device */
        oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
        oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN);
+
+       return 0;
 }
 
 
@@ -304,6 +320,8 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match;
        u32 val;
+       u32 clock_frequency;
+       bool clock_frequency_present;
 
        if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
                /* no 'reg-shift', check for deprecated 'regstep' */
@@ -319,12 +337,42 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
                }
        }
 
-       if (of_property_read_u32(np, "clock-frequency", &val)) {
-               dev_err(&pdev->dev,
-                       "Missing required parameter 'clock-frequency'\n");
-               return -ENODEV;
+       clock_frequency_present = !of_property_read_u32(np, "clock-frequency",
+                                                       &clock_frequency);
+       i2c->bus_clock_khz = 100;
+
+       i2c->clk = devm_clk_get(&pdev->dev, NULL);
+
+       if (!IS_ERR(i2c->clk)) {
+               int ret = clk_prepare_enable(i2c->clk);
+
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "clk_prepare_enable failed: %d\n", ret);
+                       return ret;
+               }
+               i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
+               if (clock_frequency_present)
+                       i2c->bus_clock_khz = clock_frequency / 1000;
+       }
+
+       if (i2c->ip_clock_khz == 0) {
+               if (of_property_read_u32(np, "opencores,ip-clock-frequency",
+                                               &val)) {
+                       if (!clock_frequency_present) {
+                               dev_err(&pdev->dev,
+                                       "Missing required parameter 'opencores,ip-clock-frequency'\n");
+                               return -ENODEV;
+                       }
+                       i2c->ip_clock_khz = clock_frequency / 1000;
+                       dev_warn(&pdev->dev,
+                                "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n");
+               } else {
+                       i2c->ip_clock_khz = val / 1000;
+                       if (clock_frequency_present)
+                               i2c->bus_clock_khz = clock_frequency / 1000;
+               }
        }
-       i2c->clock_khz = val / 1000;
 
        of_property_read_u32(pdev->dev.of_node, "reg-io-width",
                                &i2c->reg_io_width);
@@ -368,7 +416,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        if (pdata) {
                i2c->reg_shift = pdata->reg_shift;
                i2c->reg_io_width = pdata->reg_io_width;
-               i2c->clock_khz = pdata->clock_khz;
+               i2c->ip_clock_khz = pdata->clock_khz;
+               i2c->bus_clock_khz = 100;
        } else {
                ret = ocores_i2c_of_probe(pdev, i2c);
                if (ret)
@@ -402,7 +451,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
                }
        }
 
-       ocores_init(i2c);
+       ret = ocores_init(&pdev->dev, i2c);
+       if (ret)
+               return ret;
 
        init_waitqueue_head(&i2c->wait);
        ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
@@ -446,6 +497,9 @@ static int ocores_i2c_remove(struct platform_device *pdev)
        /* remove adapter & data */
        i2c_del_adapter(&i2c->adap);
 
+       if (!IS_ERR(i2c->clk))
+               clk_disable_unprepare(i2c->clk);
+
        return 0;
 }
 
@@ -458,6 +512,8 @@ static int ocores_i2c_suspend(struct device *dev)
        /* make sure the device is disabled */
        oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
+       if (!IS_ERR(i2c->clk))
+               clk_disable_unprepare(i2c->clk);
        return 0;
 }
 
@@ -465,9 +521,20 @@ static int ocores_i2c_resume(struct device *dev)
 {
        struct ocores_i2c *i2c = dev_get_drvdata(dev);
 
-       ocores_init(i2c);
+       if (!IS_ERR(i2c->clk)) {
+               unsigned long rate;
+               int ret = clk_prepare_enable(i2c->clk);
 
-       return 0;
+               if (ret) {
+                       dev_err(dev,
+                               "clk_prepare_enable failed: %d\n", ret);
+                       return ret;
+               }
+               rate = clk_get_rate(i2c->clk) / 1000;
+               if (rate)
+                       i2c->ip_clock_khz = rate;
+       }
+       return ocores_init(dev, i2c);
 }
 
 static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
index 44f03eed00dd4f36ae655df26315699556a576a8..d37d9db6681e7b5745a45331ae60737a9b36f99a 100644 (file)
@@ -148,13 +148,6 @@ static inline u32 pmcmsptwi_clock_to_reg(
        return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
 }
 
-static inline void pmcmsptwi_reg_to_clock(
-                       u32 reg, struct pmcmsptwi_clock *clock)
-{
-       clock->filter = (reg >> 12) & 0xf;
-       clock->clock = reg & 0x03ff;
-}
-
 static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
 {
        return ((cfg->arbf & 0xf) << 12) |
index 92462843db663d09b1106468603b1b9eb65243d2..5f96b1b3e3a5a30e2163098e4afe94fc4a06deeb 100644 (file)
@@ -102,6 +102,9 @@ struct rk3x_i2c {
 
        /* Settings */
        unsigned int scl_frequency;
+       unsigned int scl_rise_ns;
+       unsigned int scl_fall_ns;
+       unsigned int sda_fall_ns;
 
        /* Synchronization & notification */
        spinlock_t lock;
@@ -435,6 +438,9 @@ out:
  *
  * @clk_rate: I2C input clock rate
  * @scl_rate: Desired SCL rate
+ * @scl_rise_ns: How many ns it takes for SCL to rise.
+ * @scl_fall_ns: How many ns it takes for SCL to fall.
+ * @sda_fall_ns: How many ns it takes for SDA to fall.
  * @div_low: Divider output for low
  * @div_high: Divider output for high
  *
@@ -443,11 +449,16 @@ out:
  * too high, we silently use the highest possible rate.
  */
 static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
+                             unsigned long scl_rise_ns,
+                             unsigned long scl_fall_ns,
+                             unsigned long sda_fall_ns,
                              unsigned long *div_low, unsigned long *div_high)
 {
-       unsigned long min_low_ns, min_high_ns;
-       unsigned long max_data_hold_ns;
+       unsigned long spec_min_low_ns, spec_min_high_ns;
+       unsigned long spec_setup_start, spec_max_data_hold_ns;
        unsigned long data_hold_buffer_ns;
+
+       unsigned long min_low_ns, min_high_ns;
        unsigned long max_low_ns, min_total_ns;
 
        unsigned long clk_rate_khz, scl_rate_khz;
@@ -469,29 +480,50 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
                scl_rate = 1000;
 
        /*
-        * min_low_ns:  The minimum number of ns we need to hold low
-        *              to meet i2c spec
-        * min_high_ns: The minimum number of ns we need to hold high
-        *              to meet i2c spec
-        * max_low_ns:  The maximum number of ns we can hold low
-        *              to meet i2c spec
+        * min_low_ns:  The minimum number of ns we need to hold low to
+        *              meet I2C specification, should include fall time.
+        * min_high_ns: The minimum number of ns we need to hold high to
+        *              meet I2C specification, should include rise time.
+        * max_low_ns:  The maximum number of ns we can hold low to meet
+        *              I2C specification.
         *
-        * Note: max_low_ns should be (max data hold time * 2 - buffer)
+        * Note: max_low_ns should be (maximum data hold time * 2 - buffer)
         *       This is because the i2c host on Rockchip holds the data line
         *       for half the low time.
         */
        if (scl_rate <= 100000) {
-               min_low_ns = 4700;
-               min_high_ns = 4000;
-               max_data_hold_ns = 3450;
+               /* Standard-mode */
+               spec_min_low_ns = 4700;
+               spec_setup_start = 4700;
+               spec_min_high_ns = 4000;
+               spec_max_data_hold_ns = 3450;
                data_hold_buffer_ns = 50;
        } else {
-               min_low_ns = 1300;
-               min_high_ns = 600;
-               max_data_hold_ns = 900;
+               /* Fast-mode */
+               spec_min_low_ns = 1300;
+               spec_setup_start = 600;
+               spec_min_high_ns = 600;
+               spec_max_data_hold_ns = 900;
                data_hold_buffer_ns = 50;
        }
-       max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns;
+       min_high_ns = scl_rise_ns + spec_min_high_ns;
+
+       /*
+        * Timings for repeated start:
+        * - controller appears to drop SDA at .875x (7/8) programmed clk high.
+        * - controller appears to keep SCL high for 2x programmed clk high.
+        *
+        * We need to account for those rules in picking our "high" time so
+        * we meet tSU;STA and tHD;STA times.
+        */
+       min_high_ns = max(min_high_ns,
+               DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+       min_high_ns = max(min_high_ns,
+               DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
+                             sda_fall_ns + spec_min_high_ns), 2));
+
+       min_low_ns = scl_fall_ns + spec_min_low_ns;
+       max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
        min_total_ns = min_low_ns + min_high_ns;
 
        /* Adjust to avoid overflow */
@@ -510,8 +542,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
        min_div_for_hold = (min_low_div + min_high_div);
 
        /*
-        * This is the maximum divider so we don't go over the max.
-        * We don't round up here (we round down) since this is a max.
+        * This is the maximum divider so we don't go over the maximum.
+        * We don't round up here (we round down) since this is a maximum.
         */
        max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
 
@@ -544,7 +576,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
                ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
                                             scl_rate_khz * 8 * min_total_ns);
 
-               /* Don't allow it to go over the max */
+               /* Don't allow it to go over the maximum */
                if (ideal_low_div > max_low_div)
                        ideal_low_div = max_low_div;
 
@@ -588,9 +620,9 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
        u64 t_low_ns, t_high_ns;
        int ret;
 
-       ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low,
-                                &div_high);
-
+       ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
+                                i2c->scl_fall_ns, i2c->sda_fall_ns,
+                                &div_low, &div_high);
        WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
 
        clk_enable(i2c->clk);
@@ -633,9 +665,10 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
        switch (event) {
        case PRE_RATE_CHANGE:
                if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
-                                     &div_low, &div_high) != 0) {
+                                      i2c->scl_rise_ns, i2c->scl_fall_ns,
+                                      i2c->sda_fall_ns,
+                                      &div_low, &div_high) != 0)
                        return NOTIFY_STOP;
-               }
 
                /* scale up */
                if (ndata->new_rate > ndata->old_rate)
@@ -859,6 +892,24 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
                i2c->scl_frequency = DEFAULT_SCL_RATE;
        }
 
+       /*
+        * Read rise and fall time from device tree. If not available use
+        * the default maximum timing from the specification.
+        */
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
+                                &i2c->scl_rise_ns)) {
+               if (i2c->scl_frequency <= 100000)
+                       i2c->scl_rise_ns = 1000;
+               else
+                       i2c->scl_rise_ns = 300;
+       }
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
+                                &i2c->scl_fall_ns))
+               i2c->scl_fall_ns = 300;
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
+                                &i2c->sda_fall_ns))
+               i2c->sda_fall_ns = i2c->scl_fall_ns;
+
        strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
        i2c->adap.owner = THIS_MODULE;
        i2c->adap.algo = &rk3x_i2c_algorithm;
index 28b87e683503df42c2bc11ee36c95b505e04f884..29f14331dd9d01fcb5d66f1e74602b61da9593cf 100644 (file)
@@ -286,6 +286,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (rx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                val = i2c_readl(i2c_dev, I2C_RX_FIFO);
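+               /* store as little-endian so the partial-word memcpy below copies bytes in bus order on any CPU endianness */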
+               val = cpu_to_le32(val);
                memcpy(buf, &val, buf_remaining);
                buf_remaining = 0;
                rx_fifo_avail--;
@@ -344,6 +345,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
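+               /* buf holds bytes in bus (little-endian) order; convert to CPU order before the FIFO register write below */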
+               val = le32_to_cpu(val);
 
                /* Again update before writing to FIFO to make sure isr sees. */
                i2c_dev->msg_buf_remaining = 0;
index e9eae57a2b50f77e3d25c4d9fcfa003728464740..edf274cabe817208f0c4b3fceb98bca95f2671f4 100644 (file)
@@ -102,7 +102,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
                struct acpi_resource_i2c_serialbus *sb;
 
                sb = &ares->data.i2c_serial_bus;
-               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+               if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
                        info->addr = sb->slave_address;
                        if (sb->access_mode == ACPI_I2C_10BIT_MODE)
                                info->flags |= I2C_CLIENT_TEN;
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
                status = driver->remove(client);
        }
 
-       if (dev->of_node)
-               irq_dispose_mapping(client->irq);
-
        dev_pm_domain_detach(&client->dev, true);
        return status;
 }
@@ -698,101 +695,6 @@ static void i2c_device_shutdown(struct device *dev)
                driver->shutdown(client);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
-       struct i2c_client *client = i2c_verify_client(dev);
-       struct i2c_driver *driver;
-
-       if (!client || !dev->driver)
-               return 0;
-       driver = to_i2c_driver(dev->driver);
-       if (!driver->suspend)
-               return 0;
-       return driver->suspend(client, mesg);
-}
-
-static int i2c_legacy_resume(struct device *dev)
-{
-       struct i2c_client *client = i2c_verify_client(dev);
-       struct i2c_driver *driver;
-
-       if (!client || !dev->driver)
-               return 0;
-       driver = to_i2c_driver(dev->driver);
-       if (!driver->resume)
-               return 0;
-       return driver->resume(client);
-}
-
-static int i2c_device_pm_suspend(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_suspend(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int i2c_device_pm_resume(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_resume(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_freeze(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_freeze(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int i2c_device_pm_thaw(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_thaw(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_poweroff(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_poweroff(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int i2c_device_pm_restore(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_restore(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-#else /* !CONFIG_PM_SLEEP */
-#define i2c_device_pm_suspend  NULL
-#define i2c_device_pm_resume   NULL
-#define i2c_device_pm_freeze   NULL
-#define i2c_device_pm_thaw     NULL
-#define i2c_device_pm_poweroff NULL
-#define i2c_device_pm_restore  NULL
-#endif /* !CONFIG_PM_SLEEP */
-
 static void i2c_client_dev_release(struct device *dev)
 {
        kfree(to_i2c_client(dev));
@@ -804,6 +706,7 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
        return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
                       to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
 }
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
 static ssize_t
 show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -817,8 +720,6 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 
        return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
 static struct attribute *i2c_dev_attrs[] = {
@@ -827,29 +728,7 @@ static struct attribute *i2c_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL
 };
-
-static struct attribute_group i2c_dev_attr_group = {
-       .attrs          = i2c_dev_attrs,
-};
-
-static const struct attribute_group *i2c_dev_attr_groups[] = {
-       &i2c_dev_attr_group,
-       NULL
-};
-
-static const struct dev_pm_ops i2c_device_pm_ops = {
-       .suspend = i2c_device_pm_suspend,
-       .resume = i2c_device_pm_resume,
-       .freeze = i2c_device_pm_freeze,
-       .thaw = i2c_device_pm_thaw,
-       .poweroff = i2c_device_pm_poweroff,
-       .restore = i2c_device_pm_restore,
-       SET_RUNTIME_PM_OPS(
-               pm_generic_runtime_suspend,
-               pm_generic_runtime_resume,
-               NULL
-       )
-};
+ATTRIBUTE_GROUPS(i2c_dev);
 
 struct bus_type i2c_bus_type = {
        .name           = "i2c",
@@ -857,12 +736,11 @@ struct bus_type i2c_bus_type = {
        .probe          = i2c_device_probe,
        .remove         = i2c_device_remove,
        .shutdown       = i2c_device_shutdown,
-       .pm             = &i2c_device_pm_ops,
 };
 EXPORT_SYMBOL_GPL(i2c_bus_type);
 
 static struct device_type i2c_client_type = {
-       .groups         = i2c_dev_attr_groups,
+       .groups         = i2c_dev_groups,
        .uevent         = i2c_device_uevent,
        .release        = i2c_client_dev_release,
 };
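
The hand-rolled attribute_group/attribute_groups pair for the client attributes is replaced by the ATTRIBUTE_GROUPS(i2c_dev) helper, which is why the device type now points at i2c_dev_groups rather than i2c_dev_attr_groups (the adapter attributes get the same treatment further down). A sketch of roughly what the macro from <linux/sysfs.h> generates from the existing i2c_dev_attrs[] array (not the literal expansion):

    static const struct attribute_group i2c_dev_group = {
        .attrs = i2c_dev_attrs,
    };

    static const struct attribute_group *i2c_dev_groups[] = {
        &i2c_dev_group,
        NULL,
    };
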
@@ -1261,6 +1139,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
 
        return count;
 }
+static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 
 /*
  * And of course let the users delete the devices they instantiated, if
@@ -1315,8 +1194,6 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
                        "delete_device");
        return res;
 }
-
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
                                   i2c_sysfs_delete_device);
 
@@ -1326,18 +1203,10 @@ static struct attribute *i2c_adapter_attrs[] = {
        &dev_attr_delete_device.attr,
        NULL
 };
-
-static struct attribute_group i2c_adapter_attr_group = {
-       .attrs          = i2c_adapter_attrs,
-};
-
-static const struct attribute_group *i2c_adapter_attr_groups[] = {
-       &i2c_adapter_attr_group,
-       NULL
-};
+ATTRIBUTE_GROUPS(i2c_adapter);
 
 struct device_type i2c_adapter_type = {
-       .groups         = i2c_adapter_attr_groups,
+       .groups         = i2c_adapter_groups,
        .release        = i2c_adapter_dev_release,
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
@@ -1419,8 +1288,6 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
        if (of_get_property(node, "wakeup-source", NULL))
                info.flags |= I2C_CLIENT_WAKE;
 
-       request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
        result = i2c_new_device(adap, &info);
        if (result == NULL) {
                dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
@@ -1796,11 +1663,15 @@ void i2c_del_adapter(struct i2c_adapter *adap)
        /* device name is gone after device_unregister */
        dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
 
-       /* clean up the sysfs representation */
+       /* wait until all references to the device are gone
+        *
+        * FIXME: This is old code and should ideally be replaced by an
+        * alternative which results in decoupling the lifetime of the struct
+        * device from the i2c_adapter, like spi or netdev do. Any solution
+        * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled!
+        */
        init_completion(&adap->dev_released);
        device_unregister(&adap->dev);
-
-       /* wait for sysfs to drop all references */
        wait_for_completion(&adap->dev_released);
 
        /* free bus id */
@@ -1859,14 +1730,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
        if (res)
                return res;
 
-       /* Drivers should switch to dev_pm_ops instead. */
-       if (driver->suspend)
-               pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
-                       driver->driver.name);
-       if (driver->resume)
-               pr_warn("i2c-core: driver [%s] using legacy resume method\n",
-                       driver->driver.name);
-
        pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
        INIT_LIST_HEAD(&driver->clients);
index ec11b404b433737657957ef9dd0e559a955b45aa..3d8f4fe2e47e52eefff7da7967fe41bf0fafe801 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 
@@ -186,6 +187,8 @@ static int pca954x_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
        struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+       struct device_node *of_node = client->dev.of_node;
+       bool idle_disconnect_dt;
        struct gpio_desc *gpio;
        int num, force, class;
        struct pca954x *data;
@@ -217,8 +220,13 @@ static int pca954x_probe(struct i2c_client *client,
        data->type = id->driver_data;
        data->last_chan = 0;               /* force the first selection */
 
+       idle_disconnect_dt = of_node &&
+               of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+
        /* Now create an adapter for each channel */
        for (num = 0; num < chips[data->type].nchans; num++) {
+               bool idle_disconnect_pd = false;
+
                force = 0;                        /* dynamic adap number */
                class = 0;                        /* no class by default */
                if (pdata) {
@@ -229,12 +237,13 @@ static int pca954x_probe(struct i2c_client *client,
                        } else
                                /* discard unconfigured channels */
                                break;
+                       idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
                }
 
                data->virt_adaps[num] =
                        i2c_add_mux_adapter(adap, &client->dev, client,
                                force, num, class, pca954x_select_chan,
-                               (pdata && pdata->modes[num].deselect_on_exit)
+                               (idle_disconnect_pd || idle_disconnect_dt)
                                        ? pca954x_deselect_mux : NULL);
 
                if (data->virt_adaps[num] == NULL) {
index 1793aea4a7d2c58192f6023d43e90c10daedbb52..6eb738ca6d2f353717aac1e63bade383f763afa5 100644 (file)
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
        tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
                                         IDETAPE_DSC_RW_MAX);
        printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
-               "%lums tDSC%s\n",
+               "%ums tDSC%s\n",
                drive->name, tape->name, *(u16 *)&tape->caps[14],
                (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
                tape->buffer_size / 1024,
-               tape->best_dsc_rw_freq * 1000 / HZ,
+               jiffies_to_msecs(tape->best_dsc_rw_freq),
                (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
 
        ide_proc_register_driver(drive, tape->driver);
index 51672256072bc8db2778e6c60982f320c680f3d4..b96c636470ef504e3b7f3d5612602bcc76a3a3a0 100644 (file)
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
        }
 
-/* LSB is in nV to eliminate floating point */
-static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
-
-/*
- *  scales calculated as:
- *  rates_to_lsb[sample_rate] / (1 << pga);
- *  pga is 1 for 0, 2
- */
-
 static const int mcp3422_scales[4][4] = {
-       { 1000000, 250000, 62500, 15625 },
-       { 500000 , 125000, 31250, 7812 },
-       { 250000 , 62500 , 15625, 3906 },
-       { 125000 , 31250 , 7812 , 1953 } };
+       { 1000000, 500000, 250000, 125000 },
+       { 250000 , 125000, 62500 , 31250  },
+       { 62500  , 31250 , 15625 , 7812   },
+       { 15625  , 7812  , 3906  , 1953   } };
 
 /* Constant msleep times for data acquisitions */
 static const int mcp3422_read_times[4] = {
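
The corrected mcp3422_scales table is indexed by sample rate (row) and PGA setting (column): each entry is the LSB size in nV for that sample rate divided by the gain 1, 2, 4 or 8, i.e. rates_to_lsb[rate] / (1 << pga), which the old table had laid out incorrectly. A small stand-alone check that regenerates the new table from that formula (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        static const int rates_to_lsb[4] = { 1000000, 250000, 62500, 15625 };
        int rate, pga;

        for (rate = 0; rate < 4; rate++) {
            for (pga = 0; pga < 4; pga++)
                printf("%7d ", rates_to_lsb[rate] >> pga);
            printf("\n");
        }
        return 0;
    }
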
index b9666f2f5e514f8d3a66a0a192b190bcbd2a44d5..fabd24edc2a1d07534d2a67e9a1c8e78ef353cae 100644 (file)
@@ -296,7 +296,8 @@ static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
        if (iadc->poll_eoc) {
                ret = iadc_poll_wait_eoc(iadc, wait);
        } else {
-               ret = wait_for_completion_timeout(&iadc->complete, wait);
+               ret = wait_for_completion_timeout(&iadc->complete,
+                       usecs_to_jiffies(wait));
                if (!ret)
                        ret = -ETIMEDOUT;
                else
index 52d70435f5a11c55ed186003c70bce311002f918..55a90082a29bd4846aaea896f8c7f847d96fcb45 100644 (file)
@@ -640,6 +640,7 @@ static int ssp_remove(struct spi_device *spi)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ssp_suspend(struct device *dev)
 {
        int ret;
@@ -688,6 +689,7 @@ static int ssp_resume(struct device *dev)
 
        return 0;
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct dev_pm_ops ssp_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume)
index f57562aa396f44462a9fdc822741d09bf37531f4..15c73e20272d874655e544366e2a8d6f1c24912a 100644 (file)
@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
        st = iio_priv(indio_dev);
        spi_set_drvdata(spi, indio_dev);
 
-       st->reg = devm_regulator_get(&spi->dev, "vcc");
+       st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
        if (!IS_ERR(st->reg)) {
                ret = regulator_enable(st->reg);
                if (ret)
index 623c145d8a97241bd0c2e284d9bb273dfcfb72da..7d79a1ac5f5f09ba332fee3ebb94c486280ce024 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/bitops.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 
 #define DHT11_DATA_VALID_TIME  2000000000  /* 2s in ns */
 
-#define DHT11_EDGES_PREAMBLE 4
+#define DHT11_EDGES_PREAMBLE 2
 #define DHT11_BITS_PER_READ 40
+/*
+ * Note that when reading the sensor actually 84 edges are detected, but
+ * since the last edge is not significant, we only store 83:
+ */
 #define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
 
 /* Data transmission timing (nano seconds) */
@@ -57,6 +62,7 @@ struct dht11 {
        int                             irq;
 
        struct completion               completion;
+       struct mutex                    lock;
 
        s64                             timestamp;
        int                             temperature;
@@ -88,7 +94,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
        unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
 
        /* Calculate timestamp resolution */
-       for (i = 0; i < dht11->num_edges; ++i) {
+       for (i = 1; i < dht11->num_edges; ++i) {
                t = dht11->edges[i].ts - dht11->edges[i-1].ts;
                if (t > 0 && t < timeres)
                        timeres = t;
@@ -138,6 +144,27 @@ static int dht11_decode(struct dht11 *dht11, int offset)
        return 0;
 }
 
+/*
+ * IRQ handler called on GPIO edges
+ */
+static irqreturn_t dht11_handle_irq(int irq, void *data)
+{
+       struct iio_dev *iio = data;
+       struct dht11 *dht11 = iio_priv(iio);
+
+       /* TODO: Consider making the handler safe for IRQ sharing */
+       if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
+               dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+               dht11->edges[dht11->num_edges++].value =
+                                               gpio_get_value(dht11->gpio);
+
+               if (dht11->num_edges >= DHT11_EDGES_PER_READ)
+                       complete(&dht11->completion);
+       }
+
+       return IRQ_HANDLED;
+}
+
 static int dht11_read_raw(struct iio_dev *iio_dev,
                        const struct iio_chan_spec *chan,
                        int *val, int *val2, long m)
@@ -145,6 +172,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
        struct dht11 *dht11 = iio_priv(iio_dev);
        int ret;
 
+       mutex_lock(&dht11->lock);
        if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
                reinit_completion(&dht11->completion);
 
@@ -157,8 +185,17 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
                if (ret)
                        goto err;
 
+               ret = request_irq(dht11->irq, dht11_handle_irq,
+                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                                 iio_dev->name, iio_dev);
+               if (ret)
+                       goto err;
+
                ret = wait_for_completion_killable_timeout(&dht11->completion,
                                                                 HZ);
+
+               free_irq(dht11->irq, iio_dev);
+
                if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
                        dev_err(&iio_dev->dev,
                                        "Only %d signal edges detected\n",
@@ -185,6 +222,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
                ret = -EINVAL;
 err:
        dht11->num_edges = -1;
+       mutex_unlock(&dht11->lock);
        return ret;
 }
 
@@ -193,27 +231,6 @@ static const struct iio_info dht11_iio_info = {
        .read_raw               = dht11_read_raw,
 };
 
-/*
- * IRQ handler called on GPIO edges
-*/
-static irqreturn_t dht11_handle_irq(int irq, void *data)
-{
-       struct iio_dev *iio = data;
-       struct dht11 *dht11 = iio_priv(iio);
-
-       /* TODO: Consider making the handler safe for IRQ sharing */
-       if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
-               dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
-               dht11->edges[dht11->num_edges++].value =
-                                               gpio_get_value(dht11->gpio);
-
-               if (dht11->num_edges >= DHT11_EDGES_PER_READ)
-                       complete(&dht11->completion);
-       }
-
-       return IRQ_HANDLED;
-}
-
 static const struct iio_chan_spec dht11_chan_spec[] = {
        { .type = IIO_TEMP,
                .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
@@ -256,11 +273,6 @@ static int dht11_probe(struct platform_device *pdev)
                dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
                return -EINVAL;
        }
-       ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
-                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                               pdev->name, iio);
-       if (ret)
-               return ret;
 
        dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
        dht11->num_edges = -1;
@@ -268,6 +280,7 @@ static int dht11_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, iio);
 
        init_completion(&dht11->completion);
+       mutex_init(&dht11->lock);
        iio->name = pdev->name;
        iio->dev.parent = &pdev->dev;
        iio->info = &dht11_iio_info;
index b54164677b898d5c9e5b4b9983fe61271f3c80b5..fa3b809aff5efd425ce96031aec1c066b644feb2 100644 (file)
@@ -45,12 +45,12 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
                           struct iio_chan_spec const *chan, int *val,
                           int *val2, long mask)
 {
-       struct i2c_client *client = iio_priv(indio_dev);
+       struct i2c_client **client = iio_priv(indio_dev);
        int ret;
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               ret = i2c_smbus_read_word_data(client,
+               ret = i2c_smbus_read_word_data(*client,
                                               chan->type == IIO_TEMP ?
                                               SI7020CMD_TEMP_HOLD :
                                               SI7020CMD_RH_HOLD);
@@ -126,7 +126,7 @@ static int si7020_probe(struct i2c_client *client,
        /* Wait the maximum power-up time after software reset. */
        msleep(15);
 
-       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client));
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
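
In the si7020 hunks above, the private area returned by iio_priv() holds a pointer to the i2c_client, so read_raw() has to go through struct i2c_client ** and the allocation has to be pointer-sized (sizeof(*data)) rather than sizeof(*client). A toy user-space model of the same one-extra-level-of-indirection fix, with malloc() standing in for devm_iio_device_alloc()/iio_priv() and illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    struct client { int addr; };

    int main(void)
    {
        struct client real = { .addr = 0x40 };
        struct client **data, **client;

        data = malloc(sizeof(*data));   /* sizeof a pointer, as in sizeof(*data) */
        if (!data)
            return 1;
        *data = &real;                  /* probe(): stash the client pointer */

        client = data;                  /* read_raw(): what iio_priv() hands back */
        printf("addr=0x%02x\n", (*client)->addr);

        free(data);
        return 0;
    }
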
 
index b70873de04ea50a7cb73f34835f15c43ceff9eb1..fa795dcd5f75ec0a1e8de143bc0122ef36bf9409 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/debugfs.h>
+#include <linux/bitops.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
                mutex_unlock(&indio_dev->mlock);
                if (ret)
                        return ret;
-               val16 = ((val16 & 0xFFF) << 4) >> 4;
+               val16 = sign_extend32(val16, 11);
                *val = val16;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_OFFSET:
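
sign_extend32(val16, 11), from <linux/bitops.h> (hence the new include at the top of this file), treats bit 11 as the sign bit of the 12-bit field and extends it into a full signed value, replacing the shift-left/shift-right trick that relied on an arithmetic right shift. A stand-alone illustration of the same operation (the helper name is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static int32_t sign_extend_12(uint32_t value)
    {
        const uint32_t mask = 1u << 11;     /* sign bit of the 12-bit field */

        value &= (1u << 12) - 1;            /* keep the low 12 bits */
        return (int32_t)(value ^ mask) - (int32_t)mask;
    }

    int main(void)
    {
        printf("%d\n", sign_extend_12(0xFFF));  /* -1    */
        printf("%d\n", sign_extend_12(0x800));  /* -2048 */
        printf("%d\n", sign_extend_12(0x7FF));  /*  2047 */
        return 0;
    }
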
index f73e60b7a79611b8825bfe90ea3ba5a5e1b253b1..d8d5bed65e072cae577968edb78e2e592c2a5bfa 100644 (file)
@@ -780,7 +780,11 @@ static int inv_mpu_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, indio_dev);
        indio_dev->dev.parent = &client->dev;
-       indio_dev->name = id->name;
+       /* id will be NULL when enumerated via ACPI */
+       if (id)
+               indio_dev->name = (char *)id->name;
+       else
+               indio_dev->name = (char *)dev_name(&client->dev);
        indio_dev->channels = inv_mpu_channels;
        indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
 
index ae68c64bdad3ef7480d15da5d1187c1fd4f7ba4f..a224afd6380c84eff7c76996c7c38eb621493140 100644 (file)
@@ -73,6 +73,7 @@ config CM36651
 config GP2AP020A00F
        tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
        depends on I2C
+       select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select IRQ_WORK
@@ -126,6 +127,7 @@ config HID_SENSOR_PROX
 config JSA1212
        tristate "JSA1212 ALS and proximity sensor driver"
        depends on I2C
+       select REGMAP_I2C
        help
         Say Y here if you want to build a IIO driver for JSA1212
         proximity & ALS sensor device.
index 4c7a4c52dd06bf7be91a6a1b1d9d56ff7984650c..a5d6de72c523bab274f003cc8431ce05b29630c7 100644 (file)
@@ -18,6 +18,8 @@ config AK8975
 
 config AK09911
        tristate "Asahi Kasei AK09911 3-axis Compass"
+       depends on I2C
+       depends on GPIOLIB
        select AK8975
        help
          Deprecated: AK09911 is now supported by AK8975 driver.
index 56a4b7ca7ee38ba118796d6d5e3eb770bbebc69a..45d67e9228d75b44d71354d248cf97140dfe37ad 100644 (file)
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
        if (!optlen)
                return -EINVAL;
 
+       memset(&sa_path, 0, sizeof(sa_path));
+       sa_path.vlan_id = 0xffff;
+
        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
index 6095872549e79fb0dd9c0b3702c4eb01e974fb3f..8b8cc6fa0ab0c1ebf966b2ace4932807d9181bf1 100644 (file)
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
-       if (likely(!atomic_read(&context->notifier_count)))
+       if (likely(!atomic_read(&context->notifier_count)) ||
+           context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
index 643c08a025a52d015431b8a27be1ddcacbd36845..b716b08156446e186c9ae608f3f4e6343c6f200f 100644 (file)
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
index b7943ff16ed3f2edece8ec4cc3c7931594bd943a..a9f048990dfcd833de09978c0448ad979749e4c9 100644 (file)
@@ -400,6 +400,52 @@ err:
        return ret;
 }
 
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+                                 struct ib_uverbs_query_device_resp *resp,
+                                 struct ib_device_attr *attr)
+{
+       resp->fw_ver            = attr->fw_ver;
+       resp->node_guid         = file->device->ib_dev->node_guid;
+       resp->sys_image_guid    = attr->sys_image_guid;
+       resp->max_mr_size       = attr->max_mr_size;
+       resp->page_size_cap     = attr->page_size_cap;
+       resp->vendor_id         = attr->vendor_id;
+       resp->vendor_part_id    = attr->vendor_part_id;
+       resp->hw_ver            = attr->hw_ver;
+       resp->max_qp            = attr->max_qp;
+       resp->max_qp_wr         = attr->max_qp_wr;
+       resp->device_cap_flags  = attr->device_cap_flags;
+       resp->max_sge           = attr->max_sge;
+       resp->max_sge_rd        = attr->max_sge_rd;
+       resp->max_cq            = attr->max_cq;
+       resp->max_cqe           = attr->max_cqe;
+       resp->max_mr            = attr->max_mr;
+       resp->max_pd            = attr->max_pd;
+       resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
+       resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
+       resp->max_res_rd_atom   = attr->max_res_rd_atom;
+       resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
+       resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
+       resp->atomic_cap                = attr->atomic_cap;
+       resp->max_ee                    = attr->max_ee;
+       resp->max_rdd                   = attr->max_rdd;
+       resp->max_mw                    = attr->max_mw;
+       resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
+       resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
+       resp->max_mcast_grp             = attr->max_mcast_grp;
+       resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
+       resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+       resp->max_ah                    = attr->max_ah;
+       resp->max_fmr                   = attr->max_fmr;
+       resp->max_map_per_fmr           = attr->max_map_per_fmr;
+       resp->max_srq                   = attr->max_srq;
+       resp->max_srq_wr                = attr->max_srq_wr;
+       resp->max_srq_sge               = attr->max_srq_sge;
+       resp->max_pkeys                 = attr->max_pkeys;
+       resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
+       resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+}
+
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                return ret;
 
        memset(&resp, 0, sizeof resp);
-
-       resp.fw_ver                    = attr.fw_ver;
-       resp.node_guid                 = file->device->ib_dev->node_guid;
-       resp.sys_image_guid            = attr.sys_image_guid;
-       resp.max_mr_size               = attr.max_mr_size;
-       resp.page_size_cap             = attr.page_size_cap;
-       resp.vendor_id                 = attr.vendor_id;
-       resp.vendor_part_id            = attr.vendor_part_id;
-       resp.hw_ver                    = attr.hw_ver;
-       resp.max_qp                    = attr.max_qp;
-       resp.max_qp_wr                 = attr.max_qp_wr;
-       resp.device_cap_flags          = attr.device_cap_flags;
-       resp.max_sge                   = attr.max_sge;
-       resp.max_sge_rd                = attr.max_sge_rd;
-       resp.max_cq                    = attr.max_cq;
-       resp.max_cqe                   = attr.max_cqe;
-       resp.max_mr                    = attr.max_mr;
-       resp.max_pd                    = attr.max_pd;
-       resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
-       resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
-       resp.max_res_rd_atom           = attr.max_res_rd_atom;
-       resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
-       resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
-       resp.atomic_cap                = attr.atomic_cap;
-       resp.max_ee                    = attr.max_ee;
-       resp.max_rdd                   = attr.max_rdd;
-       resp.max_mw                    = attr.max_mw;
-       resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
-       resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
-       resp.max_mcast_grp             = attr.max_mcast_grp;
-       resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
-       resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
-       resp.max_ah                    = attr.max_ah;
-       resp.max_fmr                   = attr.max_fmr;
-       resp.max_map_per_fmr           = attr.max_map_per_fmr;
-       resp.max_srq                   = attr.max_srq;
-       resp.max_srq_wr                = attr.max_srq_wr;
-       resp.max_srq_sge               = attr.max_srq_sge;
-       resp.max_pkeys                 = attr.max_pkeys;
-       resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
-       resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+       copy_query_dev_fields(file, &resp, &attr);
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
        if (qp->real_qp == qp) {
                ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
                if (ret)
-                       goto out;
+                       goto release_qp;
                ret = qp->device->modify_qp(qp, attr,
                        modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
        } else {
                ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
        }
 
-       put_qp_read(qp);
-
        if (ret)
-               goto out;
+               goto release_qp;
 
        ret = in_len;
 
+release_qp:
+       put_qp_read(qp);
+
 out:
        kfree(attr);
 
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+                             struct ib_udata *ucore,
+                             struct ib_udata *uhw)
+{
+       struct ib_uverbs_ex_query_device_resp resp;
+       struct ib_uverbs_ex_query_device  cmd;
+       struct ib_device_attr attr;
+       struct ib_device *device;
+       int err;
+
+       device = file->device->ib_dev;
+       if (ucore->inlen < sizeof(cmd))
+               return -EINVAL;
+
+       err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+       if (err)
+               return err;
+
+       if (cmd.comp_mask)
+               return -EINVAL;
+
+       if (cmd.reserved)
+               return -EINVAL;
+
+       resp.response_length = offsetof(typeof(resp), odp_caps);
+
+       if (ucore->outlen < resp.response_length)
+               return -ENOSPC;
+
+       err = device->query_device(device, &attr);
+       if (err)
+               return err;
+
+       copy_query_dev_fields(file, &resp.base, &attr);
+       resp.comp_mask = 0;
+
+       if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
+               goto end;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+       resp.odp_caps.per_transport_caps.rc_odp_caps =
+               attr.odp_caps.per_transport_caps.rc_odp_caps;
+       resp.odp_caps.per_transport_caps.uc_odp_caps =
+               attr.odp_caps.per_transport_caps.uc_odp_caps;
+       resp.odp_caps.per_transport_caps.ud_odp_caps =
+               attr.odp_caps.per_transport_caps.ud_odp_caps;
+       resp.odp_caps.reserved = 0;
+#else
+       memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
+#endif
+       resp.response_length += sizeof(resp.odp_caps);
+
+end:
+       err = ib_copy_to_udata(ucore, &resp, resp.response_length);
+       if (err)
+               return err;
+
+       return 0;
+}
index 5db1a8cc388da0c5de517bf69b3d8136b94a1bbf..259dcc7779f5e01bc95b66ca90e64d20f7c94087 100644 (file)
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
+       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
index 794555dc86a598a78125edc38a01299157e9caeb..bdfac2ccb704ab43403a5245448ef1184143e738 100644 (file)
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
        struct c4iw_cq *chp;
        unsigned long flag;
 
+       spin_lock_irqsave(&dev->lock, flag);
        chp = get_chp(dev, qid);
        if (chp) {
+               atomic_inc(&chp->refcnt);
+               spin_unlock_irqrestore(&dev->lock, flag);
                t4_clear_cq_armed(&chp->cq);
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-       } else
+               if (atomic_dec_and_test(&chp->refcnt))
+                       wake_up(&chp->wait);
+       } else {
                PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+               spin_unlock_irqrestore(&dev->lock, flag);
+       }
        return 0;
 }
index b5678ac97393ab94ba73b6b1f396ae801801c480..d87e1650f6437835f3660c21d3a59ec920fa8f7c 100644 (file)
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
        return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
-#define C4IW_WR_TO (30*HZ)
+#define C4IW_WR_TO (60*HZ)
 
 struct c4iw_wr_wait {
        struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
 {
-       unsigned to = C4IW_WR_TO;
        int ret;
 
-       do {
-               ret = wait_for_completion_timeout(&wr_waitp->completion, to);
-               if (!ret) {
-                       printk(KERN_ERR MOD "%s - Device %s not responding - "
-                              "tid %u qpid %u\n", func,
-                              pci_name(rdev->lldi.pdev), hwtid, qpid);
-                       if (c4iw_fatal_error(rdev)) {
-                               wr_waitp->ret = -EIO;
-                               break;
-                       }
-                       to = to << 2;
-               }
-       } while (!ret);
+       if (c4iw_fatal_error(rdev)) {
+               wr_waitp->ret = -EIO;
+               goto out;
+       }
+
+       ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+       if (!ret) {
+               PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+                    func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+               rdev->flags |= T4_FATAL_ERROR;
+               wr_waitp->ret = -EIO;
+       }
+out:
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
index 4977082e081f2303542d34ef230c5d199b2b2f31..33c45dfcbd88cb11a05c637e22159cc34008a718 100644 (file)
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!(d_unhashed(tmp) && tmp->d_inode)) {
+       if (!d_unhashed(tmp) && tmp->d_inode) {
                dget_dlock(tmp);
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
index 6559af60bffd62fbf162320379ff545ca4c974fc..e08db7020cd4939809dd456882ff3d4e69480480 100644 (file)
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
 /* clean up any chip type-specific stuff */
 void ipath_chip_done(void);
 
-/* check to see if we have to force ordering for write combining */
-int ipath_unordered_wc(void);
-
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
                          unsigned cnt);
 void ipath_cancel_sends(struct ipath_devdata *, int);
index 1d7bd82a1fb1fa3ffe8c4cebf08a3cd1b57529ce..1a7e20a75149ce6a6bab980b466585784595c0f1 100644 (file)
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
 {
        return 0;
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int ipath_unordered_wc(void)
-{
-       return 1;
-}
index 3428acb0868c202383304105546fc023d3a0917e..4ad0b932df1fab1c1897f144db9ffc8af35c5f73 100644 (file)
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
                dd->ipath_wc_cookie = 0; /* even on failure */
        }
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering.  Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int ipath_unordered_wc(void)
-{
-       return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
index 56a593e0ae5d1f537db0f3615ee29eb99b0defc2..39a488889fc7a9981213b25567f051dc772bc510 100644 (file)
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
-                                       gid.global.interface_id);
+                                    be64_to_cpu(gid.global.interface_id));
                        return -ENOENT;
                }
                return 0;
index 543ecdd8667bad824fa3313a5b45be9693a5fc69..0176caa5792c4576276470c2c3f86f0fca16a7bd 100644 (file)
@@ -369,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int err;
 
        mutex_lock(&cq->resize_mutex);
-
-       if (entries < 1) {
+       if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }
@@ -381,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                goto out;
        }
 
-       if (entries > dev->dev->caps.max_cqes) {
+       if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }
@@ -394,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
-                       err = 0;
+                       err = -EINVAL;
                        goto out;
                }
 
index c7619716c31dd92cb5ddad4f77d98528392698b5..59040265e3614a23fc8508e88dd5b2138e7107e8 100644 (file)
@@ -64,6 +64,14 @@ enum {
 #define GUID_TBL_BLK_NUM_ENTRIES 8
 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
 
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do {\
+       if ((value) > U32_MAX)                   \
+               counter = cpu_to_be32(U32_MAX); \
+       else                                     \
+               counter = cpu_to_be32(value);    \
+} while (0)
+
 struct mlx4_mad_rcv_buf {
        struct ib_grh grh;
        u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 static void edit_counter(struct mlx4_counter *cnt,
                                        struct ib_pma_portcounters *pma_cnt)
 {
-       pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
-       pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
-       pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
-       pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+       ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+                            (be64_to_cpu(cnt->tx_bytes) >> 2));
+       ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+                            (be64_to_cpu(cnt->rx_bytes) >> 2));
+       ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+                            be64_to_cpu(cnt->tx_frames));
+       ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+                            be64_to_cpu(cnt->rx_frames));
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
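
ASSIGN_32BIT_COUNTER() clamps the 64-bit hardware byte/frame counters to U32_MAX before byte-swapping them into the 32-bit big-endian PMA fields, so the exported counters saturate at the top of their range instead of silently wrapping. A minimal user-space sketch of the saturation step, without the cpu_to_be32() swap the kernel macro also performs (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t saturate_u32(uint64_t value)
    {
        return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
    }

    int main(void)
    {
        printf("%u\n", saturate_u32(42));               /* 42         */
        printf("%u\n", saturate_u32(0x123456789ULL));   /* 4294967295 */
        return 0;
    }
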
index eb8e215f1613ee95ae7fb6253ba6b06220f1b00c..b972c0b41799b51e2f554c1abc703d78e0c4636e 100644 (file)
@@ -1269,8 +1269,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_steering *ib_steering = NULL;
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
        struct mlx4_flow_reg_id reg_id;
 
        if (mdev->dev->caps.steering_mode ==
@@ -1284,8 +1283,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id.id);
-       if (err)
+       if (err) {
+               pr_err("multicast attach op failed, err %d\n", err);
                goto err_malloc;
+       }
 
        reg_id.mirror = 0;
        if (mlx4_is_bonded(dev)) {
@@ -1348,9 +1349,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_flow_reg_id reg_id = {0, 0};
-
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -2698,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
        spin_lock_bh(&ibdev->iboe.lock);
        for (i = 0; i < MLX4_MAX_PORTS; ++i) {
                struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+               enum ib_port_state curr_port_state;
+
+               if (!curr_netdev)
+                       continue;
 
-               enum ib_port_state curr_port_state =
+               curr_port_state =
                        (netif_running(curr_netdev) &&
                         netif_carrier_ok(curr_netdev)) ?
                        IB_PORT_ACTIVE : IB_PORT_DOWN;
index dfc6ca128a7e355ef737976dceee50a7033e62b7..ed2bd6701f9b131c3dc3261cb2eae21a2d835524 100644 (file)
@@ -1696,8 +1696,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
-                               if (err)
-                                       return -EINVAL;
+                               if (err) {
+                                       err = -EINVAL;
+                                       goto out;
+                               }
                                if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                                        dev->qp1_proxy[qp->port - 1] = qp;
                        }
index 03bf81211a5401c366522c68213ecf27cdf4b326..cc4ac1e583b29725af01e03e40bebaee758c6e01 100644 (file)
@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        struct mlx5_general_caps *gen;
-       int err = 0;
+       int err = -ENOMEM;
        int port;
 
        gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+       dev->ib_dev.uverbs_ex_cmd_mask =
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
index 32a28bd50b20ae08c41a9a7086b72045f7934c3d..cd9822eeacae3f1ab138731ea9c6a67963974bc2 100644 (file)
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                goto err_2;
        }
        mr->umem = umem;
+       mr->dev = dev;
        mr->live = 1;
        kvfree(in);
 
index b43456ae124bccb99cfe78446b1e1ce347c8f2a6..c9780d919769a6ef9a0020b7e710afa7e7ce2497 100644 (file)
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
 #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+#define EQ_INTR_PER_SEC_THRSH_HI 150000
+#define EQ_INTR_PER_SEC_THRSH_LOW 100000
+#define EQ_AIC_MAX_EQD 20
+#define EQ_AIC_MIN_EQD 0
+
+void ocrdma_eqd_set_task(struct work_struct *work);
 
 struct ocrdma_dev_attr {
        u8 fw_ver[32];
        u32 vendor_id;
        u32 device_id;
        u16 max_pd;
+       u16 max_dpp_pds;
        u16 max_cq;
        u16 max_cqe;
        u16 max_qp;
@@ -116,12 +123,19 @@ struct ocrdma_queue_info {
        bool created;
 };
 
+struct ocrdma_aic_obj {         /* Adaptive interrupt coalescing (AIC) info */
+       u32 prev_eqd;
+       u64 eq_intr_cnt;
+       u64 prev_eq_intr_cnt;
+};
+
 struct ocrdma_eq {
        struct ocrdma_queue_info q;
        u32 vector;
        int cq_cnt;
        struct ocrdma_dev *dev;
        char irq_name[32];
+       struct ocrdma_aic_obj aic_obj;
 };
 
 struct ocrdma_mq {
@@ -171,6 +185,21 @@ struct ocrdma_stats {
        struct ocrdma_dev *dev;
 };
 
+struct ocrdma_pd_resource_mgr {
+       u32 pd_norm_start;
+       u16 pd_norm_count;
+       u16 pd_norm_thrsh;
+       u16 max_normal_pd;
+       u32 pd_dpp_start;
+       u16 pd_dpp_count;
+       u16 pd_dpp_thrsh;
+       u16 max_dpp_pd;
+       u16 dpp_page_index;
+       unsigned long *pd_norm_bitmap;
+       unsigned long *pd_dpp_bitmap;
+       bool pd_prealloc_valid;
+};
+
 struct stats_mem {
        struct ocrdma_mqe mqe;
        void *va;
@@ -198,6 +227,7 @@ struct ocrdma_dev {
 
        struct ocrdma_eq *eq_tbl;
        int eq_cnt;
+       struct delayed_work eqd_work;
        u16 base_eqid;
        u16 max_eq;
 
@@ -255,7 +285,12 @@ struct ocrdma_dev {
        struct ocrdma_stats rx_qp_err_stats;
        struct ocrdma_stats tx_dbg_stats;
        struct ocrdma_stats rx_dbg_stats;
+       struct ocrdma_stats driver_stats;
+       struct ocrdma_stats reset_stats;
        struct dentry *dir;
+       atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
+       atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
+       struct ocrdma_pd_resource_mgr *pd_mgr;
 };
 
 struct ocrdma_cq {
@@ -335,7 +370,6 @@ struct ocrdma_srq {
 
 struct ocrdma_qp {
        struct ib_qp ibqp;
-       struct ocrdma_dev *dev;
 
        u8 __iomem *sq_db;
        struct ocrdma_qp_hwq_info sq;
index f3cc8c9e65ae70f9e0b157632189e324970cbea5..d812904f398473d1502bb979d6d822c04b55f2b8 100644 (file)
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
 #include "ocrdma_ah.h"
 #include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
 
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-                       struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
+                       struct ib_ah_attr *attr, union ib_gid *sgid,
+                       int pdid, bool *isvlan)
 {
        int status = 0;
-       u16 vlan_tag; bool vlan_enabled = false;
+       u16 vlan_tag;
        struct ocrdma_eth_vlan eth;
        struct ocrdma_grh grh;
        int eth_sz;
@@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
                eth.vlan_tag = cpu_to_be16(vlan_tag);
                eth_sz = sizeof(struct ocrdma_eth_vlan);
-               vlan_enabled = true;
+               *isvlan = true;
        } else {
                eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
        memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
-       if (vlan_enabled)
+       if (*isvlan)
                ah->av->valid |= OCRDMA_AV_VLAN_VALID;
        ah->av->valid = cpu_to_le32(ah->av->valid);
        return status;
@@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 {
        u32 *ahid_addr;
+       bool isvlan = false;
        int status;
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                }
        }
 
-       status = set_av_attr(dev, ah, attr, &sgid, pd->id);
+       status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
        if (status)
                goto av_conf_err;
 
        /* if pd is for the user process, pass the ah_id to user space */
        if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
                ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
-               *ahid_addr = ah->id;
+               *ahid_addr = 0;
+               *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+               if (isvlan)
+                       *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
+                                      OCRDMA_AH_VLAN_VALID_SHIFT);
        }
+
        return &ah->ibah;
 
 av_conf_err:
@@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
                       struct ib_grh *in_grh,
                       struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-       return IB_MAD_RESULT_SUCCESS;
+       int status;
+       struct ocrdma_dev *dev;
+
+       switch (in_mad->mad_hdr.mgmt_class) {
+       case IB_MGMT_CLASS_PERF_MGMT:
+               dev = get_ocrdma_dev(ibdev);
+               if (!ocrdma_pma_counters(dev, out_mad))
+                       status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+               else
+                       status = IB_MAD_RESULT_SUCCESS;
+               break;
+       default:
+               status = IB_MAD_RESULT_SUCCESS;
+               break;
+       }
+       return status;
 }
index 8ac49e7f96d1585c7fb94049dd7db88a0699b7f4..726a87cf22dcb215d2a105f08a054395bb0c6804 100644 (file)
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
 
+enum {
+       OCRDMA_AH_ID_MASK               = 0x3FF,
+       OCRDMA_AH_VLAN_VALID_MASK       = 0x01,
+       OCRDMA_AH_VLAN_VALID_SHIFT      = 0x1F
+};
+
 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
index 638bff1ffc6c73b95a41a1556ff42a06680d6bca..0c9e95909a64651e931f4768e88f97e266c8379e 100644 (file)
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                break;
        }
 
+       if (type < OCRDMA_MAX_ASYNC_ERRORS)
+               atomic_inc(&dev->async_err_stats[type]);
+
        if (qp_event) {
                if (qp->ibqp.event_handler)
                        qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
        return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-                                      struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+                               struct ocrdma_cq *cq, bool sq)
 {
-       unsigned long flags;
        struct ocrdma_qp *qp;
-       bool buddy_cq_found = false;
-       /* Go through list of QPs in error state which are using this CQ
-        * and invoke its callback handler to trigger CQE processing for
-        * error/flushed CQE. It is rare to find more than few entries in
-        * this list as most consumers stops after getting error CQE.
-        * List is traversed only once when a matching buddy cq found for a QP.
-        */
-       spin_lock_irqsave(&dev->flush_q_lock, flags);
-       list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+       struct list_head *cur;
+       struct ocrdma_cq *bcq = NULL;
+       struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
+
+       list_for_each(cur, head) {
+               if (sq)
+                       qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+               else
+                       qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
                if (qp->srq)
                        continue;
                /* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
                 * if completion came on rq, sq's cq is buddy cq.
                 */
                if (qp->sq_cq == cq)
-                       cq = qp->rq_cq;
+                       bcq = qp->rq_cq;
                else
-                       cq = qp->sq_cq;
-               buddy_cq_found = true;
-               break;
+                       bcq = qp->sq_cq;
+               return bcq;
        }
+       return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+                                      struct ocrdma_cq *cq)
+{
+       unsigned long flags;
+       struct ocrdma_cq *bcq = NULL;
+
+       /* Go through list of QPs in error state which are using this CQ
+        * and invoke its callback handler to trigger CQE processing for
+        * error/flushed CQE. It is rare to find more than few entries in
+        * this list as most consumers stops after getting error CQE.
+        * List is traversed only once when a matching buddy cq found for a QP.
+        */
+       spin_lock_irqsave(&dev->flush_q_lock, flags);
+       /* Check if buddy CQ is present.
+        * true - Check for  SQ CQ
+        * false - Check for RQ CQ
+        */
+       bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+       if (bcq == NULL)
+               bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-       if (buddy_cq_found == false)
-               return;
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
-               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+       /* if there is valid buddy cq, look for its completion handler */
+       if (bcq && bcq->ibcq.comp_handler) {
+               spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+               (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+               spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
        }
 }
 
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
 
        } while (budget);
 
+       eq->aic_obj.eq_intr_cnt++;
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
        return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+       attr->max_dpp_pds =
+          (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
        attr->max_qp =
            (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
        return status;
 }
 
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+       int status = -ENOMEM;
+       size_t pd_bitmap_size;
+       struct ocrdma_alloc_pd_range *cmd;
+       struct ocrdma_alloc_pd_range_rsp *rsp;
+
+       /* Pre-allocate the DPP PDs */
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               return -ENOMEM;
+       cmd->pd_count = dev->attr.max_dpp_pds;
+       cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+       if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+               dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+                               OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+               dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+                               OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+               dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+               dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+                                                    GFP_KERNEL);
+       }
+       kfree(cmd);
+
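+       /* pre-allocate the remaining (non-DPP) PDs as the normal PD pool */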
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+       if (rsp->pd_count) {
+               dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+                                       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+               dev->pd_mgr->max_normal_pd = rsp->pd_count;
+               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+               dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+                                                     GFP_KERNEL);
+       }
+
+       if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+               /* Enable PD resource manager */
+               dev->pd_mgr->pd_prealloc_valid = true;
+       } else {
+               status = -ENOMEM;       /* fall through so cmd is freed */
+       }
+mbx_err:
+       kfree(cmd);
+       return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+       struct ocrdma_dealloc_pd_range *cmd;
+
+       /* return normal PDs to firmware */
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               goto mbx_err;
+
+       if (dev->pd_mgr->max_normal_pd) {
+               cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+               cmd->pd_count = dev->pd_mgr->max_normal_pd;
+               ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       }
+
+       if (dev->pd_mgr->max_dpp_pd) {
+               kfree(cmd);
+               /* return DPP PDs to firmware */
+               cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+                                         sizeof(*cmd));
+               if (!cmd)
+                       goto mbx_err;
+
+               cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+               cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+               ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       }
+mbx_err:
+       kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+       int status;
+
+       dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+                             GFP_KERNEL);
+       if (!dev->pd_mgr) {
+               pr_err("%s(%d) Memory allocation failure.\n", __func__, dev->id);
+               return;
+       }
+       status = ocrdma_mbx_alloc_pd_range(dev);
+       if (status) {
+               pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+                        __func__, dev->id);
+       }
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+       ocrdma_mbx_dealloc_pd_range(dev);
+       kfree(dev->pd_mgr->pd_norm_bitmap);
+       kfree(dev->pd_mgr->pd_dpp_bitmap);
+       kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
                               int *num_pages, int *page_size)
 {
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
        bool found;
        unsigned long flags;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-       spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+       spin_lock_irqsave(&dev->flush_q_lock, flags);
        found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
        if (!found)
                list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
                if (!found)
                        list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
        }
-       spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+       spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_wqe_allocated;
        u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa = 0;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
                                        struct ocrdma_qp *qp)
 {
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        dma_addr_t pa = 0;
        int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
        int status = -ENOMEM;
        u32 flags = 0;
-       struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        struct ocrdma_cq *cq;
        struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        union ib_gid sgid, zgid;
        u32 vlan_id;
        u8 mac_addr[6];
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
                return -EINVAL;
-       if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-               ocrdma_init_service_level(qp->dev);
+       if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+               ocrdma_init_service_level(dev);
        cmd->params.tclass_sq_psn |=
            (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
        cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
        memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
               sizeof(cmd->params.dgid));
-       status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+       status = ocrdma_query_gid(&dev->ibdev, 1,
                        ah_attr->grh.sgid_index, &sgid);
        if (status)
                return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
        qp->sgid_idx = ah_attr->grh.sgid_index;
        memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-       ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+       status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+       if (status)
+               return status;
        cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
                                (mac_addr[2] << 16) | (mac_addr[3] << 24);
        /* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                cmd->params.rnt_rc_sl_fl |=
-                       (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+                       (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
        }
        return 0;
 }
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                                struct ib_qp_attr *attrs, int attr_mask)
 {
        int status = 0;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                        return status;
        } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
                /* set the default mac address for UD, GSI QPs */
-               cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-                       (qp->dev->nic_info.mac_addr[1] << 8) |
-                       (qp->dev->nic_info.mac_addr[2] << 16) |
-                       (qp->dev->nic_info.mac_addr[3] << 24);
-               cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-                                       (qp->dev->nic_info.mac_addr[5] << 8);
+               cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+                       (dev->nic_info.mac_addr[1] << 8) |
+                       (dev->nic_info.mac_addr[2] << 16) |
+                       (dev->nic_info.mac_addr[3] << 24);
+               cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+                                       (dev->nic_info.mac_addr[5] << 8);
        }
        if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
            attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
        }
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-               if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+               if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
        }
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-               if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+               if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -2870,6 +3023,82 @@ done:
        return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+                                int num)
+{
+       int i, status = -ENOMEM;
+       struct ocrdma_modify_eqd_req *cmd;
+
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+       if (!cmd)
+               return status;
+
+       ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+                       OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+       cmd->cmd.num_eq = num;
+       for (i = 0; i < num; i++) {
+               cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+               cmd->cmd.set_eqd[i].phase = 0;
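+               /* firmware expects a delay multiplier: ~65% of the EQ delay */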
+               cmd->cmd.set_eqd[i].delay_multiplier =
+                               (eq[i].aic_obj.prev_eqd * 65)/100;
+       }
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       kfree(cmd);
+       return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+                            int num)
+{
+       int num_eqs, i = 0;
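+
+       /* MODIFY_EQ_DELAY takes at most 8 EQs per command; chunk the list */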
+       if (num > 8) {
+               while (num) {
+                       num_eqs = min(num, 8);
+                       ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+                       i += num_eqs;
+                       num -= num_eqs;
+               }
+       } else {
+               ocrdma_mbx_modify_eqd(dev, eq, num);
+       }
+       return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+       struct ocrdma_dev *dev =
+               container_of(work, struct ocrdma_dev, eqd_work.work);
+       struct ocrdma_eq *eq = NULL;
+       int i, num = 0;
+       u64 eq_intr;
+
+       for (i = 0; i < dev->eq_cnt; i++) {
+               eq = &dev->eq_tbl[i];
+               if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+                       eq_intr = eq->aic_obj.eq_intr_cnt -
+                                 eq->aic_obj.prev_eq_intr_cnt;
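+                       /* work runs every second: delta ~= interrupts/sec */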
+                       if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+                           (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+                               eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+                               num++;
+                       } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+                                  (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+                               eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+                               num++;
+                       }
+               }
+               eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+       }
+
+       if (num)
+               ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+       schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
        int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+       ocrdma_free_pd_pool(dev);
        ocrdma_mbx_delete_ah_tbl(dev);
 
        /* cleanup the eqs */
index 6eed8f191322a134fc0dcd1438cf771525c06a18..e905972fceb7d48ff882800390c1330367815caf 100644 (file)
@@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
 char *port_speed_string(struct ocrdma_dev *dev);
 void ocrdma_init_service_level(struct ocrdma_dev *);
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
+void ocrdma_free_pd_range(struct ocrdma_dev *dev);
 
 #endif                         /* __OCRDMA_HW_H__ */
index b0b2257b8e0430738cc7b5f5f36145868c7d4dc3..7a2b59aca004bfac1eae4fc258fcb08d077bf449 100644 (file)
@@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
 
        dev->ibdev.node_type = RDMA_NODE_IB_CA;
        dev->ibdev.phys_port_cnt = 1;
-       dev->ibdev.num_comp_vectors = 1;
+       dev->ibdev.num_comp_vectors = dev->eq_cnt;
 
        /* mandatory verbs. */
        dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
        if (dev->stag_arr == NULL)
                goto alloc_err;
 
+       ocrdma_alloc_pd_pool(dev);
+
        spin_lock_init(&dev->av_tbl.lock);
        spin_lock_init(&dev->flush_q_lock);
        return 0;
@@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
        spin_unlock(&ocrdma_devlist_lock);
        /* Init stats */
        ocrdma_add_port_stats(dev);
+       /* Interrupt Moderation */
+       INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
+       schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
 
        pr_info("%s %s: %s \"%s\" port %d\n",
                dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
        /* first unregister with stack to stop all the active traffic
         * of the registered clients.
         */
-       ocrdma_rem_port_stats(dev);
+       cancel_delayed_work_sync(&dev->eqd_work);
        ocrdma_remove_sysfiles(dev);
-
        ib_unregister_device(&dev->ibdev);
 
+       ocrdma_rem_port_stats(dev);
+
        spin_lock(&ocrdma_devlist_lock);
        list_del_rcu(&dev->entry);
        spin_unlock(&ocrdma_devlist_lock);
index 4e036480c1a8fa7d9f8d1be9e2267ea3147b2b5a..243c87c8bd65d09026f46ee12e3ee3b9109ce155 100644 (file)
@@ -75,6 +75,8 @@ enum {
        OCRDMA_CMD_DESTROY_RBQ = 26,
 
        OCRDMA_CMD_GET_RDMA_STATS = 27,
+       OCRDMA_CMD_ALLOC_PD_RANGE = 28,
+       OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
 
        OCRDMA_CMD_MAX
 };
@@ -87,6 +89,7 @@ enum {
        OCRDMA_CMD_CREATE_MQ            = 21,
        OCRDMA_CMD_GET_CTRL_ATTRIBUTES  = 32,
        OCRDMA_CMD_GET_FW_VER           = 35,
+       OCRDMA_CMD_MODIFY_EQ_DELAY      = 41,
        OCRDMA_CMD_DELETE_MQ            = 53,
        OCRDMA_CMD_DELETE_CQ            = 54,
        OCRDMA_CMD_DELETE_EQ            = 55,
@@ -101,7 +104,7 @@ enum {
        QTYPE_MCCQ      = 3
 };
 
-#define OCRDMA_MAX_SGID                8
+#define OCRDMA_MAX_SGID                16
 
 #define OCRDMA_MAX_QP    2048
 #define OCRDMA_MAX_CQ    2048
@@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
 
 #define OCRDMA_EQ_MINOR_OTHER  0x1
 
+struct ocrdma_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+};
+
+struct ocrdma_modify_eqd_cmd {
+       struct ocrdma_mbx_hdr req;
+       u32 num_eq;
+       struct ocrdma_set_eqd set_eqd[8];
+} __packed;
+
+struct ocrdma_modify_eqd_req {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_modify_eqd_cmd cmd;
+};
+
+struct ocrdma_modify_eq_delay_rsp {
+       struct ocrdma_mbx_rsp hdr;
+       u32 rsvd0;
+} __packed;
+
 enum {
        OCRDMA_MCQE_STATUS_SHIFT        = 0,
        OCRDMA_MCQE_STATUS_MASK         = 0xFFFF,
@@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
        OCRDMA_DEVICE_FATAL_EVENT       = 0x08,
        OCRDMA_SRQCAT_ERROR             = 0x0E,
        OCRDMA_SRQ_LIMIT_EVENT          = 0x0F,
-       OCRDMA_QP_LAST_WQE_EVENT        = 0x10
+       OCRDMA_QP_LAST_WQE_EVENT        = 0x10,
+
+       OCRDMA_MAX_ASYNC_ERRORS
 };
 
 /* mailbox command request and responses */
@@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
        struct ocrdma_mbx_rsp rsp;
 };
 
+struct ocrdma_alloc_pd_range {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 enable_dpp_rsvd;
+       u32 pd_count;
+};
+
+struct ocrdma_alloc_pd_range_rsp {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_rsp rsp;
+       u32 dpp_page_pdid;
+       u32 pd_count;
+};
+
+enum {
+       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
+};
+
+struct ocrdma_dealloc_pd_range {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 start_pd_id;
+       u32 pd_count;
+};
+
+struct ocrdma_dealloc_pd_range_rsp {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 rsvd;
+};
+
 enum {
        OCRDMA_ADDR_CHECK_ENABLE        = 1,
        OCRDMA_ADDR_CHECK_DISABLE       = 0
@@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
        OCRDMA_CQE_INV_EEC_STATE_ERR,
        OCRDMA_CQE_FATAL_ERR,
        OCRDMA_CQE_RESP_TIMEOUT_ERR,
-       OCRDMA_CQE_GENERAL_ERR
+       OCRDMA_CQE_GENERAL_ERR,
+
+       OCRDMA_MAX_CQE_ERR
 };
 
 enum {
@@ -1673,6 +1734,7 @@ enum {
        OCRDMA_FLAG_FENCE_R     = 0x8,
        OCRDMA_FLAG_SOLICIT     = 0x10,
        OCRDMA_FLAG_IMM         = 0x20,
+       OCRDMA_FLAG_AH_VLAN_PR  = 0x40,
 
        /* Stag flags */
        OCRDMA_LKEY_FLAG_LOCAL_WR       = 0x1,
index 41a9aec9998d103f81dc325129cdab47bd20e2ff..48d7ef51aa0c209678e0ed4bbe97bc5ff9a881d5 100644 (file)
@@ -26,6 +26,7 @@
  *******************************************************************/
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_pma.h>
 #include "ocrdma_stats.h"
 
 static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
        return stats;
 }
 
+static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+       return convert_to_64bit(rx_stats->roce_frames_lo,
+               rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
+               + (u64)rx_stats->roce_frame_payload_len_drops;
+}
+
+static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
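+       /* PMA data counters are in 32-bit words, hence the divide by 4 */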
+       return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+               rx_stats->roce_frame_bytes_hi))/4;
+}
+
 static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
 {
        char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
        return stats;
 }
 
+static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+       return (convert_to_64bit(tx_stats->send_pkts_lo,
+                                tx_stats->send_pkts_hi) +
+               convert_to_64bit(tx_stats->write_pkts_lo,
+                                tx_stats->write_pkts_hi) +
+               convert_to_64bit(tx_stats->read_pkts_lo,
+                                tx_stats->read_pkts_hi) +
+               convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+                                tx_stats->read_rsp_pkts_hi) +
+               convert_to_64bit(tx_stats->ack_pkts_lo,
+                                tx_stats->ack_pkts_hi));
+}
+
+static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+       return (convert_to_64bit(tx_stats->send_bytes_lo,
+                                tx_stats->send_bytes_hi) +
+               convert_to_64bit(tx_stats->write_bytes_lo,
+                                tx_stats->write_bytes_hi) +
+               convert_to_64bit(tx_stats->read_req_bytes_lo,
+                                tx_stats->read_req_bytes_hi) +
+               convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+                                tx_stats->read_rsp_bytes_hi))/4;
+}
+
 static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
 {
        char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
        return dev->stats_mem.debugfs_mem;
 }
 
+static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
+{
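+       /* dump the driver-kept async event and CQE error counters */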
+       char *stats = dev->stats_mem.debugfs_mem, *pcur;
+
+       memset(stats, 0, OCRDMA_MAX_DBGFS_MEM);
+
+       pcur = stats;
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
+                               (u64)(dev->async_err_stats
+                               [OCRDMA_CQ_ERROR].counter));
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_CQ_OVERRUN_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_CQ_QPCAT_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_ACCESS_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_comm_est_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_COMM_EST_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SQ_DRAINED_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_DEVICE_FATAL_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SRQCAT_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SRQ_LIMIT_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_LAST_WQE_EVENT].counter);
+
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_LEN_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_PROT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_WR_FLUSH_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_MW_BIND_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_BAD_RESP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_ACCESS_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RETRY_EXC_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_ABORT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_INV_EECN_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_FATAL_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_GENERAL_ERR].counter);
+       return stats;
+}
+
 static void ocrdma_update_stats(struct ocrdma_dev *dev)
 {
        ulong now = jiffies, secs;
        int status = 0;
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+                     (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
 
        secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
        if (secs) {
@@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
                if (status)
                        pr_err("%s: stats mbox failed with status = %d\n",
                               __func__, status);
+               /* Update PD counters from PD resource manager */
+               if (dev->pd_mgr->pd_prealloc_valid) {
+                       rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
+                       rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
+                       /* Threshold stats */
+                       rsrc_stats = &rdma_stats->th_rsrc_stats;
+                       rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
+                       rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
+               }
                dev->last_stats_time = jiffies;
        }
 }
 
+static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
+                                       const char __user *buffer,
+                                       size_t count, loff_t *ppos)
+{
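+       /* writing a non-zero value to the "reset_stats" file asks the
+        * firmware to clear its RDMA statistics via the stats mailbox command
+        */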
+       char tmp_str[32];
+       long reset;
+       int status = 0;
+       struct ocrdma_stats *pstats = filp->private_data;
+       struct ocrdma_dev *dev = pstats->dev;
+
+       if (count > 32)
+               goto err;
+
+       if (copy_from_user(tmp_str, buffer, count))
+               goto err;
+
+       tmp_str[count-1] = '\0';
+       if (kstrtol(tmp_str, 10, &reset))
+               goto err;
+
+       switch (pstats->type) {
+       case OCRDMA_RESET_STATS:
+               if (reset) {
+                       status = ocrdma_mbx_rdma_stats(dev, true);
+                       if (status) {
+                               pr_err("Failed to reset stats = %d", status);
+                               goto err;
+                       }
+               }
+               break;
+       default:
+               goto err;
+       }
+
+       return count;
+err:
+       return -EFAULT;
+}
+
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+                       struct ib_mad *out_mad)
+{
+       struct ib_pma_portcounters *pma_cnt;
+
+       memset(out_mad->data, 0, sizeof(out_mad->data));
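+       /* PMA attribute data starts 40 bytes into the MAD data area */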
+       pma_cnt = (void *)(out_mad->data + 40);
+       ocrdma_update_stats(dev);
+
+       pma_cnt->port_xmit_data    = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
+       pma_cnt->port_rcv_data     = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
+       pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
+       pma_cnt->port_rcv_packets  = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
+       return 0;
+}
+
 static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
                                        size_t usr_buf_len, loff_t *ppos)
 {
@@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
        case OCRDMA_RX_DBG_STATS:
                data = ocrdma_rx_dbg_stats(dev);
                break;
+       case OCRDMA_DRV_STATS:
+               data = ocrdma_driver_dbg_stats(dev);
+               break;
 
        default:
                status = -EFAULT;
@@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = ocrdma_dbgfs_ops_read,
+       .write = ocrdma_dbgfs_ops_write,
 };
 
 void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
                                 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
                goto err;
 
+       dev->driver_stats.type = OCRDMA_DRV_STATS;
+       dev->driver_stats.dev = dev;
+       if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+                                       &dev->driver_stats, &ocrdma_dbg_ops))
+               goto err;
+
+       dev->reset_stats.type = OCRDMA_RESET_STATS;
+       dev->reset_stats.dev = dev;
+       if (!debugfs_create_file("reset_stats", S_IRUSR | S_IWUSR, dev->dir,
+                               &dev->reset_stats, &ocrdma_dbg_ops))
+               goto err;
+
        /* Now create dma_mem for stats mbx command */
        if (!ocrdma_alloc_stats_mem(dev))
                goto err;
index 5f5e20c46d7ccdc9fc02aa76084b971ad4bad4c7..091edd68a8a34678e5283b2374c58274c84580b3 100644 (file)
@@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
        OCRDMA_RXQP_ERRSTATS,
        OCRDMA_TXQP_ERRSTATS,
        OCRDMA_TX_DBG_STATS,
-       OCRDMA_RX_DBG_STATS
+       OCRDMA_RX_DBG_STATS,
+       OCRDMA_DRV_STATS,
+       OCRDMA_RESET_STATS
 };
 
 void ocrdma_rem_debugfs(void);
 void ocrdma_init_debugfs(void);
 void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
 void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+                       struct ib_mad *out_mad);
 
 #endif /* __OCRDMA_STATS_H__ */
index fb8d8c4dfbb97d2b36abdf69888793741aba182a..877175563634df79a889ed8a428405258b9df1e4 100644 (file)
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
-       if (index > OCRDMA_MAX_SGID)
+       if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;
 
        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
        return found;
 }
 
+static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
+{
+       u16 pd_bitmap_idx = 0;
+       const unsigned long *pd_bitmap;
+
+       if (dpp_pool) {
+               pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
+               pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+                                                   dev->pd_mgr->max_dpp_pd);
+               __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+               dev->pd_mgr->pd_dpp_count++;
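+               /* track the peak count; reported as threshold stats */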
+               if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
+                       dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
+       } else {
+               pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
+               pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+                                                   dev->pd_mgr->max_normal_pd);
+               __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+               dev->pd_mgr->pd_norm_count++;
+               if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
+                       dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
+       }
+       return pd_bitmap_idx;
+}
+
+static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
+                                       bool dpp_pool)
+{
+       u16 pd_count;
+       u16 pd_bit_index;
+
+       pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
+                             dev->pd_mgr->pd_norm_count;
+       if (pd_count == 0)
+               return -EINVAL;
+
+       if (dpp_pool) {
+               pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
+               if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
+                       return -EINVAL;
+               } else {
+                       __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
+                       dev->pd_mgr->pd_dpp_count--;
+               }
+       } else {
+               pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
+               if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
+                       return -EINVAL;
+               } else {
+                       __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
+                       dev->pd_mgr->pd_norm_count--;
+               }
+       }
+
+       return 0;
+}
+
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+                            bool dpp_pool)
+{
+       int status;
+
+       mutex_lock(&dev->dev_lock);
+       status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
+       mutex_unlock(&dev->dev_lock);
+       return status;
+}
+
+static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+       u16 pd_idx = 0;
+       int status = 0;
+
+       mutex_lock(&dev->dev_lock);
+       if (pd->dpp_enabled) {
+               /* try allocating DPP PD, if not available then normal PD */
+               if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
+                       pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
+                       pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
+               } else if (dev->pd_mgr->pd_norm_count <
+                          dev->pd_mgr->max_normal_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+                       pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+                       pd->dpp_enabled = false;
+               } else {
+                       status = -EINVAL;
+               }
+       } else {
+               if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+                       pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+               } else {
+                       status = -EINVAL;
+               }
+       }
+       mutex_unlock(&dev->dev_lock);
+       return status;
+}
+
 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ocrdma_ucontext *uctx,
                                          struct ib_udata *udata)
@@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                           dev->attr.wqe_size) : 0;
        }
 
+       if (dev->pd_mgr->pd_prealloc_valid) {
+               status = ocrdma_get_pd_num(dev, pd);
+               if (status) {
+                       kfree(pd);
+                       return ERR_PTR(status);
+               }
+               return pd;
+       }
+
 retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
@@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
 {
        int status = 0;
 
-       status = ocrdma_mbx_dealloc_pd(dev, pd);
+       if (dev->pd_mgr->pd_prealloc_valid)
+               status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+       else
+               status = ocrdma_mbx_dealloc_pd(dev, pd);
+
        kfree(pd);
        return status;
 }
@@ -325,7 +435,6 @@ err:
 
 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
-       int status = 0;
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
@@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
                       __func__, dev->id, pd->id);
        }
        uctx->cntxt_pd = NULL;
-       status = _ocrdma_dealloc_pd(dev, pd);
-       return status;
+       (void)_ocrdma_dealloc_pd(dev, pd);
+       return 0;
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@ err:
        if (is_uctx_pd) {
                ocrdma_release_ucontext_pd(uctx);
        } else {
-               status = ocrdma_mbx_dealloc_pd(dev, pd);
+               status = _ocrdma_dealloc_pd(dev, pd);
-               kfree(pd);
        }
 exit:
@@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 {
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
-       int status;
 
-       status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+       (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
 
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
@@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 
        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
-               status = 0;
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
-       return status;
+       return 0;
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 
 int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
-       int status;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);
 
-       status = ocrdma_mbx_destroy_cq(dev, cq);
+       (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
        }
 
        kfree(cq);
-       return status;
+       return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
        int status = 0;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
-       struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                status = -ENOMEM;
                goto gen_err;
        }
-       qp->dev = dev;
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state old_qps;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state old_qps, new_qps;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
 
        /* synchronize with multiple contexts trying to change or retrieve qps */
        mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
        u32 qp_state;
        struct ocrdma_qp_params params;
        struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 
        memset(&params, 0, sizeof(params));
        mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
                goto mbx_err;
        if (qp->qp_type == IB_QPT_UD)
                qp_attr->qkey = params.qkey;
-       qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
-       qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
        qp_attr->path_mtu =
                ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
                                OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
        memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
        qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
                    OCRDMA_QP_PARAMS_STATE_SHIFT;
+       qp_attr->qp_state = get_ibqp_state(qp_state);
+       qp_attr->cur_qp_state = qp_attr->qp_state;
        qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
        qp_attr->max_dest_rd_atomic =
            params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
            params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
        qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
                                OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+       /* Sync driver QP state with FW */
+       ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
 mbx_err:
        return status;
 }
 
-static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
 {
-       int i = idx / 32;
-       unsigned int mask = (1 << (idx % 32));
+       unsigned int i = idx / 32;
+       u32 mask = (1U << (idx % 32));
 
-       if (srq->idx_bit_fields[i] & mask)
-               srq->idx_bit_fields[i] &= ~mask;
-       else
-               srq->idx_bit_fields[i] |= mask;
+       srq->idx_bit_fields[i] ^= mask;
 }
 
 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 {
        int found = false;
        unsigned long flags;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        /* sync with any active CQ poll */
 
        spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp)
 {
-       int status;
        struct ocrdma_pd *pd;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        unsigned long flags;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
 
        attrs.qp_state = IB_QPS_ERR;
        pd = qp->pd;
@@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
         * discarded until the old CQEs are discarded.
         */
        mutex_lock(&dev->dev_lock);
-       status = ocrdma_mbx_destroy_qp(dev, qp);
+       (void) ocrdma_mbx_destroy_qp(dev, qp);
 
        /*
         * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
-       return status;
+       return 0;
 }
 
 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
        else
                ud_hdr->qkey = wr->wr.ud.remote_qkey;
        ud_hdr->rsvd_ahid = ah->id;
+       if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
+               hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
 }
 
 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
        u64 fbo;
        struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
        struct ocrdma_mr *mr;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
 
        wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
 
-       if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+       if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
                return -EINVAL;
 
        hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
        fast_reg->size_sge =
                get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
        mr = (struct ocrdma_mr *) (unsigned long)
-               qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+               dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
        build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
        return 0;
 }
@@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
                        status = ocrdma_build_write(qp, hdr, wr);
                        break;
-               case IB_WR_RDMA_READ_WITH_INV:
-                       hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
                case IB_WR_RDMA_READ:
                        ocrdma_build_read(qp, hdr, wr);
                        break;
@@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
                                 bool *polled, bool *stop)
 {
        bool expand;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+       if (status < OCRDMA_MAX_CQE_ERR)
+               atomic_inc(&dev->cqe_err_stats[status]);
 
        /* when hw sq is empty, but rq is not empty, so we continue
         * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                                int status)
 {
        bool expand;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+       if (status < OCRDMA_MAX_CQE_ERR)
+               atomic_inc(&dev->cqe_err_stats[status]);
 
        /* when hw_rq is empty, but wq is not empty, so continue
         * to keep the cqe to get the cq event again.
index c00ae093b6f881870867b8dac16af33efca4091b..ffd48bfc4923457e5383345acfa3620fa5f6a52f 100644 (file)
@@ -1082,12 +1082,6 @@ struct qib_devdata {
        /* control high-level access to EEPROM */
        struct mutex eep_lock;
        uint64_t traffic_wds;
-       /* active time is kept in seconds, but logged in hours */
-       atomic_t active_time;
-       /* Below are nominal shadow of EEPROM, new since last EEPROM update */
-       uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
-       uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
-       uint16_t eep_hrs;
        /*
         * masks for which bits of errs, hwerrs that cause
         * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
                    const void *buffer, int len);
 void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
 void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+       asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+       wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
index 5670ace27c639adb351b9928c65ed081a599a910..4fb78abd8ba1ad69629a9752878d4aa30a4f2229 100644 (file)
@@ -257,7 +257,7 @@ struct qib_base_info {
 
        /* shared memory page for send buffer disarm status */
        __u64 spi_sendbuf_status;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /*
  * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
         */
        __u64 spu_base_info;
 
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /* User commands. */
 
index 6abd3ed3cd51ecf2c0a21927b0cf4894f7c48941..5e75b43c596b608adfacfeb48ea955ffa914082d 100644 (file)
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
        DEBUGFS_FILE_CREATE(opcode_stats);
        DEBUGFS_FILE_CREATE(ctx_stats);
        DEBUGFS_FILE_CREATE(qp_stats);
-       return;
 }
 
 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
index 5dfda4c5cc9c3b1fde36d02c55cd387b7e366542..8c34b23e5bf670b92b3d1cec4d7b9c9e93ba9a20 100644 (file)
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
                client_pool = dc->next;
        else
                /* None in pool, alloc and init */
-               dc = kmalloc(sizeof *dc, GFP_KERNEL);
+               dc = kmalloc(sizeof(*dc), GFP_KERNEL);
 
        if (dc) {
                dc->next = NULL;
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        if (dd->userbase) {
                /* If user regs mapped, they are after send, so set limit. */
                u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+
                if (!dd->piovl15base)
                        snd_lim = dd->uregbase;
                krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        snd_bottom = dd->pio2k_bufbase;
        if (snd_lim == 0) {
                u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+
                snd_lim = snd_bottom + tot2k;
        }
        /* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
        /* not very efficient, but it works for now */
        while (reg_addr < reg_end) {
                u64 data;
+
                if (copy_from_user(&data, uaddr, sizeof(data))) {
                        ret = -EFAULT;
                        goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
 
        if (!dd || !op)
                return -EINVAL;
-       olp = vmalloc(sizeof *olp);
+       olp = vmalloc(sizeof(*olp));
        if (!olp) {
                pr_err("vmalloc for observer failed\n");
                return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
                op = diag_get_observer(dd, *off);
                if (op) {
                        u32 offset = *off;
+
                        ret = op->hook(dd, op, offset, &data64, 0, use_32);
                }
                /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
                if (count == 4 || count == 8) {
                        u64 data64;
                        u32 offset = *off;
+
                        ret = copy_from_user(&data64, data, count);
                        if (ret) {
                                ret = -EFAULT;
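The qib_diag.c hunks are checkpatch-style cleanups: sizeof gets parentheses and is applied to the dereferenced pointer, and a blank line separates local declarations from code. A hedged userspace sketch of the allocation idiom (struct and function names are invented for illustration):

#include <stdlib.h>
#include <string.h>

struct diag_client {
	struct diag_client *next;
	int state;
};

static struct diag_client *client_alloc(void)
{
	struct diag_client *dc;

	dc = malloc(sizeof(*dc));	/* stays correct even if dc's type changes later */
	if (dc)
		memset(dc, 0, sizeof(*dc));
	return dc;
}

int main(void)
{
	struct diag_client *dc = client_alloc();

	free(dc);
	return 0;
}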
index 5bee08f16d7438d2eba1668bdccee64265c9972d..f58fdc3d25a29a71909a22c772217c62a975b785 100644 (file)
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
 {
        static char iname[16];
 
-       snprintf(iname, sizeof iname, "infinipath%u", unit);
+       snprintf(iname, sizeof(iname), "infinipath%u", unit);
        return iname;
 }
 
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
                qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
                if (qp_num != QIB_MULTICAST_QPN) {
                        int ruc_res;
+
                        qp = qib_lookup_qpn(ibp, qp_num);
                        if (!qp)
                                goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
        rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
        if (dd->flags & QIB_NODMA_RTAIL) {
                u32 seq = qib_hdrget_seq(rhf_addr);
+
                if (seq != rcd->seq_cnt)
                        goto bail;
                hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 {
        struct qib_devdata *dd = ppd->dd;
+
        ppd->lid = lid;
        ppd->lmc = lmc;
 
index 4d5d71aaa2b4e53e319b3c9cc57be37291f3cdd7..311ee6c3dd5e8b2e39382ef1c5d2741040a47636 100644 (file)
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 
        if (t && dd0->nguid > 1 && t <= dd0->nguid) {
                u8 oguid;
+
                dd->base_guid = dd0->base_guid;
                bguid = (u8 *) &dd->base_guid;
 
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
                 * This board has a Serial-prefix, which is stored
                 * elsewhere for backward-compatibility.
                 */
-               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-               snp[sizeof ifp->if_sprefix] = '\0';
+               memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
+               snp[sizeof(ifp->if_sprefix)] = '\0';
                len = strlen(snp);
                snp += len;
-               len = (sizeof dd->serial) - len;
-               if (len > sizeof ifp->if_serial)
-                       len = sizeof ifp->if_serial;
+               len = sizeof(dd->serial) - len;
+               if (len > sizeof(ifp->if_serial))
+                       len = sizeof(ifp->if_serial);
                memcpy(snp, ifp->if_serial, len);
-       } else
-               memcpy(dd->serial, ifp->if_serial,
-                      sizeof ifp->if_serial);
+       } else {
+               memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
+       }
        if (!strstr(ifp->if_comment, "Tested successfully"))
                qib_dev_err(dd,
                        "Board SN %s did not pass functional test: %s\n",
                        dd->serial, ifp->if_comment);
 
-       memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
-       /*
-        * Power-on (actually "active") hours are kept as little-endian value
-        * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-        * atomic_t while running.
-        */
-       atomic_set(&dd->active_time, 0);
-       dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
 done:
        vfree(buf);
 
 bail:;
 }
 
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
-       void *buf;
-       struct qib_flash *ifp;
-       int len, hi_water;
-       uint32_t new_time, new_hrs;
-       u8 csum;
-       int ret, idx;
-       unsigned long flags;
-
-       /* first, check if we actually need to do anything. */
-       ret = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               if (dd->eep_st_new_errs[idx]) {
-                       ret = 1;
-                       break;
-               }
-       }
-       new_time = atomic_read(&dd->active_time);
-
-       if (ret == 0 && new_time < 3600)
-               goto bail;
-
-       /*
-        * The quick-check above determined that there is something worthy
-        * of logging, so get current contents and do a more detailed idea.
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition
-        */
-       len = sizeof(struct qib_flash);
-       buf = vmalloc(len);
-       ret = 1;
-       if (!buf) {
-               qib_dev_err(dd,
-                       "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
-                       len);
-               goto bail;
-       }
-
-       /* Grab semaphore and read current EEPROM. If we get an
-        * error, let go, but if not, keep it until we finish write.
-        */
-       ret = mutex_lock_interruptible(&dd->eep_lock);
-       if (ret) {
-               qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-               goto free_bail;
-       }
-       ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
-       if (ret) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "Unable read EEPROM for logging\n");
-               goto free_bail;
-       }
-       ifp = (struct qib_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-                           csum, ifp->if_csum);
-               ret = 1;
-               goto free_bail;
-       }
-       hi_water = 0;
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               int new_val = dd->eep_st_new_errs[idx];
-               if (new_val) {
-                       /*
-                        * If we have seen any errors, add to EEPROM values
-                        * We need to saturate at 0xFF (255) and we also
-                        * would need to adjust the checksum if we were
-                        * trying to minimize EEPROM traffic
-                        * Note that we add to actual current count in EEPROM,
-                        * in case it was altered while we were running.
-                        */
-                       new_val += ifp->if_errcntp[idx];
-                       if (new_val > 0xFF)
-                               new_val = 0xFF;
-                       if (ifp->if_errcntp[idx] != new_val) {
-                               ifp->if_errcntp[idx] = new_val;
-                               hi_water = offsetof(struct qib_flash,
-                                                   if_errcntp) + idx;
-                       }
-                       /*
-                        * update our shadow (used to minimize EEPROM
-                        * traffic), to match what we are about to write.
-                        */
-                       dd->eep_st_errs[idx] = new_val;
-                       dd->eep_st_new_errs[idx] = 0;
-               }
-       }
-       /*
-        * Now update active-time. We would like to round to the nearest hour
-        * but unless atomic_t are sure to be proper signed ints we cannot,
-        * because we need to account for what we "transfer" to EEPROM and
-        * if we log an hour at 31 minutes, then we would need to set
-        * active_time to -29 to accurately count the _next_ hour.
-        */
-       if (new_time >= 3600) {
-               new_hrs = new_time / 3600;
-               atomic_sub((new_hrs * 3600), &dd->active_time);
-               new_hrs += dd->eep_hrs;
-               if (new_hrs > 0xFFFF)
-                       new_hrs = 0xFFFF;
-               dd->eep_hrs = new_hrs;
-               if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-                       ifp->if_powerhour[0] = new_hrs & 0xFF;
-                       hi_water = offsetof(struct qib_flash, if_powerhour);
-               }
-               if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-                       ifp->if_powerhour[1] = new_hrs >> 8;
-                       hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
-               }
-       }
-       /*
-        * There is a tiny possibility that we could somehow fail to write
-        * the EEPROM after updating our shadows, but problems from holding
-        * the spinlock too long are a much bigger issue.
-        */
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-       if (hi_water) {
-               /* we made some change to the data, uopdate cksum and write */
-               csum = flash_csum(ifp, 1);
-               ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
-       }
-       mutex_unlock(&dd->eep_lock);
-       if (ret)
-               qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-       vfree(buf);
-bail:
-       return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
-       uint new_val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       new_val = dd->eep_st_new_errs[eidx] + incr;
-       if (new_val > 255)
-               new_val = 255;
-       dd->eep_st_new_errs[eidx] = new_val;
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
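The large removal above drops the driver's EEPROM error/active-time logging (qib_update_eeprom_log() and qib_inc_eeprom_err()), and matching hunks later in this diff delete the callers and the active_time accounting. The deleted qib_inc_eeprom_err() is a small saturating-counter pattern; a hedged userspace sketch, with a pthread mutex standing in for the spinlock and all names illustrative (the removed comment says there are four 8-bit counters):

#include <pthread.h>
#include <stdio.h>

#define LOG_CNT 4	/* mirrors QIB_EEP_LOG_CNT in spirit */

static unsigned char new_errs[LOG_CNT];
static pthread_mutex_t errs_lock = PTHREAD_MUTEX_INITIALIZER;

static void inc_err(unsigned int idx, unsigned int incr)
{
	unsigned int val;

	pthread_mutex_lock(&errs_lock);
	val = new_errs[idx] + incr;
	if (val > 255)
		val = 255;	/* 8-bit EEPROM counters saturate rather than wrap */
	new_errs[idx] = val;
	pthread_mutex_unlock(&errs_lock);
}

int main(void)
{
	inc_err(0, 300);
	printf("counter 0 = %d\n", new_errs[0]);	/* prints 255 */
	return 0;
}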
index b15e34eeef685d510c781d25d08433e4b3e7c717..41937c6f888af13deadb6c7b25678cfc34596cf8 100644 (file)
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                 * unless perhaps the user has mpin'ed the pages
                 * themselves.
                 */
-               qib_devinfo(dd->pcidev,
-                        "Failed to lock addr %p, %u pages: "
-                        "errno %d\n", (void *) vaddr, cnt, -ret);
+               qib_devinfo(
+                       dd->pcidev,
+                       "Failed to lock addr %p, %u pages: errno %d\n",
+                       (void *) vaddr, cnt, -ret);
                goto done;
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-                                tidmap, sizeof tidmap)) {
+                                tidmap, sizeof(tidmap))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
        }
 
        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-                          sizeof tidmap)) {
+                          sizeof(tidmap))) {
                ret = -EFAULT;
                goto done;
        }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                /* rcvegrbufs are read-only on the slave */
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
-                                "Can't map eager buffers as "
-                                "writable (flags=%lx)\n", vma->vm_flags);
+                                "Can't map eager buffers as writable (flags=%lx)\n",
+                                vma->vm_flags);
                        ret = -EPERM;
                        goto bail;
                }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
         */
        if (weight >= qib_cpulist_count) {
                int cpu;
+
                cpu = find_first_zero_bit(qib_cpulist,
                                          qib_cpulist_count);
                if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
                uinfo->spu_userversion & 0xffff)) {
                qib_devinfo(dd->pcidev,
-                        "Mismatched user version (%d.%d) and driver "
-                        "version (%d.%d) while context sharing. Ensure "
-                        "that driver and library are from the same "
-                        "release.\n",
+                        "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
                         (int) (uinfo->spu_userversion >> 16),
                         (int) (uinfo->spu_userversion & 0xffff),
                         QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;
+
                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 
        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;
+
                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;
+
                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
+
                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
        }
        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);
+
                if (dd) {
                        if (pcibus_to_node(dd->pcidev->bus) < 0) {
                                ret = -EINVAL;
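Several qib_file_ops.c hunks re-join user-visible message strings onto a single line (even past 80 columns) so they can be found with grep, matching the kernel's coding-style guidance on not splitting printk strings. A trivial sketch of the preferred form (fprintf stands in for qib_devinfo):

#include <stdio.h>

static void report_lock_failure(void *vaddr, unsigned int cnt, int err)
{
	/* keep the whole format string on one line; only the arguments wrap */
	fprintf(stderr, "Failed to lock addr %p, %u pages: errno %d\n",
		vaddr, cnt, -err);
}

int main(void)
{
	report_lock_failure((void *)0x1000, 4, -14);
	return 0;
}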
index 81854586c081fee37e0b3933a26d08e5e8b4e815..650897a8591e872f338d03994da3ed18e5a51bf7 100644 (file)
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
 {
        qib_stats.sps_ints = qib_sps_ints();
        return simple_read_from_buffer(buf, count, ppos, &qib_stats,
-                                      sizeof qib_stats);
+                                      sizeof(qib_stats));
 }
 
 /*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        return simple_read_from_buffer(buf, count, ppos, qib_statnames,
-               sizeof qib_statnames - 1); /* no null */
+               sizeof(qib_statnames) - 1); /* no null */
 }
 
 static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
        int ret, i;
 
        /* create the per-unit directory */
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
                          &simple_dir_operations, dd);
        if (ret) {
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!(d_unhashed(tmp) && tmp->d_inode)) {
+       if (!d_unhashed(tmp) && tmp->d_inode) {
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
                simple_unlink(parent->d_inode, tmp);
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
 
        root = dget(sb->s_root);
        mutex_lock(&root->d_inode->i_mutex);
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        dir = lookup_one_len(unit, root, strlen(unit));
 
        if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
 {
        struct dentry *ret;
+
        ret = mount_single(fs_type, flags, data, qibfs_fill_super);
        if (!IS_ERR(ret))
                qib_super = ret->d_sb;
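The remove_file() change in qib_fs.c is more than style: !(d_unhashed(tmp) && tmp->d_inode) and !d_unhashed(tmp) && tmp->d_inode differ exactly when tmp->d_inode is NULL, so the patched test no longer tries to unlink a negative dentry. A two-variable truth table makes the difference visible:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int unhashed = 0; unhashed <= 1; unhashed++) {
		for (int has_inode = 0; has_inode <= 1; has_inode++) {
			bool old_test = !(unhashed && has_inode);	/* previous condition */
			bool new_test = !unhashed && has_inode;		/* patched condition */

			printf("unhashed=%d has_inode=%d  old=%d new=%d\n",
			       unhashed, has_inode, old_test, new_test);
		}
	}
	return 0;
}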
index d68266ac7619b896c49e500256c263ec1c3619c1..0d2ba59af30af66bce01ef8132c8182cc6e44a33 100644 (file)
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
         */
        mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
                ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
-       qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
                }
                if (crcs) {
                        u32 cntr = dd->cspec->lli_counter;
+
                        cntr += crcs;
                        if (cntr) {
                                if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
                        "irq is 0, BIOS error?  Interrupts won't work\n");
        else {
                int ret;
+
                ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
                                  QIB_DRV_NAME, dd);
                if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 
        qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 {
        int ret = 0;
+
        if (!strncmp(what, "ibc", 3)) {
                ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
 static void set_6120_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
        dd->cspec->cregbase = (u64 __iomem *)
                ((char __iomem *) dd->kregbase + cregbase);
index 7dec89fdc1248dc69c3cab38069e5e2f32986ce3..22affda8af88eacbd11f21abab55ba299dfb0e0b 100644 (file)
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
        errs &= QLOGIC_IB_E_SDMAERRS;
 
        msg = dd->cspec->sdmamsgbuf;
-       qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+       qib_decode_7220_sdma_errs(ppd, errs, msg,
+               sizeof(dd->cspec->sdmamsgbuf));
        spin_lock_irqsave(&ppd->sdma_lock, flags);
 
        if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
 static void reenable_7220_chase(unsigned long opaque)
 {
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
                ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
                ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
 
-       qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 done:
        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
index a7eb32517a04bced55f9d1d5720b5d060d96e92b..ef97b71c8f7dd713a77401f593c6a10320a046e6 100644 (file)
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
 
 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation, \
+MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");
 
 static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
 static int  setup_txselect(const char *, struct kernel_param *);
 module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect, \
+MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 
 #define BOARD_QME7342 5
 #define BOARD_QMH7342 6
+#define BOARD_QMH7360 9
 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        /* do these first, they are most important */
        if (errs & QIB_E_HARDWARE) {
                *msg = '\0';
-               qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        } else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        mask = QIB_E_HARDWARE;
        *msg = '\0';
 
-       err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+       err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
                   qib_7322error_msgs);
 
        /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
        *msg = '\0';
 
        if (errs & ~QIB_E_P_BITSEXTANT) {
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
                if (!*msg)
-                       snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+                       snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
                                 "no others");
                qib_dev_porterr(dd, ppd->port,
                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                /* determine cause, then write to clear */
                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
                           hdrchk_msgs);
                *msg = '\0';
                /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug
                         */
-                       err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+                       err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                                   (errs & QIB_E_P_LINK_PKTERRS),
                                   qib_7322p_error_msgs);
                        *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
                           qib_7322p_error_msgs);
                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
                *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
                if (dd->cspec->num_msix_entries) {
                        /* and same for MSIx */
                        u64 val = qib_read_kreg64(dd, kr_intgranted);
+
                        if (val)
                                qib_write_kreg(dd, kr_intgranted, val);
                }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
                int err;
                unsigned long flags;
                struct qib_pportdata *ppd = dd->pport;
+
                for (; pidx < dd->num_pports; ++pidx, ppd++) {
                        err = 0;
                        if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                qib_update_rhdrq_dca(rcd, cpu);
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                qib_update_sdma_dca(ppd, cpu);
        }
 }
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                dd = rcd->dd;
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                dd = ppd->dd;
        }
        qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                struct qib_pportdata *ppd;
                struct qib_qsfp_data *qd;
                u32 mask;
+
                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
                if (gpiostatus & dd->cspec->gpio_mask & mask) {
                        u64 pins;
+
                        qd = &ppd->cpspec->qsfp_data;
                        gpiostatus &= ~mask;
                        pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
        }
 
        /* Try to get MSIx interrupts */
-       memset(redirect, 0, sizeof redirect);
+       memset(redirect, 0, sizeof(redirect));
        mask = ~0ULL;
        msixnum = 0;
        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
                n = "InfiniPath_QME7362";
                dd->flags |= QIB_HAS_QSFP;
                break;
+       case BOARD_QMH7360:
+               n = "Intel IB QDR 1P FLR-QSFP Adptr";
+               dd->flags |= QIB_HAS_QSFP;
+               break;
        case 15:
                n = "InfiniPath_QLE7342_TEST";
                dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
         */
        for (i = 0; i < msix_entries; i++) {
                u64 vecaddr, vecdata;
+
                vecaddr = qib_read_kreg64(dd, 2 * i +
                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
                traffic_wds -= ppd->dd->traffic_wds;
                ppd->dd->traffic_wds += traffic_wds;
-               if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-                       atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
                                                QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
 {
        u64 newctrlb;
+
        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
                                    IBA7322_IBC_IBTA_1_2_MASK |
                                    IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
 
        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
        struct qib_devdata *dd;
        unsigned long val;
        char *n;
+
        if (strlen(str) >= MAX_ATTEN_LEN) {
                pr_info("txselect_values string too long\n");
                return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
        val = TIDFLOW_ERRBITS; /* these are W1C */
        for (i = 0; i < dd->cfgctxts; i++) {
                int flow;
+
                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
        }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 
        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
                struct qib_chippport_specific *cp = ppd->cpspec;
+
                ppd->link_speed_supported = features & PORT_SPD_CAP;
                features >>=  PORT_SPD_CAP_SHIFT;
                if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
                                ppd->vls_supported = IB_VL_VL0_7;
                        else {
                                qib_devinfo(dd->pcidev,
-                                           "Invalid num_vls %u for MTU %d "
-                                           ", using 4 VLs\n",
+                                           "Invalid num_vls %u for MTU %d , using 4 VLs\n",
                                            qib_num_cfg_vls, mtu);
                                ppd->vls_supported = IB_VL_VL0_3;
                                qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
        int ret = 0;
+
        if (ppd->dd->cspec->r1)
                ret = serdes_7322_init_old(ppd);
        else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
 
 static int qib_r_grab(struct qib_devdata *dd)
 {
-       u64 val;
-       val = SJA_EN;
+       u64 val = SJA_EN;
+
        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
 {
        u64 val;
        int timeout;
+
        for (timeout = 0; timeout < 100 ; ++timeout) {
                val = qib_read_kreg32(dd, kr_r_access);
                if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
                }
                if (inp) {
                        int tdi = inp[pos >> 3] >> (pos & 7);
+
                        val |= ((tdi & 1) << R_TDI_LSB);
                }
                qib_write_kreg(dd, kr_r_access, val);
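Two iba7322 hunks drop a stray backslash after the MODULE_PARM_DESC() name argument; continuation backslashes are only needed inside a #define body, while an ordinary macro invocation may span lines freely. A minimal sketch (the DESCRIBE macro is invented for illustration):

#include <stdio.h>

#define DESCRIBE(name, text) printf("%s: %s\n", name, text)

int main(void)
{
	DESCRIBE("txselect",
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
	return 0;
}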
index 729da39c49ed3fac472db472c3ac58b7766f8ada..2ee36953e234c46ff704bc6e5dfbed8339c09974 100644 (file)
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
         * Allocate full ctxtcnt array, rather than just cfgctxts, because
         * cleanup iterates across all possible ctxts.
         */
-       dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+       dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
        if (!dd->rcd) {
                qib_dev_err(dd,
                        "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
                        u8 hw_pidx, u8 port)
 {
        int size;
+
        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
                ppd = dd->pport + pidx;
                if (!ppd->qib_wq) {
                        char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
                        snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
                                dd->unit, pidx);
                        ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
 
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                int mtu;
+
                if (lastfail)
                        ret = lastfail;
                ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
                qib_free_pportdata(ppd);
        }
 
-       qib_update_eeprom_log(dd);
 }
 
 /**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
        addr = vmalloc(cnt);
        if (!addr) {
                qib_devinfo(dd->pcidev,
-                        "Couldn't get memory for checking PIO perf,"
-                        " skipping\n");
+                        "Couldn't get memory for checking PIO perf, skipping\n");
                goto done;
        }
 
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 
        if (!qib_cpulist_count) {
                u32 count = num_online_cpus();
+
                qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
                                      sizeof(long), GFP_KERNEL);
                if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        ib_dealloc_device(&dd->verbs_dev.ibdev);
-       return ERR_PTR(ret);;
+       return ERR_PTR(ret);
 }
 
 /*
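In qib_init.c the rcd array allocation switches from kzalloc(count * size) to kcalloc(count, size), whose two-argument form checks the multiplication for overflow before allocating. The same property can be shown with calloc() in userspace (the huge count is deliberately pathological):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = (size_t)1 << (sizeof(size_t) * 8 - 1);	/* absurd element count */

	void *a = malloc(n * 16);	/* n * 16 wraps around to 0: silently too small */
	void *b = calloc(n, 16);	/* multiplication overflow detected: NULL on glibc */

	printf("malloc(n * 16) = %p, calloc(n, 16) = %p\n", a, b);
	free(a);
	free(b);
	return 0;
}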
index f4918f2165ec72616d51a3bd9c461b33f6ed8548..086616d071b988e38cc813b09a9659d787195c62 100644 (file)
@@ -168,7 +168,6 @@ skip_ibchange:
        ppd->lastibcstat = ibcs;
        if (ev)
                signal_ib_event(ppd, ev);
-       return;
 }
 
 void qib_clear_symerror_on_linkup(unsigned long opaque)
index 3b9afccaaade824370f5c0ea0d6d6ceb519e6090..ad843c786e7212d0c89f90264bacb5cb6b8346a0 100644 (file)
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
        if (!mr->lkey_published)
                goto out;
        if (lkey == 0)
-               rcu_assign_pointer(dev->dma_mr, NULL);
+               RCU_INIT_POINTER(dev->dma_mr, NULL);
        else {
                r = lkey >> (32 - ib_qib_lkey_table_size);
-               rcu_assign_pointer(rkt->table[r], NULL);
+               RCU_INIT_POINTER(rkt->table[r], NULL);
        }
        qib_put_mr(mr);
        mr->lkey_published = 0;
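The qib_keys.c (and later qib_qp.c) hunks replace rcu_assign_pointer(p, NULL) with RCU_INIT_POINTER(p, NULL): the assign variant orders initialisation of the pointed-to object before publication, which is pointless when the stored value is NULL, so a plain store is enough. A conceptual C11 sketch of the two stores (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	int ready;
};

static _Atomic(struct obj *) shared;

static void publish(struct obj *o)
{
	o->ready = 1;
	/* like rcu_assign_pointer(): release store so readers see a ready object */
	atomic_store_explicit(&shared, o, memory_order_release);
}

static void retract(void)
{
	/* like RCU_INIT_POINTER(..., NULL): nothing to order, relaxed store suffices */
	atomic_store_explicit(&shared, NULL, memory_order_relaxed);
}

int main(void)
{
	static struct obj o;

	publish(&o);
	retract();
	printf("shared = %p\n", (void *)atomic_load(&shared));
	return 0;
}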
index 636be117b57859e690fdb0704be6fe10b429511f..395f4046dba2054633ad41f4a0f67a4dbee57c63 100644 (file)
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
                       hop_cnt);
        }
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 static int subn_get_nodedescription(struct ib_smp *smp,
index 8b73a11d571c1671ba6678565c5e34754a3ab411..146cf29a2e1db19a8293f2ecbf3f8348ce1732c2 100644 (file)
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
                                           void *obj) {
        struct qib_mmap_info *ip;
 
-       ip = kmalloc(sizeof *ip, GFP_KERNEL);
+       ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
                goto bail;
 
index a77fb4fb14e43c255e23a41b7c86877835a78ea3..c4473db46699b5f367a0fd1b9d9df92bb2723d50 100644 (file)
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
 
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
        for (; i < m; i++) {
-               mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
+               mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
                goto bail;
        }
 
-       mr = kzalloc(sizeof *mr, GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
        if (!mr)
                goto bail;
 
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);
 
-       pl = kzalloc(sizeof *pl, GFP_KERNEL);
+       pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return ERR_PTR(-ENOMEM);
 
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+       fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
        if (!fmr)
                goto bail;
 
index 61a0046efb76ff9310b9a15f79dfe9dd11354c05..4758a3801ae8f916b6a96988b6c88ce5c344c588 100644 (file)
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
        /* We can't pass qib_msix_entry array to qib_msix_setup
         * so use a dummy msix_entry array and copy the allocated
         * irq back to the qib_msix_entry array. */
-       msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
+       msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
        if (!msix_entry)
                goto do_intx;
 
@@ -234,8 +234,10 @@ free_msix_entry:
        kfree(msix_entry);
 
 do_intx:
-       qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
-                       "falling back to INTx\n", nvec, ret);
+       qib_dev_err(
+               dd,
+               "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+               nvec, ret);
        *msixcnt = 0;
        qib_enable_intx(dd->pcidev);
 }
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
 {
        int r;
+
        r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
                                   dd->pcibar0);
        if (r)
@@ -696,6 +699,7 @@ static void
 qib_pci_resume(struct pci_dev *pdev)
 {
        struct qib_devdata *dd = pci_get_drvdata(pdev);
+
        qib_devinfo(pdev, "QIB resume function called\n");
        pci_cleanup_aer_uncorrect_error_status(pdev);
        /*
index 6ddc0264aad2779ef327161c14a2ee9aef2e543f..4fa88ba2963e6ba21186ae5eb095ea531b741e97 100644 (file)
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 
        if (rcu_dereference_protected(ibp->qp0,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp0, NULL);
+               RCU_INIT_POINTER(ibp->qp0, NULL);
        } else if (rcu_dereference_protected(ibp->qp1,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp1, NULL);
+               RCU_INIT_POINTER(ibp->qp1, NULL);
        } else {
                struct qib_qp *q;
                struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
                                lockdep_is_held(&dev->qpt_lock))) != NULL;
                                qpp = &q->next)
                        if (q == qp) {
-                               rcu_assign_pointer(*qpp,
+                               RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                         lockdep_is_held(&dev->qpt_lock)));
                                removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->qp_table[n],
                        lockdep_is_held(&dev->qpt_lock));
-               rcu_assign_pointer(dev->qp_table[n], NULL);
+               RCU_INIT_POINTER(dev->qp_table[n], NULL);
 
                for (; qp; qp = rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&dev->qpt_lock)))
index fa71b1e666c5414fbba2357fe531e75cabc7986c..5e27f76805e28af0c0e0acdc9864360f8cf09b41 100644 (file)
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
         * Module could take up to 2 Msec to respond to MOD_SEL, and there
         * is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
        else if (pass)
                qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
 
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
         * Module could take up to 2 Msec to respond to MOD_SEL,
         * and there is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
         * going away, and there is no way to tell if it is ready.
         * so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
                 * set the page to zero, Even if it already appears to be zero.
                 */
                u8 poke = 0;
+
                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                udelay(50);
                if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
        udelay(20); /* Generous RST dwell */
 
        dd->f_gpio_mod(dd, mask, mask, mask);
-       return;
 }
 
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
 
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                int iidx;
+
                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
                if (ret < 0)
                        goto bail;
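The qib_qsfp.c hunks raise the module-select settle delay from msleep(2) to msleep(20). A plausible reading, consistent with the kernel's timers-howto guidance, is that msleep() of only a few milliseconds rounds up to whole jiffies and can sleep far longer than requested, so asking for 20 ms makes the actual wait explicit. A rough sketch of the rounding under an assumed HZ=100:

#include <stdio.h>

#define HZ 100	/* assumption for illustration: 10 ms per jiffy */

static unsigned long msecs_to_jiffies_roundup(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;	/* round up to whole jiffies */
}

int main(void)
{
	printf("msleep(2):  at least %lu jiffy  (~%lu ms)\n",
	       msecs_to_jiffies_roundup(2), msecs_to_jiffies_roundup(2) * (1000 / HZ));
	printf("msleep(20): at least %lu jiffies (~%lu ms)\n",
	       msecs_to_jiffies_roundup(20), msecs_to_jiffies_roundup(20) * (1000 / HZ));
	return 0;
}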
index 2f2501890c4ea2b26a9ebd7a82755688605443e3..4544d6f88ad77c7f7c69fd6e6d4a138188f493b3 100644 (file)
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
index 4c07a8b34ffe27e2bfa89a19883fa25ef6aa64e7..f42bd0f47577a4557f47cac58de4a16b678b923d 100644 (file)
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
                return ppd->guid;
-       } else
-               return ibp->guids[index - 1];
+       }
+       return ibp->guids[index - 1];
 }
 
 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
                goto serr;
        }
 
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        send_status = IB_WC_SUCCESS;
 
        release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;
 
-               memset(&wc, 0, sizeof wc);
+               memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
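The get_sguid() hunk in qib_ruc.c drops an else that followed a return, a common checkpatch cleanup: once the if-branch has returned, the remaining code needs no extra indentation. A tiny sketch (names invented for illustration):

#include <stdio.h>

static int pick_guid(unsigned int index, int port_guid, const int *extra_guids)
{
	if (index == 0)
		return port_guid;
	return extra_guids[index - 1];	/* previously wrapped in an else block */
}

int main(void)
{
	static const int extra[] = { 7, 8, 9 };

	printf("%d %d\n", pick_guid(0, 5, extra), pick_guid(2, 5, extra));
	return 0;
}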
index 911205d3d5a0bf255fae65126740ab47eaa4a8ef..c72775f2721226868604b3770b1dbe23b7456dc0 100644 (file)
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
                 * it again during startup.
                 */
                u64 val;
+
                rst_val &= ~(1ULL);
                qib_write_kreg(dd, kr_hwerrmask,
                               dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                 * Both should be clear
                 */
                u64 newval = 0;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                /* Need to claim */
                u64 pollval;
                u64 newval = EPB_ACC_REQ | oct_sel;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
                        if (!sofar) {
                                /* Only set address at start of chunk */
                                int addrbyte = (addr + sofar) >> 8;
+
                                transval = csbit | EPB_MADDRH | addrbyte;
                                tries = epb_trans(dd, trans, transval,
                                                  &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
  * IRQ not set up at this point in init, so we poll.
  */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (30)
+#define TRIM_TMO (15)
 
 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
                        ret = 1;
                        break;
                }
-               msleep(10);
+               msleep(20);
        }
        if (trim_tmo >= TRIM_TMO) {
                qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
                dds_reg_map >>= 4;
                for (midx = 0; midx < DDS_ROWS; ++midx) {
                        u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+
                        data = dds_init_vals[midx].reg_vals[idx];
                        writeq(data, daddr);
                        mmiowb();
index 3c8e4e3caca6240175bbddb35fb304107179920e..81f56cdff2bc280c7a64f816a215b10b703d0ec4 100644 (file)
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
 
-       buf[sizeof dd->serial] = '\0';
-       memcpy(buf, dd->serial, sizeof dd->serial);
+       buf[sizeof(dd->serial)] = '\0';
+       memcpy(buf, dd->serial, sizeof(dd->serial));
        strcat(buf, "\n");
        return strlen(buf);
 }
@@ -611,28 +611,6 @@ bail:
        return ret < 0 ? ret : count;
 }
 
-static ssize_t show_logged_errs(struct device *device,
-                               struct device_attribute *attr, char *buf)
-{
-       struct qib_ibdev *dev =
-               container_of(device, struct qib_ibdev, ibdev.dev);
-       struct qib_devdata *dd = dd_from_dev(dev);
-       int idx, count;
-
-       /* force consistency with actual EEPROM */
-       if (qib_update_eeprom_log(dd) != 0)
-               return -ENXIO;
-
-       count = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-                                  dd->eep_st_errs[idx],
-                                  idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
-       }
-
-       return count;
-}
-
 /*
  * Dump tempsense regs. in decimal, to ease shell-scripts.
  */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
        &dev_attr_nfreectxts,
        &dev_attr_serial,
        &dev_attr_boardversion,
-       &dev_attr_logged_errors,
        &dev_attr_tempsense,
        &dev_attr_localbus_info,
        &dev_attr_chip_reset,
index 647f7beb1b0a1669a841c8180a9d6f4f1722c4c9..f5698664419b430ac932fb396f7a35c11ca80816 100644 (file)
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
                udelay(2);
        else {
                int rise_usec;
+
                for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
                        if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
                                break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
 {
        int ret = 1;
+
        if (flags & QIB_TWSI_START)
                start_seq(dd);
 
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
        int sub_len;
        const u8 *bp = buffer;
        int max_wait_time, i;
-       int ret;
-       ret = 1;
+       int ret = 1;
 
        while (len > 0) {
                if (dev == QIB_TWSI_NO_DEV) {
index 31d3561400a49f6056eb6be5c082b05cc0edb0e4..eface3b3dacf6a3d49c6d67b6702de7d38abc8ab 100644 (file)
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
 
        for (i = 0; i < cnt; i++) {
                int which;
+
                if (!test_bit(i, mask))
                        continue;
                /*
index aaf7039f8ed2112041174d1b320a3d8205348c08..26243b722b5e979c1324471b6e17871d3ef22540 100644 (file)
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
         * present on the wire.
         */
        length = swqe->length;
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);
 
        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
index d2806cae234c254ef7e71ee58e03df5d2c1efdc9..3e0677c512768a7bd79a612fae251f0af70db512 100644 (file)
@@ -50,7 +50,7 @@
 /* expected size of headers (for dma_pool) */
 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
 /* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
 
 /*
  * track how many times a process open this driver.
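The halved drain count above pairs with the doubled msleep() further down in this file (10 ms to 20 ms), so the overall budget is unchanged: 250 iterations * 20 ms = 5000 ms, which still matches the "drain the queue for 5secs" comment. The same reshuffle appears in the SerDes trim-done poll earlier in this diff (TRIM_TMO 30 -> 15 with msleep(20)), keeping that budget at 15 * 20 ms = 300 ms; msleep() of very small values tends to oversleep, so fewer, longer sleeps hold the same wall-clock budget more reliably.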
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
                sdma_rb_node->refcount++;
        } else {
                int ret;
+
                sdma_rb_node = kmalloc(sizeof(
                        struct qib_user_sdma_rb_node), GFP_KERNEL);
                if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;
+
                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
-               msleep(10);
+               msleep(20);
        }
 
        if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@ retry:
 
        if (nfree && !list_empty(pktlist))
                goto retry;
-
-       return;
 }
 
 /* pq->lock must be held, get packets on the wire... */
index 9bcfbd8429804e237b23555a54bbd2b1d0fc5a27..4a3599890ea5f114655a34e472bee9197d73bd2c 100644 (file)
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
 done:
        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf_orig + spcl_off);
        }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
         * we allow allocations of more than we report for this value.
         */
 
-       pd = kmalloc(sizeof *pd, GFP_KERNEL);
+       pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
                goto bail;
        }
 
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+       ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
        struct ib_ah *ah = ERR_PTR(-EINVAL);
        struct qib_qp *qp0;
 
-       memset(&attr, 0, sizeof attr);
+       memset(&attr, 0, sizeof(attr));
        attr.dlid = dlid;
        attr.port_num = ppd_from_ibp(ibp)->port;
        rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
        struct qib_ucontext *context;
        struct ib_ucontext *ret;
 
-       context = kmalloc(sizeof *context, GFP_KERNEL);
+       context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
 
        dev->qp_table_size = ib_qib_qp_table_size;
        get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-       dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
+       dev->qp_table = kmalloc_array(
+                               dev->qp_table_size,
+                               sizeof(*dev->qp_table),
                                GFP_KERNEL);
        if (!dev->qp_table) {
                ret = -ENOMEM;
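Replacing the open-coded kmalloc(n * size) with kmalloc_array() makes the multiplication overflow-checked: kmalloc_array(n, size, flags) returns NULL if n * size would overflow, instead of silently allocating a short buffer. A minimal sketch under that assumption, with a hypothetical table type:

    #include <linux/slab.h>

    struct example_entry {
            u32 key;
            void *payload;
    };

    /* Hypothetical helper: the allocation fails cleanly (NULL) when
     * nelems * sizeof(struct example_entry) would overflow. */
    static struct example_entry *example_alloc_table(unsigned int nelems)
    {
            return kmalloc_array(nelems, sizeof(struct example_entry),
                                 GFP_KERNEL);
    }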
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        for (i = 0; i < ppd->sdma_descq_cnt; i++) {
                struct qib_verbs_txreq *tx;
 
-               tx = kzalloc(sizeof *tx, GFP_KERNEL);
+               tx = kzalloc(sizeof(*tx), GFP_KERNEL);
                if (!tx) {
                        ret = -ENOMEM;
                        goto err_tx;
index dabb697b1c2a6025ae72927740582a1cb277f796..f8ea069a3eafca4078ab70848c3804867609143c 100644 (file)
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
 {
        struct qib_mcast_qp *mqp;
 
-       mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+       mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
        if (!mqp)
                goto bail;
 
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
 {
        struct qib_mcast *mcast;
 
-       mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+       mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
        if (!mcast)
                goto bail;
 
index 1d7281c5a02eaf633fed9d4b71d4bec2b9433915..81b225f2300aed34eab9b88077bd4c4097dceae2 100644 (file)
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        if (dd->piobcnt2k && dd->piobcnt4k) {
                /* 2 sizes for chip */
                unsigned long pio2kbase, pio4kbase;
+
                pio2kbase = dd->piobufbase & 0xffffffffUL;
                pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
                if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        }
 
        for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-               /* do nothing */ ;
+               ; /* do nothing */
 
        if (piolen != (1ULL << bits)) {
                piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
                piolen = 1ULL << (bits + 1);
        }
        if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               atmp = pioaddr & ~(piolen - 1);
+               u64 atmp = pioaddr & ~(piolen - 1);
+
                if (atmp < addr || (atmp + piolen) > (addr + len)) {
                        qib_dev_err(dd,
                                "No way to align address/size (%llx/%llx), no WC mtrr\n",
index 5ce26817e7e1d9b8d43126518188b3769e0c7f44..b47aea1094b2d9f7e434cf8a442f4b83b1a28f3c 100644 (file)
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           enum dma_data_direction dma_dir);
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data);
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir);
+
 int  iser_initialize_task_headers(struct iscsi_task *task,
                        struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
index 3821633f1065b15a9fcc1b1c36a74eda619aeae3..20e859a6f1a63b2fede51dcb2b02b9b6f638a36e 100644 (file)
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
 
-       if (!iser_conn->rx_descs)
-               goto free_login_buf;
-
        if (device->iser_free_rdma_reg_res)
                device->iser_free_rdma_reg_res(ib_conn);
 
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;
 
-free_login_buf:
        iser_free_login_buf(iser_conn);
 }
 
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_IN]);
+                                                &iser_task->data[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_IN]);
+                                                &iser_task->prot[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
        }
 
        if (iser_task->dir[ISER_DIR_OUT]) {
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_OUT]);
+                                                &iser_task->data[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_OUT]);
+                                                &iser_task->prot[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
        }
 }
index abce9339333f0a8551a2e52600d2409edd39461c..341040bf09849d41e4e01c613e0d0e01683fceda 100644 (file)
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 }
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data)
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir)
 {
        struct ib_device *dev;
 
        dev = iser_task->iser_conn->ib_conn.device->ib_device;
-       ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+       ib_dma_unmap_sg(dev, data->buf, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                iser_data_buf_dump(mem, ibdev);
 
        /* unmap the command data before accessing it */
-       iser_dma_unmap_task_data(iser_task, mem);
+       iser_dma_unmap_task_data(iser_task, mem,
+                                (cmd_dir == ISER_DIR_OUT) ?
+                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        /* allocate copy buf, if we are writing, copy the */
        /* unaligned scatterlist, dma map the copy        */
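The new dma_data_direction argument lets each caller unmap with the same direction the buffer was mapped with; the DMA API requires the unmap direction to match the map direction, and the old code always passed DMA_FROM_DEVICE. A minimal sketch of the direction selection the hunks above perform, using a hypothetical wrapper rather than the driver's own types:

    #include <linux/dma-direction.h>

    /* Hypothetical wrapper: transmit-side buffers were mapped
     * DMA_TO_DEVICE, receive-side buffers DMA_FROM_DEVICE, and the
     * unmap must reuse the same direction. */
    static enum dma_data_direction example_unmap_dir(bool is_write)
    {
            return is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
    }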
index 695a2704bd4380acafbc04c4ae7f0ecd96bc95a0..4065abe28829f78356c5ee7cd76ffa7b7330b3d5 100644 (file)
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
 /**
  * iser_free_ib_conn_res - release IB related resources
  * @iser_conn: iser connection struct
- * @destroy_device: indicator if we need to try to release
- *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
- *     will use this.
+ * @destroy: indicator if we need to try to release the
+ *     iser device and memory regions pool (only iscsi

+ *     shutdown and DEVICE_REMOVAL will use this).
  *
  * This routine is called with the iser state mutex held
  * so the cm_id removal is out of here. It is Safe to
  * be invoked multiple times.
  */
 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
-                                 bool destroy_device)
+                                 bool destroy)
 {
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
        iser_info("freeing conn %p cma_id %p qp %p\n",
                  iser_conn, ib_conn->cma_id, ib_conn->qp);
 
-       iser_free_rx_descriptors(iser_conn);
-
        if (ib_conn->qp != NULL) {
                ib_conn->comp->active_qps--;
                rdma_destroy_qp(ib_conn->cma_id);
                ib_conn->qp = NULL;
        }
 
-       if (destroy_device && device != NULL) {
-               iser_device_try_release(device);
-               ib_conn->device = NULL;
+       if (destroy) {
+               if (iser_conn->rx_descs)
+                       iser_free_rx_descriptors(iser_conn);
+
+               if (device != NULL) {
+                       iser_device_try_release(device);
+                       ib_conn->device = NULL;
+               }
        }
 }
 
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
        mutex_unlock(&ig.connlist_mutex);
 
        mutex_lock(&iser_conn->state_mutex);
+       /* In case we end up here without ep_disconnect being invoked. */
        if (iser_conn->state != ISER_CONN_DOWN) {
                iser_warn("iser conn %p state %d, expected state down.\n",
                          iser_conn, iser_conn->state);
+               iscsi_destroy_endpoint(iser_conn->ep);
                iser_conn->state = ISER_CONN_DOWN;
        }
        /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 }
 
 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
-                                bool destroy_device)
+                                bool destroy)
 {
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
         * and flush errors.
         */
        iser_disconnected_handler(cma_id);
-       iser_free_ib_conn_res(iser_conn, destroy_device);
+       iser_free_ib_conn_res(iser_conn, destroy);
        complete(&iser_conn->ib_completion);
 };
 
index dafb3c531f96f7ae61e70ff9f37d9324bee687ff..075b19cc78e89d11d73ba19bcffcd68c18323907 100644 (file)
@@ -38,7 +38,7 @@
 #define ISER_MAX_CQ_LEN                (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
                                 ISERT_MAX_CONN)
 
-int isert_debug_level = 0;
+static int isert_debug_level;
 module_param_named(debug_level, isert_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
                isert_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
-               isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
+               isert_dbg("Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
@@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
-       unsigned char *text_in;
+       unsigned char *text_in = NULL;
 
        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;
 
-       text_in = kzalloc(payload_length, GFP_KERNEL);
-       if (!text_in) {
-               isert_err("Unable to allocate text_in of payload_length: %u\n",
-                         payload_length);
-               return -ENOMEM;
+       if (payload_length) {
+               text_in = kzalloc(payload_length, GFP_KERNEL);
+               if (!text_in) {
+                       isert_err("Unable to allocate text_in of payload_length: %u\n",
+                                 payload_length);
+                       return -ENOMEM;
+               }
        }
        cmd->text_in_ptr = text_in;
 
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                break;
        case ISCSI_OP_TEXT:
-               cmd = isert_allocate_cmd(conn);
-               if (!cmd)
-                       break;
+               if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+                       cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+                       if (!cmd)
+                               break;
+               } else {
+                       cmd = isert_allocate_cmd(conn);
+                       if (!cmd)
+                               break;
+               }
 
                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsi_conn *conn = isert_conn->conn;
        struct isert_device *device = isert_conn->conn_device;
+       struct iscsi_text_rsp *hdr;
 
        isert_dbg("Cmd %p\n", isert_cmd);
 
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
        case ISCSI_OP_TEXT:
+               hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+               /* If the continue bit is on, keep the command alive */
+               if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+                       break;
+
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
-                       isert_dbg("Calling transport_generic_free_cmd from"
-                                " isert_put_cmd for 0x%02x\n",
+                       isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
                                 cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        }
        isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       isert_dbg("conn %p Text Reject\n", isert_conn);
+       isert_dbg("conn %p Text Response\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -3136,7 +3149,7 @@ accept_wait:
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               isert_dbg("np_thread_state %d for isert_accept_np\n",
+               isert_dbg("np_thread_state %d\n",
                         np->np_thread_state);
                /**
                 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
 {
        int ret;
 
-       isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+       isert_comp_wq = alloc_workqueue("isert_comp_wq",
+                                       WQ_UNBOUND | WQ_HIGHPRI, 0);
        if (!isert_comp_wq) {
                isert_err("Unable to allocate isert_comp_wq\n");
                ret = -ENOMEM;
index eb694ddad79fe069b6d2f796004a5e8acc77e833..6e0a477681e90b0efe53330de1b9118000bcc85a 100644 (file)
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
        DECLARE_COMPLETION_ONSTACK(release_done);
        struct srpt_rdma_ch *ch;
        struct srpt_device *sdev;
-       int res;
+       unsigned long res;
 
        ch = se_sess->fabric_sess_ptr;
        WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
        spin_unlock_irq(&sdev->spinlock);
 
        res = wait_for_completion_timeout(&release_done, 60 * HZ);
-       WARN_ON(res <= 0);
+       WARN_ON(res == 0);
 }
 
 /**
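wait_for_completion_timeout() returns an unsigned long: 0 on timeout, otherwise the jiffies remaining. The value is never negative, so storing it in an int and testing res <= 0 obscures the intent; the hunk above switches to an unsigned local and an explicit == 0 test. A minimal sketch of the intended usage, with a hypothetical completion:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    /* Hypothetical wrapper; wait_for_completion_timeout() returns 0 on
     * timeout and the remaining jiffies otherwise, never a negative value. */
    static bool example_wait_released(struct completion *done)
    {
            unsigned long left = wait_for_completion_timeout(done, 60 * HZ);

            return left != 0;       /* false means we timed out */
    }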
index b78425765d3eb12ccd02fd1c3dc644a1956110a1..d09cefa379316a302df754394e342cc6fe4b088c 100644 (file)
@@ -535,8 +535,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
                }
        }
  fail2:        for (i = 0; i < 2; i++)
-               if (port->adi[i].dev)
-                       input_free_device(port->adi[i].dev);
+               input_free_device(port->adi[i].dev);
        gameport_close(gameport);
  fail1:        gameport_set_drvdata(gameport, NULL);
        kfree(port);
index a89488aa1aa4d0ea0fefcd796646f04dbe851c22..fcef5d1365e2a3034e3a9544f7e403a0680a5058 100644 (file)
@@ -345,13 +345,11 @@ static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
 {
        const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
        struct input_dev *input_dev = keypad->input_dev;
-       const struct matrix_keymap_data *keymap_data =
-                               pdata ? pdata->matrix_keymap_data : NULL;
        unsigned short keycode;
        int i;
        int error;
 
-       error = matrix_keypad_build_keymap(keymap_data, NULL,
+       error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
                                           pdata->matrix_key_rows,
                                           pdata->matrix_key_cols,
                                           keypad->keycodes, input_dev);
index 8ff612d160b07ec61b7084d668f04468ee116b36..563932500ff1bbfbf83c84c06b4f64205450045b 100644 (file)
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
 
        input_set_drvdata(input, keypad);
 
-       error = request_threaded_irq(irq, NULL,
-                       tc3589x_keypad_irq, plat->irqtype,
-                       "tc3589x-keypad", keypad);
+       error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
+                                    plat->irqtype | IRQF_ONESHOT,
+                                    "tc3589x-keypad", keypad);
        if (error < 0) {
                dev_err(&pdev->dev,
                                "Could not allocate irq %d,error %d\n",
index 3f4351579372ebb87aafe5af7df505ac2f485361..a0fc18fdfc0c62263c21537a2f8105a3820846e3 100644 (file)
@@ -7,29 +7,37 @@
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
 #include <linux/slab.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 #include <asm/portmux.h>
-#include <asm/bfin_rotary.h>
 
-static const u16 per_cnt[] = {
-       P_CNT_CUD,
-       P_CNT_CDG,
-       P_CNT_CZM,
-       0
-};
+#define CNT_CONFIG_OFF         0       /* CNT Config Offset */
+#define CNT_IMASK_OFF          4       /* CNT Interrupt Mask Offset */
+#define CNT_STATUS_OFF         8       /* CNT Status Offset */
+#define CNT_COMMAND_OFF                12      /* CNT Command Offset */
+#define CNT_DEBOUNCE_OFF       16      /* CNT Debounce Offset */
+#define CNT_COUNTER_OFF                20      /* CNT Counter Offset */
+#define CNT_MAX_OFF            24      /* CNT Maximum Count Offset */
+#define CNT_MIN_OFF            28      /* CNT Minimum Count Offset */
 
 struct bfin_rot {
        struct input_dev *input;
+       void __iomem *base;
        int irq;
        unsigned int up_key;
        unsigned int down_key;
        unsigned int button_key;
        unsigned int rel_code;
+
+       unsigned short mode;
+       unsigned short debounce;
+
        unsigned short cnt_config;
        unsigned short cnt_imask;
        unsigned short cnt_debounce;
@@ -59,18 +67,17 @@ static void report_rotary_event(struct bfin_rot *rotary, int delta)
 
 static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
 {
-       struct platform_device *pdev = dev_id;
-       struct bfin_rot *rotary = platform_get_drvdata(pdev);
+       struct bfin_rot *rotary = dev_id;
        int delta;
 
-       switch (bfin_read_CNT_STATUS()) {
+       switch (readw(rotary->base + CNT_STATUS_OFF)) {
 
        case ICII:
                break;
 
        case UCII:
        case DCII:
-               delta = bfin_read_CNT_COUNTER();
+               delta = readl(rotary->base + CNT_COUNTER_OFF);
                if (delta)
                        report_rotary_event(rotary, delta);
                break;
@@ -83,16 +90,52 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
                break;
        }
 
-       bfin_write_CNT_COMMAND(W1LCNT_ZERO);    /* Clear COUNTER */
-       bfin_write_CNT_STATUS(-1);      /* Clear STATUS */
+       writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */
+       writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */
 
        return IRQ_HANDLED;
 }
 
+static int bfin_rotary_open(struct input_dev *input)
+{
+       struct bfin_rot *rotary = input_get_drvdata(input);
+       unsigned short val;
+
+       if (rotary->mode & ROT_DEBE)
+               writew(rotary->debounce & DPRESCALE,
+                       rotary->base + CNT_DEBOUNCE_OFF);
+
+       writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF);
+
+       val = UCIE | DCIE;
+       if (rotary->button_key)
+               val |= CZMIE;
+       writew(val, rotary->base + CNT_IMASK_OFF);
+
+       writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF);
+
+       return 0;
+}
+
+static void bfin_rotary_close(struct input_dev *input)
+{
+       struct bfin_rot *rotary = input_get_drvdata(input);
+
+       writew(0, rotary->base + CNT_CONFIG_OFF);
+       writew(0, rotary->base + CNT_IMASK_OFF);
+}
+
+static void bfin_rotary_free_action(void *data)
+{
+       peripheral_free_list(data);
+}
+
 static int bfin_rotary_probe(struct platform_device *pdev)
 {
-       struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev);
        struct bfin_rot *rotary;
+       struct resource *res;
        struct input_dev *input;
        int error;
 
@@ -102,18 +145,37 @@ static int bfin_rotary_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       error = peripheral_request_list(per_cnt, dev_name(&pdev->dev));
-       if (error) {
-               dev_err(&pdev->dev, "requesting peripherals failed\n");
-               return error;
+       if (pdata->pin_list) {
+               error = peripheral_request_list(pdata->pin_list,
+                                               dev_name(&pdev->dev));
+               if (error) {
+                       dev_err(dev, "requesting peripherals failed: %d\n",
+                               error);
+                       return error;
+               }
+
+               error = devm_add_action(dev, bfin_rotary_free_action,
+                                       pdata->pin_list);
+               if (error) {
+                       dev_err(dev, "setting cleanup action failed: %d\n",
+                               error);
+                       peripheral_free_list(pdata->pin_list);
+                       return error;
+               }
        }
 
-       rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL);
-       input = input_allocate_device();
-       if (!rotary || !input) {
-               error = -ENOMEM;
-               goto out1;
-       }
+       rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL);
+       if (!rotary)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       rotary->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(rotary->base))
+               return PTR_ERR(rotary->base);
+
+       input = devm_input_allocate_device(dev);
+       if (!input)
+               return -ENOMEM;
 
        rotary->input = input;
 
@@ -122,9 +184,8 @@ static int bfin_rotary_probe(struct platform_device *pdev)
        rotary->button_key = pdata->rotary_button_key;
        rotary->rel_code = pdata->rotary_rel_code;
 
-       error = rotary->irq = platform_get_irq(pdev, 0);
-       if (error < 0)
-               goto out1;
+       rotary->mode = pdata->mode;
+       rotary->debounce = pdata->debounce;
 
        input->name = pdev->name;
        input->phys = "bfin-rotary/input0";
@@ -137,6 +198,9 @@ static int bfin_rotary_probe(struct platform_device *pdev)
        input->id.product = 0x0001;
        input->id.version = 0x0100;
 
+       input->open = bfin_rotary_open;
+       input->close = bfin_rotary_close;
+
        if (rotary->up_key) {
                __set_bit(EV_KEY, input->evbit);
                __set_bit(rotary->up_key, input->keybit);
@@ -151,75 +215,43 @@ static int bfin_rotary_probe(struct platform_device *pdev)
                __set_bit(rotary->button_key, input->keybit);
        }
 
-       error = request_irq(rotary->irq, bfin_rotary_isr,
-                           0, dev_name(&pdev->dev), pdev);
+       /* Quiesce the device before requesting irq */
+       bfin_rotary_close(input);
+
+       rotary->irq = platform_get_irq(pdev, 0);
+       if (rotary->irq < 0) {
+               dev_err(dev, "No rotary IRQ specified\n");
+               return -ENOENT;
+       }
+
+       error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr,
+                                0, dev_name(dev), rotary);
        if (error) {
-               dev_err(&pdev->dev,
-                       "unable to claim irq %d; error %d\n",
+               dev_err(dev, "unable to claim irq %d; error %d\n",
                        rotary->irq, error);
-               goto out1;
+               return error;
        }
 
        error = input_register_device(input);
        if (error) {
-               dev_err(&pdev->dev,
-                       "unable to register input device (%d)\n", error);
-               goto out2;
+               dev_err(dev, "unable to register input device (%d)\n", error);
+               return error;
        }
 
-       if (pdata->rotary_button_key)
-               bfin_write_CNT_IMASK(CZMIE);
-
-       if (pdata->mode & ROT_DEBE)
-               bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);
-
-       if (pdata->mode)
-               bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
-                                       (pdata->mode & ~CNTE));
-
-       bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
-       bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);
-
        platform_set_drvdata(pdev, rotary);
        device_init_wakeup(&pdev->dev, 1);
 
        return 0;
-
-out2:
-       free_irq(rotary->irq, pdev);
-out1:
-       input_free_device(input);
-       kfree(rotary);
-       peripheral_free_list(per_cnt);
-
-       return error;
 }
 
-static int bfin_rotary_remove(struct platform_device *pdev)
-{
-       struct bfin_rot *rotary = platform_get_drvdata(pdev);
-
-       bfin_write_CNT_CONFIG(0);
-       bfin_write_CNT_IMASK(0);
-
-       free_irq(rotary->irq, pdev);
-       input_unregister_device(rotary->input);
-       peripheral_free_list(per_cnt);
-
-       kfree(rotary);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int bfin_rotary_suspend(struct device *dev)
+static int __maybe_unused bfin_rotary_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-       rotary->cnt_config = bfin_read_CNT_CONFIG();
-       rotary->cnt_imask = bfin_read_CNT_IMASK();
-       rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE();
+       rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF);
+       rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF);
+       rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF);
 
        if (device_may_wakeup(&pdev->dev))
                enable_irq_wake(rotary->irq);
@@ -227,38 +259,32 @@ static int bfin_rotary_suspend(struct device *dev)
        return 0;
 }
 
-static int bfin_rotary_resume(struct device *dev)
+static int __maybe_unused bfin_rotary_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-       bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce);
-       bfin_write_CNT_IMASK(rotary->cnt_imask);
-       bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE);
+       writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF);
+       writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF);
+       writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF);
 
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(rotary->irq);
 
        if (rotary->cnt_config & CNTE)
-               bfin_write_CNT_CONFIG(rotary->cnt_config);
+               writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF);
 
        return 0;
 }
 
-static const struct dev_pm_ops bfin_rotary_pm_ops = {
-       .suspend        = bfin_rotary_suspend,
-       .resume         = bfin_rotary_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops,
+                        bfin_rotary_suspend, bfin_rotary_resume);
 
 static struct platform_driver bfin_rotary_device_driver = {
        .probe          = bfin_rotary_probe,
-       .remove         = bfin_rotary_remove,
        .driver         = {
                .name   = "bfin-rotary",
-#ifdef CONFIG_PM
                .pm     = &bfin_rotary_pm_ops,
-#endif
        },
 };
 module_platform_driver(bfin_rotary_device_driver);
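Using SIMPLE_DEV_PM_OPS() with __maybe_unused callbacks removes the CONFIG_PM #ifdefs seen in the old code: when CONFIG_PM_SLEEP is disabled the ops expand to an empty structure and the now-unreferenced callbacks no longer trigger unused-function warnings. A minimal sketch of the pattern with hypothetical callback names:

    #include <linux/device.h>
    #include <linux/pm.h>

    /* Hypothetical callbacks showing the #ifdef-free PM pattern. */
    static int __maybe_unused example_suspend(struct device *dev)
    {
            return 0;       /* save device state here */
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
            return 0;       /* restore device state here */
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);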
index 59d4dcddf6de0cf4d1c5384c977a36c319d66389..98228773a1118bfd448ec0143c4b572a534af499 100644 (file)
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c,
        idev->private           = m;
        idev->input->name       = MMA8450_DRV_NAME;
        idev->input->id.bustype = BUS_I2C;
+       idev->input->dev.parent = &c->dev;
        idev->poll              = mma8450_poll;
        idev->poll_interval     = POLL_INTERVAL;
        idev->poll_interval_max = POLL_INTERVAL_MAX;
index 79cc0f79896fcb47e9ed31faeed02339ffe5ccb4..e8e010a85484ae3d42003bc3c0742f909b8946f6 100644 (file)
@@ -195,7 +195,7 @@ static int soc_button_probe(struct platform_device *pdev)
 
 static struct soc_button_info soc_button_PNP0C40[] = {
        { "power", 0, EV_KEY, KEY_POWER, false, true },
-       { "home", 1, EV_KEY, KEY_HOME, false, true },
+       { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
        { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
        { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
        { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
index f205b8be2ce4ecd2395c75dc8fc990513f3fff48..1bd15ebc01f2df5002eca38f7089a61701471f5d 100644 (file)
@@ -99,36 +99,58 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
-#define ALPS_IS_RUSHMORE       0x100   /* device is a rushmore */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
-       { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Toshiba Salellite Pro M10 */
-       { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 },                           /* UMAX-530T */
-       { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },                           /* HP ze1115 */
-       { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Fujitsu Siemens S6010 */
-       { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL },                  /* Toshiba Satellite S2400-103 */
-       { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 },                /* NEC Versa L320 */
-       { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D800 */
-       { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT },              /* ThinkPad R61 8918-5QG */
-       { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Ahtec Laptop */
-       { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* XXX */
-       { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
-       { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D600 */
+       { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },      /* Toshiba Salellite Pro M10 */
+       { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } },                               /* UMAX-530T */
+       { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },                               /* HP ze1115 */
+       { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },            /* Fujitsu Siemens S6010 */
+       { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } },              /* Toshiba Satellite S2400-103 */
+       { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } },            /* NEC Versa L320 */
+       { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },      /* Dell Latitude D800 */
+       { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } },          /* ThinkPad R61 8918-5QG */
+       { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },            /* Ahtec Laptop */
+
+       /*
+        * XXX This entry is suspicious. First byte has zero lower nibble,
+        * which is what a normal mouse would report. Also, the value 0x0e
+        * isn't valid per PS/2 spec.
+        */
+       { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+
+       { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+       { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } },      /* Dell Latitude D600 */
        /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
-       { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
-       { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT },              /* Dell XT2 */
-       { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },           /* Dell Vostro 1400 */
-       { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },                            /* Toshiba Tecra A11-11L */
-       { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
+       { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },
+       { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } },          /* Dell XT2 */
+       { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } },       /* Dell Vostro 1400 */
+       { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },                          /* Toshiba Tecra A11-11L */
+       { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } },
+};
+
+static const struct alps_protocol_info alps_v3_protocol_data = {
+       ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v3_rushmore_data = {
+       ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v5_protocol_data = {
+       ALPS_PROTO_V5, 0xc8, 0xd8, 0
+};
+
+static const struct alps_protocol_info alps_v7_protocol_data = {
+       ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
 };
 
 static void alps_set_abs_params_st(struct alps_data *priv,
@@ -136,12 +158,6 @@ static void alps_set_abs_params_st(struct alps_data *priv,
 static void alps_set_abs_params_mt(struct alps_data *priv,
                                   struct input_dev *dev1);
 
-/*
- * XXX - this entry is suspicious. First byte has zero lower nibble,
- * which is what a normal mouse would report. Also, the value 0x0e
- * isn't valid per PS/2 spec.
- */
-
 /* Packet formats are described in Documentation/input/alps.txt */
 
 static bool alps_is_valid_first_byte(struct alps_data *priv,
@@ -150,8 +166,7 @@ static bool alps_is_valid_first_byte(struct alps_data *priv,
        return (data & priv->mask0) == priv->byte0;
 }
 
-static void alps_report_buttons(struct psmouse *psmouse,
-                               struct input_dev *dev1, struct input_dev *dev2,
+static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2,
                                int left, int right, int middle)
 {
        struct input_dev *dev;
@@ -161,20 +176,21 @@ static void alps_report_buttons(struct psmouse *psmouse,
         * other device (dev2) then this event should be also
         * sent through that device.
         */
-       dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_LEFT, left);
 
-       dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_RIGHT, right);
 
-       dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_MIDDLE, middle);
 
        /*
         * Sync the _other_ device now, we'll do the first
         * device later once we report the rest of the events.
         */
-       input_sync(dev2);
+       if (dev2)
+               input_sync(dev2);
 }
 
 static void alps_process_packet_v1_v2(struct psmouse *psmouse)
@@ -221,13 +237,13 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                input_report_rel(dev2, REL_X,  (x > 383 ? (x - 768) : x));
                input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
 
-               alps_report_buttons(psmouse, dev2, dev, left, right, middle);
+               alps_report_buttons(dev2, dev, left, right, middle);
 
                input_sync(dev2);
                return;
        }
 
-       alps_report_buttons(psmouse, dev, dev2, left, right, middle);
+       alps_report_buttons(dev, dev2, left, right, middle);
 
        /* Convert hardware tap to a reasonable Z value */
        if (ges && !fin)
@@ -412,7 +428,7 @@ static int alps_process_bitmap(struct alps_data *priv,
                (2 * (priv->y_bits - 1));
 
        /* y-bitmap order is reversed, except on rushmore */
-       if (!(priv->flags & ALPS_IS_RUSHMORE)) {
+       if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) {
                fields->mt[0].y = priv->y_max - fields->mt[0].y;
                fields->mt[1].y = priv->y_max - fields->mt[1].y;
        }
@@ -648,7 +664,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
                 */
                if (f->is_mp) {
                        fingers = f->fingers;
-                       if (priv->proto_version == ALPS_PROTO_V3) {
+                       if (priv->proto_version == ALPS_PROTO_V3 ||
+                           priv->proto_version == ALPS_PROTO_V3_RUSHMORE) {
                                if (alps_process_bitmap(priv, f) == 0)
                                        fingers = 0; /* Use st data */
 
@@ -892,34 +909,6 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
                                          unsigned char *pkt,
                                          unsigned char pkt_id)
 {
-       /*
-        *       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
-        * Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
-        * Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
-        * Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
-        * Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
-        * Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
-        * Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
-        * Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
-        * Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
-        * Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
-        * Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
-        * L:         Left button
-        * R / M:     Non-clickpads: Right / Middle button
-        *            Clickpads: When > 2 fingers are down, and some fingers
-        *            are in the button area, then the 2 coordinates reported
-        *            are for fingers outside the button area and these report
-        *            extra fingers being present in the right / left button
-        *            area. Note these fingers are not added to the F field!
-        *            so if a TWO packet is received and R = 1 then there are
-        *            3 fingers down, etc.
-        * TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
-        *            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
-        *               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
-        *               in NEW fmt
-        * F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
-        */
-
        mt[0].x = ((pkt[2] & 0x80) << 4);
        mt[0].x |= ((pkt[2] & 0x3F) << 5);
        mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -1044,17 +1033,6 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
                return;
        }
 
-       /*
-        *        b7 b6 b5 b4 b3 b2 b1 b0
-        * Byte0   0  1  0  0  1  0  0  0
-        * Byte1   1  1  *  *  1  M  R  L
-        * Byte2  X7  1 X5 X4 X3 X2 X1 X0
-        * Byte3  Z6  1 Y6 X6  1 Y2 Y1 Y0
-        * Byte4  Y7  0 Y5 Y4 Y3  1  1  0
-        * Byte5 T&P  0 Z5 Z4 Z3 Z2 Z1 Z0
-        * M / R / L: Middle / Right / Left button
-        */
-
        x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
        y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
            ((packet[3] & 0x20) << 1);
@@ -1107,23 +1085,89 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
                alps_process_touchpad_packet_v7(psmouse);
 }
 
-static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+static DEFINE_MUTEX(alps_mutex);
+
+static void alps_register_bare_ps2_mouse(struct work_struct *work)
+{
+       struct alps_data *priv =
+               container_of(work, struct alps_data, dev3_register_work.work);
+       struct psmouse *psmouse = priv->psmouse;
+       struct input_dev *dev3;
+       int error = 0;
+
+       mutex_lock(&alps_mutex);
+
+       if (priv->dev3)
+               goto out;
+
+       dev3 = input_allocate_device();
+       if (!dev3) {
+               psmouse_err(psmouse, "failed to allocate secondary device\n");
+               error = -ENOMEM;
+               goto out;
+       }
+
+       snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
+                psmouse->ps2dev.serio->phys,
+                (priv->dev2 ? "input2" : "input1"));
+       dev3->phys = priv->phys3;
+
+       /*
+        * format of input device name is: "protocol vendor name"
+        * see function psmouse_switch_protocol() in psmouse-base.c
+        */
+       dev3->name = "PS/2 ALPS Mouse";
+
+       dev3->id.bustype = BUS_I8042;
+       dev3->id.vendor  = 0x0002;
+       dev3->id.product = PSMOUSE_PS2;
+       dev3->id.version = 0x0000;
+       dev3->dev.parent = &psmouse->ps2dev.serio->dev;
+
+       input_set_capability(dev3, EV_REL, REL_X);
+       input_set_capability(dev3, EV_REL, REL_Y);
+       input_set_capability(dev3, EV_KEY, BTN_LEFT);
+       input_set_capability(dev3, EV_KEY, BTN_RIGHT);
+       input_set_capability(dev3, EV_KEY, BTN_MIDDLE);
+
+       __set_bit(INPUT_PROP_POINTER, dev3->propbit);
+
+       error = input_register_device(dev3);
+       if (error) {
+               psmouse_err(psmouse,
+                           "failed to register secondary device: %d\n",
+                           error);
+               input_free_device(dev3);
+               goto out;
+       }
+
+       priv->dev3 = dev3;
+
+out:
+       /*
+        * Save the error code so that we can detect that we
+        * already tried to create the device.
+        */
+       if (error)
+               priv->dev3 = ERR_PTR(error);
+
+       mutex_unlock(&alps_mutex);
+}
+
+static void alps_report_bare_ps2_packet(struct input_dev *dev,
                                        unsigned char packet[],
                                        bool report_buttons)
 {
-       struct alps_data *priv = psmouse->private;
-       struct input_dev *dev2 = priv->dev2;
-
        if (report_buttons)
-               alps_report_buttons(psmouse, dev2, psmouse->dev,
+               alps_report_buttons(dev, NULL,
                                packet[0] & 1, packet[0] & 2, packet[0] & 4);
 
-       input_report_rel(dev2, REL_X,
+       input_report_rel(dev, REL_X,
                packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
-       input_report_rel(dev2, REL_Y,
+       input_report_rel(dev, REL_Y,
                packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
 
-       input_sync(dev2);
+       input_sync(dev);
 }
 
 static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
@@ -1188,8 +1232,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
                 * de-synchronization.
                 */
 
-               alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
-                                           false);
+               alps_report_bare_ps2_packet(priv->dev2,
+                                           &psmouse->packet[3], false);
 
                /*
                 * Continue with the standard ALPS protocol handling,
@@ -1245,9 +1289,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
         * properly we only do this if the device is fully synchronized.
         */
        if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+
+               /* Register dev3 mouse if we received PS/2 packet first time */
+               if (unlikely(!priv->dev3))
+                       psmouse_queue_work(psmouse,
+                                          &priv->dev3_register_work, 0);
+
                if (psmouse->pktcnt == 3) {
-                       alps_report_bare_ps2_packet(psmouse, psmouse->packet,
-                                                   true);
+                       /* Once dev3 mouse device is registered report data */
+                       if (likely(!IS_ERR_OR_NULL(priv->dev3)))
+                               alps_report_bare_ps2_packet(priv->dev3,
+                                                           psmouse->packet,
+                                                           true);
                        return PSMOUSE_FULL_PACKET;
                }
                return PSMOUSE_GOOD_DATA;
@@ -1275,7 +1328,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
                            psmouse->pktcnt - 1,
                            psmouse->packet[psmouse->pktcnt - 1]);
 
-               if (priv->proto_version == ALPS_PROTO_V3 &&
+               if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
                    psmouse->pktcnt == psmouse->pktsize) {
                        /*
                         * Some Dell boxes, such as Latitude E6440 or E7440
@@ -1780,7 +1833,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
         * all.
         */
        if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
-               psmouse_warn(psmouse, "trackstick E7 report failed\n");
+               psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n");
                ret = -ENODEV;
        } else {
                psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param);
@@ -1945,8 +1998,6 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
                                                   ALPS_REG_BASE_RUSHMORE);
                if (reg_val == -EIO)
                        goto error;
-               if (reg_val == -ENODEV)
-                       priv->flags &= ~ALPS_DUALPOINT;
        }
 
        if (alps_enter_command_mode(psmouse) ||
@@ -2162,11 +2213,18 @@ error:
        return ret;
 }
 
-static void alps_set_defaults(struct alps_data *priv)
+static int alps_set_protocol(struct psmouse *psmouse,
+                            struct alps_data *priv,
+                            const struct alps_protocol_info *protocol)
 {
-       priv->byte0 = 0x8f;
-       priv->mask0 = 0x8f;
-       priv->flags = ALPS_DUALPOINT;
+       psmouse->private = priv;
+
+       setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+
+       priv->proto_version = protocol->version;
+       priv->byte0 = protocol->byte0;
+       priv->mask0 = protocol->mask0;
+       priv->flags = protocol->flags;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
@@ -2182,6 +2240,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->x_max = 1023;
                priv->y_max = 767;
                break;
+
        case ALPS_PROTO_V3:
                priv->hw_init = alps_hw_init_v3;
                priv->process_packet = alps_process_packet_v3;
@@ -2190,6 +2249,23 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
                break;
+
+       case ALPS_PROTO_V3_RUSHMORE:
+               priv->hw_init = alps_hw_init_rushmore_v3;
+               priv->process_packet = alps_process_packet_v3;
+               priv->set_abs_params = alps_set_abs_params_mt;
+               priv->decode_fields = alps_decode_rushmore;
+               priv->nibble_commands = alps_v3_nibble_commands;
+               priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+               priv->x_bits = 16;
+               priv->y_bits = 12;
+
+               if (alps_probe_trackstick_v3(psmouse,
+                                            ALPS_REG_BASE_RUSHMORE) < 0)
+                       priv->flags &= ~ALPS_DUALPOINT;
+
+               break;
+
        case ALPS_PROTO_V4:
                priv->hw_init = alps_hw_init_v4;
                priv->process_packet = alps_process_packet_v4;
@@ -2197,6 +2273,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->nibble_commands = alps_v4_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_DISABLE;
                break;
+
        case ALPS_PROTO_V5:
                priv->hw_init = alps_hw_init_dolphin_v1;
                priv->process_packet = alps_process_touchpad_packet_v3_v5;
@@ -2204,14 +2281,12 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->byte0 = 0xc8;
-               priv->mask0 = 0xd8;
-               priv->flags = 0;
                priv->x_max = 1360;
                priv->y_max = 660;
                priv->x_bits = 23;
                priv->y_bits = 12;
                break;
+
        case ALPS_PROTO_V6:
                priv->hw_init = alps_hw_init_v6;
                priv->process_packet = alps_process_packet_v6;
@@ -2220,6 +2295,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->x_max = 2047;
                priv->y_max = 1535;
                break;
+
        case ALPS_PROTO_V7:
                priv->hw_init = alps_hw_init_v7;
                priv->process_packet = alps_process_packet_v7;
@@ -2227,19 +2303,21 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->x_max = 0xfff;
-               priv->y_max = 0x7ff;
-               priv->byte0 = 0x48;
-               priv->mask0 = 0x48;
+
+               if (alps_dolphin_get_device_area(psmouse, priv))
+                       return -EIO;
 
                if (priv->fw_ver[1] != 0xba)
                        priv->flags |= ALPS_BUTTONPAD;
+
                break;
        }
+
+       return 0;
 }
 
-static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
-                           unsigned char *e7, unsigned char *ec)
+static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
+                                                        unsigned char *ec)
 {
        const struct alps_model_info *model;
        int i;
@@ -2251,23 +2329,18 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
                    (!model->command_mode_resp ||
                     model->command_mode_resp == ec[2])) {
 
-                       priv->proto_version = model->proto_version;
-                       alps_set_defaults(priv);
-
-                       priv->flags = model->flags;
-                       priv->byte0 = model->byte0;
-                       priv->mask0 = model->mask0;
-
-                       return 0;
+                       return &model->protocol_info;
                }
        }
 
-       return -EINVAL;
+       return NULL;
 }
 
 static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
 {
+       const struct alps_protocol_info *protocol;
        unsigned char e6[4], e7[4], ec[4];
+       int error;
 
        /*
         * First try "E6 report".
@@ -2293,54 +2366,35 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
            alps_exit_command_mode(psmouse))
                return -EIO;
 
-       /* Save the Firmware version */
-       memcpy(priv->fw_ver, ec, 3);
-
-       if (alps_match_table(psmouse, priv, e7, ec) == 0) {
-               return 0;
-       } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
-                  ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
-               priv->proto_version = ALPS_PROTO_V5;
-               alps_set_defaults(priv);
-               if (alps_dolphin_get_device_area(psmouse, priv))
-                       return -EIO;
-               else
-                       return 0;
-       } else if (ec[0] == 0x88 &&
-                  ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
-               priv->proto_version = ALPS_PROTO_V7;
-               alps_set_defaults(priv);
-
-               return 0;
-       } else if (ec[0] == 0x88 && ec[1] == 0x08) {
-               priv->proto_version = ALPS_PROTO_V3;
-               alps_set_defaults(priv);
-
-               priv->hw_init = alps_hw_init_rushmore_v3;
-               priv->decode_fields = alps_decode_rushmore;
-               priv->x_bits = 16;
-               priv->y_bits = 12;
-               priv->flags |= ALPS_IS_RUSHMORE;
-
-               /* hack to make addr_command, nibble_command available */
-               psmouse->private = priv;
-
-               if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
-                       priv->flags &= ~ALPS_DUALPOINT;
-
-               return 0;
-       } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
-                  ec[2] >= 0x90 && ec[2] <= 0x9d) {
-               priv->proto_version = ALPS_PROTO_V3;
-               alps_set_defaults(priv);
-
-               return 0;
+       protocol = alps_match_table(e7, ec);
+       if (!protocol) {
+               if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
+                          ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
+                       protocol = &alps_v5_protocol_data;
+               } else if (ec[0] == 0x88 &&
+                          ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
+                       protocol = &alps_v7_protocol_data;
+               } else if (ec[0] == 0x88 && ec[1] == 0x08) {
+                       protocol = &alps_v3_rushmore_data;
+               } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+                          ec[2] >= 0x90 && ec[2] <= 0x9d) {
+                       protocol = &alps_v3_protocol_data;
+               } else {
+                       psmouse_dbg(psmouse,
+                                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+                       return -EINVAL;
+               }
        }
 
-       psmouse_dbg(psmouse,
-                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+       if (priv) {
+               /* Save the Firmware version */
+               memcpy(priv->fw_ver, ec, 3);
+               error = alps_set_protocol(psmouse, priv, protocol);
+               if (error)
+                       return error;
+       }
 
-       return -EINVAL;
+       return 0;
 }
 
 static int alps_reconnect(struct psmouse *psmouse)
@@ -2361,7 +2415,10 @@ static void alps_disconnect(struct psmouse *psmouse)
 
        psmouse_reset(psmouse);
        del_timer_sync(&priv->timer);
-       input_unregister_device(priv->dev2);
+       if (priv->dev2)
+               input_unregister_device(priv->dev2);
+       if (!IS_ERR_OR_NULL(priv->dev3))
+               input_unregister_device(priv->dev3);
        kfree(priv);
 }
 
@@ -2394,25 +2451,12 @@ static void alps_set_abs_params_mt(struct alps_data *priv,
 
 int alps_init(struct psmouse *psmouse)
 {
-       struct alps_data *priv;
-       struct input_dev *dev1 = psmouse->dev, *dev2;
-
-       priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
-       dev2 = input_allocate_device();
-       if (!priv || !dev2)
-               goto init_fail;
-
-       priv->dev2 = dev2;
-       setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
-
-       psmouse->private = priv;
-
-       psmouse_reset(psmouse);
-
-       if (alps_identify(psmouse, priv) < 0)
-               goto init_fail;
+       struct alps_data *priv = psmouse->private;
+       struct input_dev *dev1 = psmouse->dev;
+       int error;
 
-       if (priv->hw_init(psmouse))
+       error = priv->hw_init(psmouse);
+       if (error)
                goto init_fail;
 
        /*
@@ -2462,36 +2506,57 @@ int alps_init(struct psmouse *psmouse)
        }
 
        if (priv->flags & ALPS_DUALPOINT) {
+               struct input_dev *dev2;
+
+               dev2 = input_allocate_device();
+               if (!dev2) {
+                       psmouse_err(psmouse,
+                                   "failed to allocate trackstick device\n");
+                       error = -ENOMEM;
+                       goto init_fail;
+               }
+
+               snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1",
+                        psmouse->ps2dev.serio->phys);
+               dev2->phys = priv->phys2;
+
                /*
                 * format of input device name is: "protocol vendor name"
                 * see function psmouse_switch_protocol() in psmouse-base.c
                 */
                dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
+
+               dev2->id.bustype = BUS_I8042;
+               dev2->id.vendor  = 0x0002;
                dev2->id.product = PSMOUSE_ALPS;
                dev2->id.version = priv->proto_version;
-       } else {
-               dev2->name = "PS/2 ALPS Mouse";
-               dev2->id.product = PSMOUSE_PS2;
-               dev2->id.version = 0x0000;
-       }
-
-       snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
-       dev2->phys = priv->phys;
-       dev2->id.bustype = BUS_I8042;
-       dev2->id.vendor  = 0x0002;
-       dev2->dev.parent = &psmouse->ps2dev.serio->dev;
+               dev2->dev.parent = &psmouse->ps2dev.serio->dev;
 
-       dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-       dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-       dev2->keybit[BIT_WORD(BTN_LEFT)] =
-               BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
+               input_set_capability(dev2, EV_REL, REL_X);
+               input_set_capability(dev2, EV_REL, REL_Y);
+               input_set_capability(dev2, EV_KEY, BTN_LEFT);
+               input_set_capability(dev2, EV_KEY, BTN_RIGHT);
+               input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
 
-       __set_bit(INPUT_PROP_POINTER, dev2->propbit);
-       if (priv->flags & ALPS_DUALPOINT)
+               __set_bit(INPUT_PROP_POINTER, dev2->propbit);
                __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
 
-       if (input_register_device(priv->dev2))
-               goto init_fail;
+               error = input_register_device(dev2);
+               if (error) {
+                       psmouse_err(psmouse,
+                                   "failed to register trackstick device: %d\n",
+                                   error);
+                       input_free_device(dev2);
+                       goto init_fail;
+               }
+
+               priv->dev2 = dev2;
+       }
+
+       priv->psmouse = psmouse;
+
+       INIT_DELAYED_WORK(&priv->dev3_register_work,
+                         alps_register_bare_ps2_mouse);
 
        psmouse->protocol_handler = alps_process_byte;
        psmouse->poll = alps_poll;
@@ -2509,25 +2574,58 @@ int alps_init(struct psmouse *psmouse)
 
 init_fail:
        psmouse_reset(psmouse);
-       input_free_device(dev2);
-       kfree(priv);
+       /*
+        * Even though we did not allocate psmouse->private ourselves
+        * (alps_detect() did), we are responsible for freeing it here
+        * on failure.
+        */
+       kfree(psmouse->private);
        psmouse->private = NULL;
-       return -1;
+       return error;
 }
 
 int alps_detect(struct psmouse *psmouse, bool set_properties)
 {
-       struct alps_data dummy;
+       struct alps_data *priv;
+       int error;
 
-       if (alps_identify(psmouse, &dummy) < 0)
-               return -1;
+       error = alps_identify(psmouse, NULL);
+       if (error)
+               return error;
+
+       /*
+        * Reset the device to make sure it is fully operational:
+        * on some laptops, like certain Dell Latitudes, we may
+        * fail to properly detect presence of trackstick if device
+        * has not been reset.
+        */
+       psmouse_reset(psmouse);
+
+       priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       error = alps_identify(psmouse, priv);
+       if (error) {
+               kfree(priv);
+               return error;
+       }
 
        if (set_properties) {
                psmouse->vendor = "ALPS";
-               psmouse->name = dummy.flags & ALPS_DUALPOINT ?
+               psmouse->name = priv->flags & ALPS_DUALPOINT ?
                                "DualPoint TouchPad" : "GlidePoint";
-               psmouse->model = dummy.proto_version << 8;
+               psmouse->model = priv->proto_version;
+       } else {
+               /*
+                * Destroy the alps_data structure we allocated earlier,
+                * since this was just a "trial run". Otherwise we keep it
+                * for alps_init(), which has to be called next if we
+                * succeed and set_properties is true.
+                */
+               kfree(priv);
+               psmouse->private = NULL;
        }
+
        return 0;
 }
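
The reworked probe path above is worth spelling out: alps_detect() now allocates the alps_data structure, runs identification twice (once without a private structure as a cheap probe, once with it), and either leaves the structure in psmouse->private for alps_init() to pick up, or frees it again when set_properties is false. Below is a minimal user-space sketch of that ownership handoff; the structures and helpers are simplified stand-ins for the kernel ones, and the example stores psmouse->private from the identify step much like alps_set_protocol() does in the patch.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures; illustration only. */
struct alps_data { int flags; unsigned int proto_version; };
struct psmouse { void *private; const char *name; };

static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
{
        /* Pretend identification succeeds; fill in data only if asked to. */
        if (priv) {
                priv->flags = 1;              /* say, ALPS_DUALPOINT */
                priv->proto_version = 0x310;  /* say, ALPS_PROTO_V3_RUSHMORE */
                psmouse->private = priv;      /* alps_set_protocol() does this in the patch */
        }
        return 0;
}

static int alps_detect(struct psmouse *psmouse, int set_properties)
{
        struct alps_data *priv;

        if (alps_identify(psmouse, NULL))     /* cheap "is this ALPS at all?" probe */
                return -1;

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -1;

        if (alps_identify(psmouse, priv)) {   /* full identification */
                free(priv);
                return -1;
        }

        if (set_properties) {
                psmouse->name = priv->flags ? "DualPoint TouchPad" : "GlidePoint";
                /* priv stays in psmouse->private for alps_init() */
        } else {
                free(priv);                   /* trial run only */
                psmouse->private = NULL;
        }
        return 0;
}

static int alps_init(struct psmouse *psmouse)
{
        struct alps_data *priv = psmouse->private;  /* allocated by alps_detect() */

        printf("init: proto 0x%x, name %s\n", priv->proto_version, psmouse->name);
        return 0;
}

int main(void)
{
        struct psmouse mouse = { 0 };

        if (alps_detect(&mouse, 1) == 0)
                alps_init(&mouse);
        return 0;
}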
 
index 66240b47819a9569c975cedd1d5443b2b59f6b86..02513c0502fc1309a9f8285ea2f4e3237fd7464a 100644 (file)
 
 #include <linux/input/mt.h>
 
-#define ALPS_PROTO_V1  1
-#define ALPS_PROTO_V2  2
-#define ALPS_PROTO_V3  3
-#define ALPS_PROTO_V4  4
-#define ALPS_PROTO_V5  5
-#define ALPS_PROTO_V6  6
-#define ALPS_PROTO_V7  7       /* t3btl t4s */
+#define ALPS_PROTO_V1          0x100
+#define ALPS_PROTO_V2          0x200
+#define ALPS_PROTO_V3          0x300
+#define ALPS_PROTO_V3_RUSHMORE 0x310
+#define ALPS_PROTO_V4          0x400
+#define ALPS_PROTO_V5          0x500
+#define ALPS_PROTO_V6          0x600
+#define ALPS_PROTO_V7          0x700   /* t3btl t4s */
 
 #define MAX_TOUCHES    2
 
@@ -45,6 +46,21 @@ enum V7_PACKET_ID {
         V7_PACKET_ID_UNKNOWN,
 };
 
+/**
+ * struct alps_protocol_info - information about protocol used by a device
+ * @version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ *   known format for this model.  The first byte of the report, ANDed with
+ *   mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ */
+struct alps_protocol_info {
+       u16 version;
+       u8 byte0, mask0;
+       unsigned int flags;
+};
+
 /**
  * struct alps_model_info - touchpad ID table
  * @signature: E7 response string to match.
@@ -52,23 +68,16 @@ enum V7_PACKET_ID {
  *   (aka command mode response) identifies the firmware minor version.  This
  *   can be used to distinguish different hardware models which are not
  *   uniquely identifiable through their E7 responses.
- * @proto_version: Indicates V1/V2/V3/...
- * @byte0: Helps figure out whether a position report packet matches the
- *   known format for this model.  The first byte of the report, ANDed with
- *   mask0, should match byte0.
- * @mask0: The mask used to check the first byte of the report.
- * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @protocol_info: information about the protocol used by the device.
  *
  * Many (but not all) ALPS touchpads can be identified by looking at the
  * values returned in the "E7 report" and/or the "EC report."  This table
  * lists a number of such touchpads.
  */
 struct alps_model_info {
-       unsigned char signature[3];
-       unsigned char command_mode_resp;
-       unsigned char proto_version;
-       unsigned char byte0, mask0;
-       int flags;
+       u8 signature[3];
+       u8 command_mode_resp;
+       struct alps_protocol_info protocol_info;
 };
 
 /**
@@ -132,8 +141,12 @@ struct alps_fields {
 
 /**
  * struct alps_data - private data structure for the ALPS driver
- * @dev2: "Relative" device used to report trackstick or mouse activity.
- * @phys: Physical path for the relative device.
+ * @psmouse: Pointer to parent psmouse device
+ * @dev2: Trackstick device (can be NULL).
+ * @dev3: Generic PS/2 mouse (can be NULL; registered from delayed work).
+ * @phys2: Physical path for the trackstick device.
+ * @phys3: Physical path for the generic PS/2 mouse.
+ * @dev3_register_work: Delayed work for registering PS/2 mouse.
  * @nibble_commands: Command mapping used for touchpad register accesses.
  * @addr_command: Command used to tell the touchpad that a register address
  *   follows.
@@ -160,15 +173,19 @@ struct alps_fields {
  * @timer: Timer for flushing out the final report packet in the stream.
  */
 struct alps_data {
+       struct psmouse *psmouse;
        struct input_dev *dev2;
-       char phys[32];
+       struct input_dev *dev3;
+       char phys2[32];
+       char phys3[32];
+       struct delayed_work dev3_register_work;
 
        /* these are autodetected when the device is identified */
        const struct alps_nibble_commands *nibble_commands;
        int addr_command;
-       unsigned char proto_version;
-       unsigned char byte0, mask0;
-       unsigned char fw_ver[3];
+       u16 proto_version;
+       u8 byte0, mask0;
+       u8 fw_ver[3];
        int flags;
        int x_max;
        int y_max;
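
The byte0/mask0 pair documented in the alps_protocol_info comment above amounts to a signature check on the first byte of each position report: the byte, ANDed with mask0, must equal byte0. A tiny stand-alone illustration, using the 0x8f/0x8f values that the removed alps_set_defaults() carried as its generic default (the helper name below is invented for the example):

#include <stdio.h>

/* Check the first report byte as described in the alps_protocol_info comment. */
static int first_byte_matches(unsigned char byte0, unsigned char mask0,
                              unsigned char first)
{
        return (first & mask0) == byte0;
}

int main(void)
{
        unsigned char byte0 = 0x8f, mask0 = 0x8f;

        printf("0x8f matches: %d\n", first_byte_matches(byte0, mask0, 0x8f)); /* 1 */
        printf("0x08 matches: %d\n", first_byte_matches(byte0, mask0, 0x08)); /* 0 */
        return 0;
}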
index 77e9d70a986bc049ecc48650bff28d34d97443d8..1e2291c378feb8856b319dc5c2e6492360de1ada 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 #include "cyapa.h"
 
 
index ddf5393a118098fc87337d34e3808c0cfc28ce95..5b611dd71e790660fa1492dab505499222390d02 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 #include <linux/crc-itu-t.h>
 #include "cyapa.h"
 
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
                                electrodes_tx = cyapa->electrodes_x;
                        max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
                                                ~7u) * electrodes_tx;
-               } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) {
+               } else {
                        offset = 2;
                        max_element_cnt = cyapa->electrodes_x +
                                                cyapa->electrodes_y;
index 9118a1861a45cf0629f6380112eafeb2cc9b13b9..28dcfc822bf647f4386239d49487e111020a2272 100644 (file)
@@ -710,8 +710,3 @@ err_exit:
 
        return -1;
 }
-
-bool cypress_supported(void)
-{
-       return true;
-}
index 4720f21d2d70cfe2dbf20a626a5133010ba3e320..81f68aaed7c8567d3cfbfb4afc56008815ead34d 100644 (file)
@@ -172,7 +172,6 @@ struct cytp_data {
 #ifdef CONFIG_MOUSE_PS2_CYPRESS
 int cypress_detect(struct psmouse *psmouse, bool set_properties);
 int cypress_init(struct psmouse *psmouse);
-bool cypress_supported(void);
 #else
 inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
 {
@@ -182,10 +181,6 @@ inline int cypress_init(struct psmouse *psmouse)
 {
        return -ENOSYS;
 }
-inline bool cypress_supported(void)
-{
-       return 0;
-}
 #endif /* CONFIG_MOUSE_PS2_CYPRESS */
 
 #endif  /* _CYPRESS_PS2_H */
index fca38ba63bbe7f73f6e01195bd2e20395d683dd5..23d259416f2f4d90d40a04271e98e6f8acca3966 100644 (file)
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse)
 
 #define FOC_MAX_FINGERS 5
 
-#define FOC_MAX_X 2431
-#define FOC_MAX_Y 1663
-
 /*
  * Current state of a single finger on the touchpad.
  */
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse)
                input_mt_slot(dev, i);
                input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
                if (active) {
-                       input_report_abs(dev, ABS_MT_POSITION_X, finger->x);
+                       unsigned int clamped_x, clamped_y;
+                       /*
+                        * The touchpad might report invalid data, so we clamp
+                        * the resulting values so that we do not confuse
+                        * userspace.
+                        */
+                       clamped_x = clamp(finger->x, 0U, priv->x_max);
+                       clamped_y = clamp(finger->y, 0U, priv->y_max);
+                       input_report_abs(dev, ABS_MT_POSITION_X, clamped_x);
                        input_report_abs(dev, ABS_MT_POSITION_Y,
-                                        FOC_MAX_Y - finger->y);
+                                        priv->y_max - clamped_y);
                }
        }
        input_mt_report_pointer_emulation(dev, true);
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse,
 
        state->pressed = (packet[0] >> 4) & 1;
 
-       /*
-        * packet[5] contains some kind of tool size in the most
-        * significant nibble. 0xff is a special value (latching) that
-        * signals a large contact area.
-        */
-       if (packet[5] == 0xff) {
-               state->fingers[finger].valid = false;
-               return;
-       }
-
        state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2];
        state->fingers[finger].y = (packet[3] << 8) | packet[4];
        state->fingers[finger].valid = true;
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse)
 
        return 0;
 }
+
+static void focaltech_set_resolution(struct psmouse *psmouse,
+                                     unsigned int resolution)
+{
+       /* not supported yet */
+}
+
+static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+       /* not supported yet */
+}
+
+static void focaltech_set_scale(struct psmouse *psmouse,
+                               enum psmouse_scale scale)
+{
+       /* not supported yet */
+}
+
 int focaltech_init(struct psmouse *psmouse)
 {
        struct focaltech_data *priv;
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse)
        psmouse->cleanup = focaltech_reset;
        /* resync is not supported yet */
        psmouse->resync_time = 0;
+       /*
+        * rate/resolution/scale changes are not supported yet, and
+        * the generic implementations of these functions seem to
+        * confuse some touchpads
+        */
+       psmouse->set_resolution = focaltech_set_resolution;
+       psmouse->set_rate = focaltech_set_rate;
+       psmouse->set_scale = focaltech_set_scale;
 
        return 0;
 
@@ -424,11 +444,6 @@ fail:
        return error;
 }
 
-bool focaltech_supported(void)
-{
-       return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_FOCALTECH */
 
 int focaltech_init(struct psmouse *psmouse)
@@ -438,9 +453,4 @@ int focaltech_init(struct psmouse *psmouse)
        return 0;
 }
 
-bool focaltech_supported(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_FOCALTECH */
index 71870a9b548a8cd0a695fb9327b88e75daa83eb8..ca61ebff373e99a194011c1d8554d8ffb0766981 100644 (file)
@@ -19,6 +19,5 @@
 
 int focaltech_detect(struct psmouse *psmouse, bool set_properties);
 int focaltech_init(struct psmouse *psmouse);
-bool focaltech_supported(void);
 
 #endif
index 68469feda470d9d8b34c249cbcd02426795f94c8..8bc61237bc1b1c95d43b0d7e3d11e90275aa8d4a 100644 (file)
@@ -453,6 +453,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
        psmouse->rate = r;
 }
 
+/*
+ * Here we set the mouse scaling.
+ */
+
+static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
+{
+       ps2_command(&psmouse->ps2dev, NULL,
+                   scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 :
+                                              PSMOUSE_CMD_SETSCALE11);
+}
+
 /*
  * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
  */
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
 
        psmouse->set_rate = psmouse_set_rate;
        psmouse->set_resolution = psmouse_set_resolution;
+       psmouse->set_scale = psmouse_set_scale;
        psmouse->poll = psmouse_poll;
        psmouse->protocol_handler = psmouse_process_byte;
        psmouse->pktsize = 3;
@@ -727,7 +739,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
        if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
                if (max_proto > PSMOUSE_IMEX) {
                        if (!set_properties || focaltech_init(psmouse) == 0) {
-                               if (focaltech_supported())
+                               if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH))
                                        return PSMOUSE_FOCALTECH;
                                /*
                                 * Note that we need to also restrict
@@ -776,7 +788,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * Try activating protocol, but check if support is enabled first, since
  * we try detecting Synaptics even when protocol is disabled.
  */
-                       if (synaptics_supported() &&
+                       if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) &&
                            (!set_properties || synaptics_init(psmouse) == 0)) {
                                return PSMOUSE_SYNAPTICS;
                        }
@@ -801,7 +813,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  */
        if (max_proto > PSMOUSE_IMEX &&
                        cypress_detect(psmouse, set_properties) == 0) {
-               if (cypress_supported()) {
+               if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) {
                        if (cypress_init(psmouse) == 0)
                                return PSMOUSE_CYPRESS;
 
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse)
        if (psmouse_max_proto != PSMOUSE_PS2) {
                psmouse->set_rate(psmouse, psmouse->rate);
                psmouse->set_resolution(psmouse, psmouse->resolution);
-               ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
+               psmouse->set_scale(psmouse, PSMOUSE_SCALE11);
        }
 }
 
index c2ff137ecbdb636ba75da72fe14c61a73ec59a5e..d02e1bdc9ae4934d56e5290f8adedcb3a2297566 100644 (file)
@@ -36,6 +36,11 @@ typedef enum {
        PSMOUSE_FULL_PACKET
 } psmouse_ret_t;
 
+enum psmouse_scale {
+       PSMOUSE_SCALE11,
+       PSMOUSE_SCALE21
+};
+
 struct psmouse {
        void *private;
        struct input_dev *dev;
@@ -67,6 +72,7 @@ struct psmouse {
        psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse);
        void (*set_rate)(struct psmouse *psmouse, unsigned int rate);
        void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution);
+       void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale);
 
        int (*reconnect)(struct psmouse *psmouse);
        void (*disconnect)(struct psmouse *psmouse);
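
The new set_scale callback follows the same pattern as the existing set_rate and set_resolution hooks: psmouse_apply_defaults() installs the generic handler, and a protocol driver may override it (the focaltech changes above replace all three with no-ops). A minimal sketch of that dispatch pattern, using placeholder types rather than the real struct psmouse:

#include <stdio.h>

enum psmouse_scale { PSMOUSE_SCALE11, PSMOUSE_SCALE21 };

struct psmouse {
        const char *name;
        void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale);
};

/* Generic handler: would send PSMOUSE_CMD_SETSCALE11/21 to the device. */
static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
{
        printf("%s: sending %s\n", psmouse->name,
               scale == PSMOUSE_SCALE21 ? "SETSCALE21" : "SETSCALE11");
}

/* Per-protocol override: some touchpads are confused by scaling commands. */
static void focaltech_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
{
        /* deliberately a no-op */
        (void)psmouse;
        (void)scale;
}

int main(void)
{
        struct psmouse generic = { "generic", psmouse_set_scale };
        struct psmouse focaltech = { "focaltech", focaltech_set_scale };

        generic.set_scale(&generic, PSMOUSE_SCALE11);
        focaltech.set_scale(&focaltech, PSMOUSE_SCALE11);
        return 0;
}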
index 7e705ee90b86cf0c86729aacbf6c7a1d981b0a12..dda605836546847afbdbdd4c77c976134a914a2e 100644 (file)
@@ -67,9 +67,6 @@
 #define X_MAX_POSITIVE 8176
 #define Y_MAX_POSITIVE 8176
 
-/* maximum ABS_MT_POSITION displacement (in mm) */
-#define DMAX 10
-
 /*****************************************************************************
  *     Stuff we need even when we do not want native Synaptics support
  ****************************************************************************/
@@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse)
 
 static bool cr48_profile_sensor;
 
+#define ANY_BOARD_ID 0
 struct min_max_quirk {
        const char * const *pnp_ids;
+       struct {
+               unsigned long int min, max;
+       } board_id;
        int x_min, x_max, y_min, y_max;
 };
 
 static const struct min_max_quirk min_max_pnpid_table[] = {
        {
                (const char * const []){"LEN0033", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
                1024, 5052, 2258, 4832
        },
        {
-               (const char * const []){"LEN0035", "LEN0042", NULL},
+               (const char * const []){"LEN0042", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
                1232, 5710, 1156, 4696
        },
        {
                (const char * const []){"LEN0034", "LEN0036", "LEN0037",
                                        "LEN0039", "LEN2002", "LEN2004",
                                        NULL},
+               {ANY_BOARD_ID, 2961},
                1024, 5112, 2024, 4832
        },
        {
                (const char * const []){"LEN2001", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
                1024, 5022, 2508, 4832
        },
        {
                (const char * const []){"LEN2006", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
                1264, 5675, 1171, 4688
        },
        { }
@@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0041",
        "LEN0042", /* Yoga */
        "LEN0045",
-       "LEN0046",
        "LEN0047",
-       "LEN0048",
        "LEN0049",
        "LEN2000",
        "LEN2001", /* Edge E431 */
@@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse)
        return 0;
 }
 
+static int synaptics_more_extended_queries(struct psmouse *psmouse)
+{
+       struct synaptics_data *priv = psmouse->private;
+       unsigned char buf[3];
+
+       if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf))
+               return -1;
+
+       priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2];
+
+       return 0;
+}
+
 /*
- * Read the board id from the touchpad
+ * Read the board id and the "More Extended Queries" from the touchpad
  * The board id is encoded in the "QUERY MODES" response
  */
-static int synaptics_board_id(struct psmouse *psmouse)
+static int synaptics_query_modes(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        unsigned char bid[3];
 
+       /* firmware versions prior to 7.5 have no board_id encoded */
+       if (SYN_ID_FULL(priv->identity) < 0x705)
+               return 0;
+
        if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
                return -1;
        priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
+
+       if (SYN_MEXT_CAP_BIT(bid[0]))
+               return synaptics_more_extended_queries(psmouse);
+
        return 0;
 }
 
@@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        unsigned char resp[3];
-       int i;
 
        if (SYN_ID_MAJOR(priv->identity) < 4)
                return 0;
@@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
                }
        }
 
-       for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
-               if (psmouse_matches_pnp_id(psmouse,
-                                          min_max_pnpid_table[i].pnp_ids)) {
-                       priv->x_min = min_max_pnpid_table[i].x_min;
-                       priv->x_max = min_max_pnpid_table[i].x_max;
-                       priv->y_min = min_max_pnpid_table[i].y_min;
-                       priv->y_max = min_max_pnpid_table[i].y_max;
-                       return 0;
-               }
-       }
-
        if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
            SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
                if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
@@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse)
                } else {
                        priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
                        priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
+                       psmouse_info(psmouse,
+                                    "queried max coordinates: x [..%d], y [..%d]\n",
+                                    priv->x_max, priv->y_max);
                }
        }
 
-       if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
-           SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
+       if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
+           (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
+            /*
+             * Firmware v8.1 does not report proper number of extended
+             * capabilities, but has been proven to report correct min
+             * coordinates.
+             */
+            SYN_ID_FULL(priv->identity) == 0x801)) {
                if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
                        psmouse_warn(psmouse,
                                     "device claims to have min coordinates query, but I'm not able to read it.\n");
                } else {
                        priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
                        priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
+                       psmouse_info(psmouse,
+                                    "queried min coordinates: x [%d..], y [%d..]\n",
+                                    priv->x_min, priv->y_min);
                }
        }
 
        return 0;
 }
 
+/*
+ * Apply quirk(s) if the hardware matches
+ */
+
+static void synaptics_apply_quirks(struct psmouse *psmouse)
+{
+       struct synaptics_data *priv = psmouse->private;
+       int i;
+
+       for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
+               if (!psmouse_matches_pnp_id(psmouse,
+                                           min_max_pnpid_table[i].pnp_ids))
+                       continue;
+
+               if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
+                   priv->board_id < min_max_pnpid_table[i].board_id.min)
+                       continue;
+
+               if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
+                   priv->board_id > min_max_pnpid_table[i].board_id.max)
+                       continue;
+
+               priv->x_min = min_max_pnpid_table[i].x_min;
+               priv->x_max = min_max_pnpid_table[i].x_max;
+               priv->y_min = min_max_pnpid_table[i].y_min;
+               priv->y_max = min_max_pnpid_table[i].y_max;
+               psmouse_info(psmouse,
+                            "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
+                            priv->x_min, priv->x_max,
+                            priv->y_min, priv->y_max);
+               break;
+       }
+}
+
 static int synaptics_query_hardware(struct psmouse *psmouse)
 {
        if (synaptics_identify(psmouse))
@@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
                return -1;
        if (synaptics_firmware_id(psmouse))
                return -1;
-       if (synaptics_board_id(psmouse))
+       if (synaptics_query_modes(psmouse))
                return -1;
        if (synaptics_capability(psmouse))
                return -1;
        if (synaptics_resolution(psmouse))
                return -1;
 
+       synaptics_apply_quirks(psmouse);
+
        return 0;
 }
 
@@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf)
        return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
 }
 
-static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet)
+static void synaptics_pass_pt_packet(struct psmouse *psmouse,
+                                    struct serio *ptport,
+                                    unsigned char *packet)
 {
+       struct synaptics_data *priv = psmouse->private;
        struct psmouse *child = serio_get_drvdata(ptport);
 
        if (child && child->state == PSMOUSE_ACTIVATED) {
-               serio_interrupt(ptport, packet[1], 0);
+               serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0);
                serio_interrupt(ptport, packet[4], 0);
                serio_interrupt(ptport, packet[5], 0);
                if (child->pktsize == 4)
                        serio_interrupt(ptport, packet[2], 0);
-       } else
+       } else {
                serio_interrupt(ptport, packet[1], 0);
+       }
 }
 
 static void synaptics_pt_activate(struct psmouse *psmouse)
@@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[],
        }
 }
 
+static void synaptics_parse_ext_buttons(const unsigned char buf[],
+                                       struct synaptics_data *priv,
+                                       struct synaptics_hw_state *hw)
+{
+       unsigned int ext_bits =
+               (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
+       unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
+
+       hw->ext_buttons = buf[4] & ext_mask;
+       hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
+}
+
 static bool is_forcepad;
 
 static int synaptics_parse_hw_state(const unsigned char buf[],
@@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                        hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
                }
 
-               if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
+               if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
                    ((buf[0] ^ buf[3]) & 0x02)) {
-                       switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
-                       default:
-                               /*
-                                * if nExtBtn is greater than 8 it should be
-                                * considered invalid and treated as 0
-                                */
-                               break;
-                       case 8:
-                               hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
-                               hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
-                       case 6:
-                               hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
-                               hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
-                       case 4:
-                               hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
-                               hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
-                       case 2:
-                               hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
-                               hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
-                       }
+                       synaptics_parse_ext_buttons(buf, priv, hw);
                }
        } else {
                hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
@@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
        }
 }
 
+static void synaptics_report_ext_buttons(struct psmouse *psmouse,
+                                        const struct synaptics_hw_state *hw)
+{
+       struct input_dev *dev = psmouse->dev;
+       struct synaptics_data *priv = psmouse->private;
+       int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
+       char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+       int i;
+
+       if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
+               return;
+
+       /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
+       if (SYN_ID_FULL(priv->identity) == 0x801 &&
+           !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
+               return;
+
+       if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) {
+               for (i = 0; i < ext_bits; i++) {
+                       input_report_key(dev, BTN_0 + 2 * i,
+                               hw->ext_buttons & (1 << i));
+                       input_report_key(dev, BTN_1 + 2 * i,
+                               hw->ext_buttons & (1 << (i + ext_bits)));
+               }
+               return;
+       }
+
+       /*
+        * This generation of touchpads has the trackstick buttons
+        * physically wired to the touchpad. Re-route them through
+        * the pass-through interface.
+        */
+       if (!priv->pt_port)
+               return;
+
+       /* The trackstick expects at most 3 buttons */
+       priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons)      |
+                          SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 |
+                          SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2;
+
+       synaptics_pass_pt_packet(psmouse, priv->pt_port, buf);
+}
+
 static void synaptics_report_buttons(struct psmouse *psmouse,
                                     const struct synaptics_hw_state *hw)
 {
        struct input_dev *dev = psmouse->dev;
        struct synaptics_data *priv = psmouse->private;
-       int i;
 
        input_report_key(dev, BTN_LEFT, hw->left);
        input_report_key(dev, BTN_RIGHT, hw->right);
@@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
                input_report_key(dev, BTN_BACK, hw->down);
        }
 
-       for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
-               input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
+       synaptics_report_ext_buttons(psmouse, hw);
 }
 
 static void synaptics_report_mt_data(struct psmouse *psmouse,
@@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
                pos[i].y = synaptics_invert_y(hw[i]->y);
        }
 
-       input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res);
+       input_mt_assign_slots(dev, slot, pos, nsemi, 0);
 
        for (i = 0; i < nsemi; i++) {
                input_mt_slot(dev, slot[i]);
@@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
                if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
                    synaptics_is_pt_packet(psmouse->packet)) {
                        if (priv->pt_port)
-                               synaptics_pass_pt_packet(priv->pt_port, psmouse->packet);
+                               synaptics_pass_pt_packet(psmouse, priv->pt_port,
+                                                        psmouse->packet);
                } else
                        synaptics_process_packet(psmouse);
 
@@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse,
                __set_bit(BTN_BACK, dev->keybit);
        }
 
-       for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
-               __set_bit(BTN_0 + i, dev->keybit);
+       if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
+               for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
+                       __set_bit(BTN_0 + i, dev->keybit);
 
        __clear_bit(EV_REL, dev->evbit);
        __clear_bit(REL_X, dev->relbit);
@@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse,
 
        if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
-               if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+               if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
+                   !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
                        __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
                /* Clickpads report only left button */
                __clear_bit(BTN_RIGHT, dev->keybit);
@@ -1454,11 +1556,6 @@ int synaptics_init_relative(struct psmouse *psmouse)
        return __synaptics_init(psmouse, false);
 }
 
-bool synaptics_supported(void)
-{
-       return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_SYNAPTICS */
 
 void __init synaptics_module_init(void)
@@ -1470,9 +1567,4 @@ int synaptics_init(struct psmouse *psmouse)
        return -ENOSYS;
 }
 
-bool synaptics_supported(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
index 6faf9bb7c117d46f40af90a9232fc2810c921155..ee4bd0d12b26fa2770445a381134b62124be6741 100644 (file)
@@ -22,6 +22,7 @@
 #define SYN_QUE_EXT_CAPAB_0C           0x0c
 #define SYN_QUE_EXT_MAX_COORDS         0x0d
 #define SYN_QUE_EXT_MIN_COORDS         0x0f
+#define SYN_QUE_MEXT_CAPAB_10          0x10
 
 /* synaptics modes */
 #define SYN_BIT_ABSOLUTE_MODE          (1 << 7)
@@ -53,6 +54,7 @@
 #define SYN_EXT_CAP_REQUESTS(c)                (((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)    (((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)         (((ec) & 0xff0000) >> 16)
+#define SYN_MEXT_CAP_BIT(m)            ((m) & (1 << 1))
 
 /*
  * The following describes response for the 0x0c query.
 #define SYN_CAP_REDUCED_FILTERING(ex0c)        ((ex0c) & 0x000400)
 #define SYN_CAP_IMAGE_SENSOR(ex0c)     ((ex0c) & 0x000800)
 
+/*
+ * The following describes the response for the 0x10 query.
+ *
+ * byte        mask    name                    meaning
+ * ----        ----    -------                 ------------
+ * 1   0x01    ext buttons are stick   buttons exported in the extended
+ *                                     capability are actually meant to be used
+ *                                     by the trackstick (pass-through).
+ * 1   0x02    SecurePad               the touchpad is a SecurePad, so it
+ *                                     contains a built-in fingerprint reader.
+ * 1   0xe0    more ext count          how many more extended queries are
+ *                                     available after this one.
+ * 2   0xff    SecurePad width         the width of the SecurePad fingerprint
+ *                                     reader.
+ * 3   0xff    SecurePad height        the height of the SecurePad fingerprint
+ *                                     reader.
+ */
+#define SYN_CAP_EXT_BUTTONS_STICK(ex10)        ((ex10) & 0x010000)
+#define SYN_CAP_SECUREPAD(ex10)                ((ex10) & 0x020000)
+
+#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
+#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
+#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
+
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)           ((m) & (1 << 7))
 #define SYN_MODE_RATE(m)               ((m) & (1 << 6))
@@ -143,6 +169,7 @@ struct synaptics_data {
        unsigned long int capabilities;         /* Capabilities */
        unsigned long int ext_cap;              /* Extended Capabilities */
        unsigned long int ext_cap_0c;           /* Ext Caps from 0x0c query */
+       unsigned long int ext_cap_10;           /* Ext Caps from 0x10 query */
        unsigned long int identity;             /* Identification */
        unsigned int x_res, y_res;              /* X/Y resolution in units/mm */
        unsigned int x_max, y_max;              /* Max coordinates (from FW) */
@@ -156,6 +183,7 @@ struct synaptics_data {
        bool disable_gesture;                   /* disable gestures */
 
        struct serio *pt_port;                  /* Pass-through serio port */
+       unsigned char pt_buttons;               /* Pass-through buttons */
 
        /*
         * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
@@ -175,6 +203,5 @@ int synaptics_detect(struct psmouse *psmouse, bool set_properties);
 int synaptics_init(struct psmouse *psmouse);
 int synaptics_init_relative(struct psmouse *psmouse);
 void synaptics_reset(struct psmouse *psmouse);
-bool synaptics_supported(void);
 
 #endif /* _SYNAPTICS_H */
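
The 0x10 query macros added above are used in two places in the synaptics.c changes: SYN_CAP_EXT_BUTTONS_STICK() decides whether extended buttons are reported as BTN_0.. on the touchpad or re-routed to the trackstick, and the STICK_L/M/R helpers repack the low bits of hw->ext_buttons into a standard PS/2 button byte for the pass-through port. A small stand-alone check of that repacking (macros copied from the header above; the sample value is made up):

#include <stdio.h>

/* Copied from the header changes above. */
#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))

int main(void)
{
        unsigned char ext_buttons = 0x05;       /* sample: left + right pressed */
        unsigned char pt_buttons;

        /* Same repacking as synaptics_report_ext_buttons() above. */
        pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(ext_buttons)      |
                     SYN_CAP_EXT_BUTTON_STICK_R(ext_buttons) << 1 |
                     SYN_CAP_EXT_BUTTON_STICK_M(ext_buttons) << 2;

        printf("ext_buttons=0x%02x -> pt_buttons=0x%02x\n",
               ext_buttons, pt_buttons);        /* prints 0x05 -> 0x03 */
        return 0;
}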
index 58917525126e86fd06cb632b2cbb3f558b0a2f83..6261fd6d7c3c4ddcb758b6796b0ba6ca0c222c99 100644 (file)
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I
        tristate "Allwinner sun4i resistive touchscreen controller support"
        depends on ARCH_SUNXI || COMPILE_TEST
        depends on HWMON
+       depends on THERMAL || !THERMAL_OF
        help
          This selects support for the resistive touchscreen controller
          found on Allwinner sunxi SoCs.
index baa0d9786f506bfeff064459e1f2277cd2506b6b..1ae4e547b419b909a9748b54cc6b973d31ff0221 100644 (file)
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
        bool "ARMv7/v8 Long Descriptor Format"
        select IOMMU_IO_PGTABLE
+       depends on ARM || ARM64 || COMPILE_TEST
        help
          Enable support for the ARM long descriptor pagetable format.
          This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -63,6 +64,7 @@ config MSM_IOMMU
        bool "MSM IOMMU Support"
        depends on ARM
        depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
+       depends on BROKEN
        select IOMMU_API
        help
          Support for the IOMMUs found on certain Qualcomm SOCs.
index 7ce52737c7a129c825397733041d945cd05761bf..dc14fec4ede123b0af6564f641d81dcfcb958b25 100644 (file)
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = {
 
 static int __init exynos_iommu_init(void)
 {
+       struct device_node *np;
        int ret;
 
+       np = of_find_matching_node(NULL, sysmmu_of_match);
+       if (!np)
+               return 0;
+
+       of_node_put(np);
+
        lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
        if (!lv2table_kmem_cache) {
index 5a500edf00cc146805d0a346525e0b9169c816c5..b610a8dee23820573b6362472b4ab5ec31c4003f 100644 (file)
@@ -56,7 +56,8 @@
        ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))             \
          * (d)->bits_per_level) + (d)->pg_shift)
 
-#define ARM_LPAE_PAGES_PER_PGD(d)      ((d)->pgd_size >> (d)->pg_shift)
+#define ARM_LPAE_PAGES_PER_PGD(d)                                      \
+       DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
 
 /*
  * Calculate the index at level l used to map virtual address a using the
@@ -66,7 +67,7 @@
        ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
 
 #define ARM_LPAE_LVL_IDX(a,l,d)                                                \
-       (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) &                             \
+       (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &                        \
         ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
index f59f857b702e8e3f65080edd43b743a54091170f..a4ba851825c235b5bfa7ab93a8256adfca160ca7 100644 (file)
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void)
        struct kmem_cache *p;
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        size_t align = 1 << 10; /* L2 pagetable alignment */
+       struct device_node *np;
+
+       np = of_find_matching_node(NULL, omap_iommu_of_match);
+       if (!np)
+               return 0;
+
+       of_node_put(np);
 
        p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
                              iopte_cachep_ctor);
index 6a8b1ec4a48a1f1100bc0f9f301fc658758a35ca..9f74fddcd304f76bd8a9d546f1d2caf74e7dc588 100644 (file)
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = {
 
 static int __init rk_iommu_init(void)
 {
+       struct device_node *np;
        int ret;
 
+       np = of_find_matching_node(NULL, rk_iommu_dt_ids);
+       if (!np)
+               return 0;
+
+       of_node_put(np);
+
        ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
        if (ret)
                return ret;
index 463c235acbdcdc1758329205e3c79e380b5fa7a6..4387dae14e453a949bb297ec1a74a59e400a3089 100644 (file)
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
 static u32 doorbell_mask_reg;
+static int parent_irq;
 #ifdef CONFIG_PCI_MSI
 static struct irq_domain *armada_370_xp_msi_domain;
 static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
 {
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                armada_xp_mpic_smp_cpu_init();
+
        return NOTIFY_OK;
 }
 
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
        .priority = 100,
 };
 
+static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+{
+       if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+               enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block mpic_cascaded_cpu_notifier = {
+       .notifier_call = mpic_cascaded_secondary_init,
+       .priority = 100,
+};
+
 #endif /* CONFIG_SMP */
 
 static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                                             struct device_node *parent)
 {
        struct resource main_int_res, per_cpu_int_res;
-       int parent_irq, nr_irqs, i;
+       int nr_irqs, i;
        u32 control;
 
        BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
 #endif
        } else {
+#ifdef CONFIG_SMP
+               register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+#endif
                irq_set_chained_handler(parent_irq,
                                        armada_370_xp_mpic_handle_cascade_irq);
        }
index d8996bdf0f61e95e45ee670e44e565d045fb9535..596b0a9eee99a9f2ea1beaac726bddc4069c4937 100644 (file)
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its,
 {
        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
        struct its_collection *sync_col;
+       unsigned long flags;
 
-       raw_spin_lock(&its->lock);
+       raw_spin_lock_irqsave(&its->lock, flags);
 
        cmd = its_allocate_entry(its);
        if (!cmd) {             /* We're soooooo screewed... */
                pr_err_ratelimited("ITS can't allocate, dropping command\n");
-               raw_spin_unlock(&its->lock);
+               raw_spin_unlock_irqrestore(&its->lock, flags);
                return;
        }
        sync_col = builder(cmd, desc);
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its,
 
 post:
        next_cmd = its_post_commands(its);
-       raw_spin_unlock(&its->lock);
+       raw_spin_unlock_irqrestore(&its->lock, flags);
 
        its_wait_for_range_completion(its, cmd, next_cmd);
 }
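
The ITS command-queue lock is switched from plain raw_spin_lock() to the irqsave variants so that commands can be issued from contexts where interrupts are already disabled (or could fire and recurse on the same lock) without corrupting the caller's interrupt state. A generic sketch of the conversion on a hypothetical my_lock/my_list pair:

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_RAW_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

static void my_add(struct list_head *entry)
{
        unsigned long flags;

        /* Saves the current IRQ state and restores it on unlock, so the
         * function is safe from process, softirq and hardirq context. */
        raw_spin_lock_irqsave(&my_lock, flags);
        list_add(entry, &my_list);
        raw_spin_unlock_irqrestore(&my_lock, flags);
}
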
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its)
 {
        int err;
        int i;
-       int psz = PAGE_SIZE;
+       int psz = SZ_64K;
        u64 shr = GITS_BASER_InnerShareable;
 
        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
                u64 type = GITS_BASER_TYPE(val);
                u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
+               int order = get_order(psz);
+               int alloc_size;
                u64 tmp;
                void *base;
 
                if (type == GITS_BASER_TYPE_NONE)
                        continue;
 
-               /* We're lazy and only allocate a single page for now */
-               base = (void *)get_zeroed_page(GFP_KERNEL);
+               /*
+                * Allocate as many entries as required to fit the
+                * range of device IDs that the ITS can grok... The ID
+                * space being incredibly sparse, this results in a
+                * massive waste of memory.
+                *
+                * For other tables, only allocate a single page.
+                */
+               if (type == GITS_BASER_TYPE_DEVICE) {
+                       u64 typer = readq_relaxed(its->base + GITS_TYPER);
+                       u32 ids = GITS_TYPER_DEVBITS(typer);
+
+                       order = get_order((1UL << ids) * entry_size);
+                       if (order >= MAX_ORDER) {
+                               order = MAX_ORDER - 1;
+                               pr_warn("%s: Device Table too large, reduce its page order to %u\n",
+                                       its->msi_chip.of_node->full_name, order);
+                       }
+               }
+
+               alloc_size = (1 << order) * PAGE_SIZE;
+               base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
                if (!base) {
                        err = -ENOMEM;
                        goto out_free;
@@ -841,7 +864,7 @@ retry_baser:
                        break;
                }
 
-               val |= (PAGE_SIZE / psz) - 1;
+               val |= (alloc_size / psz) - 1;
 
                writeq_relaxed(val, its->base + GITS_BASER + i * 8);
                tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -882,7 +905,7 @@ retry_baser:
                }
 
                pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
-                       (int)(PAGE_SIZE / entry_size),
+                       (int)(alloc_size / entry_size),
                        its_base_type_string[type],
                        (unsigned long)virt_to_phys(base),
                        psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
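
The Device table is now sized to cover every DeviceID the ITS may see: (1 << ID bits) * entry_size bytes, rounded up to a power-of-two page order and clamped to MAX_ORDER - 1 when the ID space is too large for the page allocator. A standalone sketch of the same arithmetic; PAGE_SIZE, MAX_ORDER, the entry size and the ID width below are illustrative values, not ones read from hardware:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define MAX_ORDER       11              /* assumed allocator limit */

/* Userspace stand-in for the kernel's get_order(). */
static int order_for_size(unsigned long size)
{
        unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long entry_size = 8;   /* bytes per Device table entry */
        unsigned int ids = 20;          /* supported DeviceID bits */
        int order = order_for_size((1UL << ids) * entry_size);

        if (order >= MAX_ORDER)
                order = MAX_ORDER - 1;  /* cap, as the driver warns about */

        printf("order %d -> %lu bytes, %lu entries\n", order,
               (1UL << order) * PAGE_SIZE,
               (1UL << order) * PAGE_SIZE / entry_size);
        return 0;
}
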
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void)
 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
 {
        struct its_device *its_dev = NULL, *tmp;
+       unsigned long flags;
 
-       raw_spin_lock(&its->lock);
+       raw_spin_lock_irqsave(&its->lock, flags);
 
        list_for_each_entry(tmp, &its->its_device_list, entry) {
                if (tmp->device_id == dev_id) {
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
                }
        }
 
-       raw_spin_unlock(&its->lock);
+       raw_spin_unlock_irqrestore(&its->lock, flags);
 
        return its_dev;
 }
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 {
        struct its_device *dev;
        unsigned long *lpi_map;
+       unsigned long flags;
        void *itt;
        int lpi_base;
        int nr_lpis;
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        nr_ites = max(2UL, roundup_pow_of_two(nvecs));
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
-       itt = kmalloc(sz, GFP_KERNEL);
+       itt = kzalloc(sz, GFP_KERNEL);
        lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
 
        if (!dev || !itt || !lpi_map) {
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        dev->device_id = dev_id;
        INIT_LIST_HEAD(&dev->entry);
 
-       raw_spin_lock(&its->lock);
+       raw_spin_lock_irqsave(&its->lock, flags);
        list_add(&dev->entry, &its->its_device_list);
-       raw_spin_unlock(&its->lock);
+       raw_spin_unlock_irqrestore(&its->lock, flags);
 
        /* Bind the device to the first possible CPU */
        cpu = cpumask_first(cpu_online_mask);
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 
 static void its_free_device(struct its_device *its_dev)
 {
-       raw_spin_lock(&its_dev->its->lock);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&its_dev->its->lock, flags);
        list_del(&its_dev->entry);
-       raw_spin_unlock(&its_dev->its->lock);
+       raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
        kfree(its_dev->itt);
        kfree(its_dev);
 }
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
        return 0;
 }
 
+struct its_pci_alias {
+       struct pci_dev  *pdev;
+       u32             dev_id;
+       u32             count;
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev)
+{
+       int msi, msix;
+
+       msi = max(pci_msi_vec_count(pdev), 0);
+       msix = max(pci_msix_vec_count(pdev), 0);
+
+       return max(msi, msix);
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+       struct its_pci_alias *dev_alias = data;
+
+       dev_alias->dev_id = alias;
+       if (pdev != dev_alias->pdev)
+               dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+
+       return 0;
+}
+
 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
                           int nvec, msi_alloc_info_t *info)
 {
        struct pci_dev *pdev;
        struct its_node *its;
-       u32 dev_id;
        struct its_device *its_dev;
+       struct its_pci_alias dev_alias;
 
        if (!dev_is_pci(dev))
                return -EINVAL;
 
        pdev = to_pci_dev(dev);
-       dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+       dev_alias.pdev = pdev;
+       dev_alias.count = nvec;
+
+       pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
        its = domain->parent->host_data;
 
-       its_dev = its_find_device(its, dev_id);
-       if (WARN_ON(its_dev))
-               return -EINVAL;
+       its_dev = its_find_device(its, dev_alias.dev_id);
+       if (its_dev) {
+               /*
+                * We have already seen this ID, probably through
+                * another alias (PCI bridge of some sort). No need to
+                * create the device.
+                */
+               dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
+               goto out;
+       }
 
-       its_dev = its_create_device(its, dev_id, nvec);
+       its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
        if (!its_dev)
                return -ENOMEM;
 
-       dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
-
+       dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
+               dev_alias.count, ilog2(dev_alias.count));
+out:
        info->scratchpad[0].ptr = its_dev;
        info->scratchpad[1].ptr = dev;
        return 0;
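
Several PCI requester IDs can alias to one DeviceID (multi-function devices, devices behind conventional bridges), so the MSI prepare path now walks every DMA alias with pci_for_each_dma_alias(), reuses an existing ITT when the ID was already seen and sizes a new one for the whole alias group. A sketch of how such a walk is typically driven; the struct and callback names are illustrative, not part of the driver:

#include <linux/pci.h>

struct alias_walk {
        u16     dev_id;
        u32     vectors;
};

static int count_alias(struct pci_dev *pdev, u16 alias, void *data)
{
        struct alias_walk *w = data;

        w->dev_id = alias;      /* the last alias visited ends up as the ID */
        w->vectors++;           /* accumulate per-alias requirements here */
        return 0;               /* returning non-zero stops the walk */
}

/*
 * usage:
 *      struct alias_walk w = { .vectors = 0 };
 *      pci_for_each_dma_alias(pdev, count_alias, &w);
 */
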
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = {
        .deactivate             = its_irq_domain_deactivate,
 };
 
+static int its_force_quiescent(void __iomem *base)
+{
+       u32 count = 1000000;    /* 1s */
+       u32 val;
+
+       val = readl_relaxed(base + GITS_CTLR);
+       if (val & GITS_CTLR_QUIESCENT)
+               return 0;
+
+       /* Disable the generation of all interrupts to this ITS */
+       val &= ~GITS_CTLR_ENABLE;
+       writel_relaxed(val, base + GITS_CTLR);
+
+       /* Poll GITS_CTLR and wait until ITS becomes quiescent */
+       while (1) {
+               val = readl_relaxed(base + GITS_CTLR);
+               if (val & GITS_CTLR_QUIESCENT)
+                       return 0;
+
+               count--;
+               if (!count)
+                       return -EBUSY;
+
+               cpu_relax();
+               udelay(1);
+       }
+}
+
 static int its_probe(struct device_node *node, struct irq_domain *parent)
 {
        struct resource res;
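
its_force_quiescent() above is a bounded busy-wait: clear the enable bit, then poll GITS_CTLR roughly once per microsecond for up to a second and return -EBUSY instead of hanging forever if firmware left the ITS in a state it cannot drain. The same shape as a standalone helper; the retry count and delay below are arbitrary, not the driver's values:

#include <stdbool.h>
#include <errno.h>
#include <unistd.h>

/* Poll cond(arg) until it is true, at most max_tries times, sleeping
 * delay_us microseconds between attempts. Returns 0 or -EBUSY. */
static int poll_until(bool (*cond)(void *arg), void *arg,
                      unsigned int max_tries, unsigned int delay_us)
{
        while (max_tries--) {
                if (cond(arg))
                        return 0;
                usleep(delay_us);
        }
        return -EBUSY;
}
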
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
                goto out_unmap;
        }
 
+       err = its_force_quiescent(its_base);
+       if (err) {
+               pr_warn("%s: failed to quiesce, giving up\n",
+                       node->full_name);
+               goto out_unmap;
+       }
+
        pr_info("ITS: %s\n", node->full_name);
 
        its = kzalloc(sizeof(*its), GFP_KERNEL);
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
        writeq_relaxed(baser, its->base + GITS_CBASER);
        tmp = readq_relaxed(its->base + GITS_CBASER);
        writeq_relaxed(0, its->base + GITS_CWRITER);
-       writel_relaxed(1, its->base + GITS_CTLR);
+       writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
 
        if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
                pr_info("ITS: using cache flushing for cmd queue\n");
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void)
 
 int its_cpu_init(void)
 {
-       if (!gic_rdists_supports_plpis()) {
-               pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
-               return -ENXIO;
-       }
-
        if (!list_empty(&its_nodes)) {
+               if (!gic_rdists_supports_plpis()) {
+                       pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+                       return -ENXIO;
+               }
                its_cpu_init_lpis();
                its_cpu_init_collection();
        }
index 1c6dea2fbc34ce2d7b00007250111166315d8b78..fd8850def1b86a3310e376c821c3aaf33153f1cc 100644 (file)
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                tlist |= 1 << (mpidr & 0xf);
 
                cpu = cpumask_next(cpu, mask);
-               if (cpu == nr_cpu_ids)
+               if (cpu >= nr_cpu_ids)
                        goto out;
 
                mpidr = cpu_logical_map(cpu);
index 4634cf7d0ec379d5578319d45194cb18c9997510..471e1cdc193365dce99dcb53e3c8b21a388e0faa 100644 (file)
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d)
 static void gic_mask_irq(struct irq_data *d)
 {
        u32 mask = 1 << (gic_irq(d) % 32);
+       unsigned long flags;
 
-       raw_spin_lock(&irq_controller_lock);
+       raw_spin_lock_irqsave(&irq_controller_lock, flags);
        writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
        if (gic_arch_extn.irq_mask)
                gic_arch_extn.irq_mask(d);
-       raw_spin_unlock(&irq_controller_lock);
+       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
        u32 mask = 1 << (gic_irq(d) % 32);
+       unsigned long flags;
 
-       raw_spin_lock(&irq_controller_lock);
+       raw_spin_lock_irqsave(&irq_controller_lock, flags);
        if (gic_arch_extn.irq_unmask)
                gic_arch_extn.irq_unmask(d);
        writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
-       raw_spin_unlock(&irq_controller_lock);
+       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 {
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
+       unsigned long flags;
        int ret;
 
        /* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
                            type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;
 
-       raw_spin_lock(&irq_controller_lock);
+       raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
        if (gic_arch_extn.irq_set_type)
                gic_arch_extn.irq_set_type(d, type);
 
        ret = gic_configure_irq(gicirq, type, base, NULL);
 
-       raw_spin_unlock(&irq_controller_lock);
+       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 
        return ret;
 }
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
        u32 val, mask, bit;
+       unsigned long flags;
 
        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       raw_spin_lock(&irq_controller_lock);
+       raw_spin_lock_irqsave(&irq_controller_lock, flags);
        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
-       raw_spin_unlock(&irq_controller_lock);
+       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 
        return IRQ_SET_MASK_OK;
 }
index 1daa7ca04577de0854f77b1ff4d31faf241e0baa..9acdc080e7ecd21b2cd256c18e31c9c27337c7d1 100644 (file)
@@ -192,14 +192,6 @@ static bool gic_local_irq_is_routable(int intr)
        }
 }
 
-unsigned int gic_get_timer_pending(void)
-{
-       unsigned int vpe_pending;
-
-       vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-       return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
-}
-
 static void gic_bind_eic_interrupt(int irq, int set)
 {
        /* Convert irq vector # to hw int # */
index 3c92780bda09e17843f3cea5c7c35161e103c25c..ff48da61c94c849bf06cbb9ab9cb149515dcd626 100644 (file)
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
                enable_hwirq(hc);
                spin_unlock_irqrestore(&hc->lock, flags);
                /* Timeout 80ms */
-               current->state = TASK_UNINTERRUPTIBLE;
+               set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout((80 * HZ) / 1000);
                printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
                       hc->irq, hc->irqcnt);
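
set_current_state() includes the memory barrier that makes the sleep pattern safe against concurrent wakeups, which a plain assignment to current->state does not, and it is the documented way to prepare for schedule_timeout(). A minimal sketch of the resulting sleep, keeping the driver's 80 ms delay but using msecs_to_jiffies() rather than open-coded HZ math:

#include <linux/sched.h>
#include <linux/jiffies.h>

static void wait_about_80ms(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(80));
}
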
index 6a7447c304acc39e6edc422bf8f6533acfde93b5..358a574d9e8be4d2bfed217b0448c89fe458d8f6 100644 (file)
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
        if (ints[0] > 1)
                membase = (unsigned long)ints[2];
        if (str && *str) {
-               strcpy(sid, str);
+               strlcpy(sid, str, sizeof(sid));
                icn_id = sid;
                if ((p = strchr(sid, ','))) {
                        *p++ = 0;
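
strcpy() would overrun the destination if the boot/module parameter string is longer than the fixed-size sid[] buffer; strlcpy() bounds the copy to the buffer size and always NUL-terminates. A minimal sketch, with the buffer size chosen only for illustration:

#include <linux/string.h>

static void set_id(const char *str)
{
        char sid[20];                           /* illustrative size */

        strlcpy(sid, str, sizeof(sid));         /* bounded, NUL-terminated */
}
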
index 08981be7baa183dbe963b6e38cd4866f34e278a7..713a96237a80c34951302dfa4a5ea6db9f44c39b 100644 (file)
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/backing-dev.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
+#include <linux/rbtree.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
@@ -58,7 +60,8 @@ struct dm_crypt_io {
        atomic_t io_pending;
        int error;
        sector_t sector;
-       struct dm_crypt_io *base_io;
+
+       struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -108,7 +111,8 @@ struct iv_tcw_private {
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
-enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
+            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
 
 /*
  * The fields in here must be read only after initialization.
@@ -121,14 +125,18 @@ struct crypt_config {
         * pool for per bio private data, crypto requests and
         * encryption requests/buffer pages
         */
-       mempool_t *io_pool;
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;
+       struct mutex bio_alloc_lock;
 
        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
 
+       struct task_struct *write_thread;
+       wait_queue_head_t write_thread_wait;
+       struct rb_root write_tree;
+
        char *cipher;
        char *cipher_string;
 
@@ -172,9 +180,6 @@ struct crypt_config {
 };
 
 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32
-
-static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -946,57 +951,70 @@ static int crypt_convert(struct crypt_config *cc,
        return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
+ *
+ * This function may be called concurrently. If we allocate from the mempool
+ * concurrently, there is a possibility of deadlock. For example, if two
+ * processes, each wanting 256 pages, allocate concurrently from a mempool
+ * of 256 pages, they may deadlock once both processes have allocated 128
+ * pages and the mempool is exhausted.
+ *
+ * In order to avoid this scenario we allocate the pages under a mutex.
+ *
+ * In order to not degrade performance with excessive locking, we try
+ * non-blocking allocations without a mutex first but on failure we fall back
+ * to blocking allocations with a mutex.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-                                     unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-       unsigned i, len;
+       gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+       unsigned i, len, remaining_size;
        struct page *page;
+       struct bio_vec *bvec;
+
+retry:
+       if (unlikely(gfp_mask & __GFP_WAIT))
+               mutex_lock(&cc->bio_alloc_lock);
 
        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
-               return NULL;
+               goto return_clone;
 
        clone_init(io, clone);
-       *out_of_pages = 0;
+
+       remaining_size = size;
 
        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page) {
-                       *out_of_pages = 1;
-                       break;
+                       crypt_free_buffer_pages(cc, clone);
+                       bio_put(clone);
+                       gfp_mask |= __GFP_WAIT;
+                       goto retry;
                }
 
-               /*
-                * If additional pages cannot be allocated without waiting,
-                * return a partially-allocated bio.  The caller will then try
-                * to allocate more bios while submitting this partial bio.
-                */
-               gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+               len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
 
-               len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+               bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+               bvec->bv_page = page;
+               bvec->bv_len = len;
+               bvec->bv_offset = 0;
 
-               if (!bio_add_page(clone, page, len, 0)) {
-                       mempool_free(page, cc->page_pool);
-                       break;
-               }
+               clone->bi_iter.bi_size += len;
 
-               size -= len;
+               remaining_size -= len;
        }
 
-       if (!clone->bi_iter.bi_size) {
-               bio_put(clone);
-               return NULL;
-       }
+return_clone:
+       if (unlikely(gfp_mask & __GFP_WAIT))
+               mutex_unlock(&cc->bio_alloc_lock);
 
        return clone;
 }
@@ -1020,7 +1038,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
-       io->base_io = NULL;
        io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1050,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
-       struct dm_crypt_io *base_io = io->base_io;
        int error = io->error;
 
        if (!atomic_dec_and_test(&io->io_pending))
@@ -1047,16 +1062,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 
        if (io->ctx.req)
                crypt_free_req(cc, io->ctx.req, base_bio);
-       if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
-               mempool_free(io, cc->io_pool);
-
-       if (likely(!base_io))
-               bio_endio(base_bio, error);
-       else {
-               if (error && !base_io->error)
-                       base_io->error = error;
-               crypt_dec_pending(base_io);
-       }
+
+       bio_endio(base_bio, error);
 }
 
 /*
@@ -1138,37 +1145,97 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        return 0;
 }
 
+static void kcryptd_io_read_work(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+       crypt_inc_pending(io);
+       if (kcryptd_io_read(io, GFP_NOIO))
+               io->error = -ENOMEM;
+       crypt_dec_pending(io);
+}
+
+static void kcryptd_queue_read(struct dm_crypt_io *io)
+{
+       struct crypt_config *cc = io->cc;
+
+       INIT_WORK(&io->work, kcryptd_io_read_work);
+       queue_work(cc->io_queue, &io->work);
+}
+
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
        struct bio *clone = io->ctx.bio_out;
+
        generic_make_request(clone);
 }
 
-static void kcryptd_io(struct work_struct *work)
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
+static int dmcrypt_write(void *data)
 {
-       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = data;
+       struct dm_crypt_io *io;
 
-       if (bio_data_dir(io->base_bio) == READ) {
-               crypt_inc_pending(io);
-               if (kcryptd_io_read(io, GFP_NOIO))
-                       io->error = -ENOMEM;
-               crypt_dec_pending(io);
-       } else
-               kcryptd_io_write(io);
-}
+       while (1) {
+               struct rb_root write_tree;
+               struct blk_plug plug;
 
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-       struct crypt_config *cc = io->cc;
+               DECLARE_WAITQUEUE(wait, current);
 
-       INIT_WORK(&io->work, kcryptd_io);
-       queue_work(cc->io_queue, &io->work);
+               spin_lock_irq(&cc->write_thread_wait.lock);
+continue_locked:
+
+               if (!RB_EMPTY_ROOT(&cc->write_tree))
+                       goto pop_from_list;
+
+               __set_current_state(TASK_INTERRUPTIBLE);
+               __add_wait_queue(&cc->write_thread_wait, &wait);
+
+               spin_unlock_irq(&cc->write_thread_wait.lock);
+
+               if (unlikely(kthread_should_stop())) {
+                       set_task_state(current, TASK_RUNNING);
+                       remove_wait_queue(&cc->write_thread_wait, &wait);
+                       break;
+               }
+
+               schedule();
+
+               set_task_state(current, TASK_RUNNING);
+               spin_lock_irq(&cc->write_thread_wait.lock);
+               __remove_wait_queue(&cc->write_thread_wait, &wait);
+               goto continue_locked;
+
+pop_from_list:
+               write_tree = cc->write_tree;
+               cc->write_tree = RB_ROOT;
+               spin_unlock_irq(&cc->write_thread_wait.lock);
+
+               BUG_ON(rb_parent(write_tree.rb_node));
+
+               /*
+                * Note: we cannot walk the tree here with rb_next because
+                * the structures may be freed when kcryptd_io_write is called.
+                */
+               blk_start_plug(&plug);
+               do {
+                       io = crypt_io_from_node(rb_first(&write_tree));
+                       rb_erase(&io->rb_node, &write_tree);
+                       kcryptd_io_write(io);
+               } while (!RB_EMPTY_ROOT(&write_tree));
+               blk_finish_plug(&plug);
+       }
+       return 0;
 }
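
Instead of each encryption worker calling generic_make_request() itself, finished write bios are now queued on an rb-tree keyed by sector and the single dmcrypt_write thread drains the tree in ascending sector order under a block plug, so the encrypted writes reach the device in roughly the order an unencrypted workload would produce. A standalone sketch of the collect-then-drain idea, using a sorted singly linked list as a simplified stand-in for the kernel rb-tree:

#include <stdio.h>
#include <stdlib.h>

struct pending_io {
        unsigned long long      sector;
        struct pending_io       *next;
};

/* Keep the list ordered by sector (the rb-tree makes this O(log n)). */
static void sorted_insert(struct pending_io **head, struct pending_io *io)
{
        while (*head && (*head)->sector <= io->sector)
                head = &(*head)->next;
        io->next = *head;
        *head = io;
}

int main(void)
{
        unsigned long long sectors[] = { 4096, 8, 520, 64 };
        struct pending_io *head = NULL, *io;
        size_t i;

        for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
                io = malloc(sizeof(*io));
                io->sector = sectors[i];
                sorted_insert(&head, io);
        }

        /* Drain in ascending sector order, as the write thread does. */
        while (head) {
                io = head;
                head = io->next;
                printf("submit sector %llu\n", io->sector);
                free(io);
        }
        return 0;
}
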
 
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->cc;
+       unsigned long flags;
+       sector_t sector;
+       struct rb_node **rbp, *parent;
 
        if (unlikely(io->error < 0)) {
                crypt_free_buffer_pages(cc, clone);
@@ -1182,20 +1249,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 
        clone->bi_iter.bi_sector = cc->start + io->sector;
 
-       if (async)
-               kcryptd_queue_io(io);
-       else
+       if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
                generic_make_request(clone);
+               return;
+       }
+
+       spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
+       rbp = &cc->write_tree.rb_node;
+       parent = NULL;
+       sector = io->sector;
+       while (*rbp) {
+               parent = *rbp;
+               if (sector < crypt_io_from_node(parent)->sector)
+                       rbp = &(*rbp)->rb_left;
+               else
+                       rbp = &(*rbp)->rb_right;
+       }
+       rb_link_node(&io->rb_node, parent, rbp);
+       rb_insert_color(&io->rb_node, &cc->write_tree);
+
+       wake_up_locked(&cc->write_thread_wait);
+       spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
 }
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *clone;
-       struct dm_crypt_io *new_io;
        int crypt_finished;
-       unsigned out_of_pages = 0;
-       unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;
 
@@ -1205,80 +1286,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        crypt_inc_pending(io);
        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-       /*
-        * The allocated buffers can be smaller than the whole bio,
-        * so repeat the whole process until all the data can be handled.
-        */
-       while (remaining) {
-               clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-               if (unlikely(!clone)) {
-                       io->error = -ENOMEM;
-                       break;
-               }
-
-               io->ctx.bio_out = clone;
-               io->ctx.iter_out = clone->bi_iter;
-
-               remaining -= clone->bi_iter.bi_size;
-               sector += bio_sectors(clone);
-
-               crypt_inc_pending(io);
-
-               r = crypt_convert(cc, &io->ctx);
-               if (r < 0)
-                       io->error = -EIO;
-
-               crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-               /* Encryption was already finished, submit io now */
-               if (crypt_finished) {
-                       kcryptd_crypt_write_io_submit(io, 0);
-
-                       /*
-                        * If there was an error, do not try next fragments.
-                        * For async, error is processed in async handler.
-                        */
-                       if (unlikely(r < 0))
-                               break;
+       clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+       if (unlikely(!clone)) {
+               io->error = -EIO;
+               goto dec;
+       }
 
-                       io->sector = sector;
-               }
+       io->ctx.bio_out = clone;
+       io->ctx.iter_out = clone->bi_iter;
 
-               /*
-                * Out of memory -> run queues
-                * But don't wait if split was due to the io size restriction
-                */
-               if (unlikely(out_of_pages))
-                       congestion_wait(BLK_RW_ASYNC, HZ/100);
+       sector += bio_sectors(clone);
 
-               /*
-                * With async crypto it is unsafe to share the crypto context
-                * between fragments, so switch to a new dm_crypt_io structure.
-                */
-               if (unlikely(!crypt_finished && remaining)) {
-                       new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-                       crypt_io_init(new_io, io->cc, io->base_bio, sector);
-                       crypt_inc_pending(new_io);
-                       crypt_convert_init(cc, &new_io->ctx, NULL,
-                                          io->base_bio, sector);
-                       new_io->ctx.iter_in = io->ctx.iter_in;
-
-                       /*
-                        * Fragments after the first use the base_io
-                        * pending count.
-                        */
-                       if (!io->base_io)
-                               new_io->base_io = io;
-                       else {
-                               new_io->base_io = io->base_io;
-                               crypt_inc_pending(io->base_io);
-                               crypt_dec_pending(io);
-                       }
+       crypt_inc_pending(io);
+       r = crypt_convert(cc, &io->ctx);
+       if (r)
+               io->error = -EIO;
+       crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
-                       io = new_io;
-               }
+       /* Encryption was already finished, submit io now */
+       if (crypt_finished) {
+               kcryptd_crypt_write_io_submit(io, 0);
+               io->sector = sector;
        }
 
+dec:
        crypt_dec_pending(io);
 }
 
@@ -1481,6 +1512,9 @@ static void crypt_dtr(struct dm_target *ti)
        if (!cc)
                return;
 
+       if (cc->write_thread)
+               kthread_stop(cc->write_thread);
+
        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
        if (cc->crypt_queue)
@@ -1495,8 +1529,6 @@ static void crypt_dtr(struct dm_target *ti)
                mempool_destroy(cc->page_pool);
        if (cc->req_pool)
                mempool_destroy(cc->req_pool);
-       if (cc->io_pool)
-               mempool_destroy(cc->io_pool);
 
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
@@ -1688,7 +1720,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        char dummy;
 
        static struct dm_arg _args[] = {
-               {0, 1, "Invalid number of feature args"},
+               {0, 3, "Invalid number of feature args"},
        };
 
        if (argc < 5) {
@@ -1710,13 +1742,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (ret < 0)
                goto bad;
 
-       ret = -ENOMEM;
-       cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
-       if (!cc->io_pool) {
-               ti->error = "Cannot allocate crypt io mempool";
-               goto bad;
-       }
-
        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
@@ -1734,6 +1759,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
        }
 
+       ret = -ENOMEM;
        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
                        sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
        if (!cc->req_pool) {
@@ -1746,7 +1772,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
                      ARCH_KMALLOC_MINALIGN);
 
-       cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+       cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad;
@@ -1758,6 +1784,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
+       mutex_init(&cc->bio_alloc_lock);
+
        ret = -EINVAL;
        if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid iv_offset sector";
@@ -1788,15 +1816,26 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                if (ret)
                        goto bad;
 
-               opt_string = dm_shift_arg(&as);
+               while (opt_params--) {
+                       opt_string = dm_shift_arg(&as);
+                       if (!opt_string) {
+                               ti->error = "Not enough feature arguments";
+                               goto bad;
+                       }
 
-               if (opt_params == 1 && opt_string &&
-                   !strcasecmp(opt_string, "allow_discards"))
-                       ti->num_discard_bios = 1;
-               else if (opt_params) {
-                       ret = -EINVAL;
-                       ti->error = "Invalid feature arguments";
-                       goto bad;
+                       if (!strcasecmp(opt_string, "allow_discards"))
+                               ti->num_discard_bios = 1;
+
+                       else if (!strcasecmp(opt_string, "same_cpu_crypt"))
+                               set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+
+                       else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
+                               set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+
+                       else {
+                               ti->error = "Invalid feature arguments";
+                               goto bad;
+                       }
                }
        }
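
With the loop above, crypt_ctr() accepts up to three optional feature arguments in any combination: allow_discards, same_cpu_crypt and submit_from_crypt_cpus, and crypt_status() further down reports back exactly the set that is active. As a hedged usage note based on the generic dm-crypt table layout rather than anything shown here, a table line enabling two of the features would look roughly like:

        <cipher> <key> <iv_offset> <device> <offset> 2 allow_discards same_cpu_crypt

where the leading "2" is the feature-argument count consumed by dm_read_arg_group() above.
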
 
@@ -1807,13 +1846,28 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       cc->crypt_queue = alloc_workqueue("kcryptd",
-                                         WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+       if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+       else
+               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+                                                 num_online_cpus());
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad;
        }
 
+       init_waitqueue_head(&cc->write_thread_wait);
+       cc->write_tree = RB_ROOT;
+
+       cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+       if (IS_ERR(cc->write_thread)) {
+               ret = PTR_ERR(cc->write_thread);
+               cc->write_thread = NULL;
+               ti->error = "Couldn't spawn write thread";
+               goto bad;
+       }
+       wake_up_process(cc->write_thread);
+
        ti->num_flush_bios = 1;
        ti->discard_zeroes_data_unsupported = true;
 
@@ -1848,7 +1902,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
-                       kcryptd_queue_io(io);
+                       kcryptd_queue_read(io);
        } else
                kcryptd_queue_crypt(io);
 
@@ -1860,6 +1914,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 {
        struct crypt_config *cc = ti->private;
        unsigned i, sz = 0;
+       int num_feature_args = 0;
 
        switch (type) {
        case STATUSTYPE_INFO:
@@ -1878,8 +1933,18 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);
 
-               if (ti->num_discard_bios)
-                       DMEMIT(" 1 allow_discards");
+               num_feature_args += !!ti->num_discard_bios;
+               num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+               num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+               if (num_feature_args) {
+                       DMEMIT(" %d", num_feature_args);
+                       if (ti->num_discard_bios)
+                               DMEMIT(" allow_discards");
+                       if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+                               DMEMIT(" same_cpu_crypt");
+                       if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
+                               DMEMIT(" submit_from_crypt_cpus");
+               }
 
                break;
        }
@@ -1976,7 +2041,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 13, 0},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
@@ -1994,15 +2059,9 @@ static int __init dm_crypt_init(void)
 {
        int r;
 
-       _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
-       if (!_crypt_io_pool)
-               return -ENOMEM;
-
        r = dm_register_target(&crypt_target);
-       if (r < 0) {
+       if (r < 0)
                DMERR("register failed %d", r);
-               kmem_cache_destroy(_crypt_io_pool);
-       }
 
        return r;
 }
@@ -2010,7 +2069,6 @@ static int __init dm_crypt_init(void)
 static void __exit dm_crypt_exit(void)
 {
        dm_unregister_target(&crypt_target);
-       kmem_cache_destroy(_crypt_io_pool);
 }
 
 module_init(dm_crypt_init);
index c09359db3a90730dbd32b3bd733709f3c6444192..37de0173b6d2324ed15de95442df37bc5a990e16 100644 (file)
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
        unsigned short logical_block_size = queue_logical_block_size(q);
        sector_t num_sectors;
 
+       /* Reject unsupported discard requests */
+       if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+               dec_count(io, region, -EOPNOTSUPP);
+               return;
+       }
+
        /*
         * where->count may be zero if rw holds a flush and we need to
         * send a zero-sized flush.
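
do_region() now rejects discard requests itself with -EOPNOTSUPP when the underlying queue does not advertise discard support, rather than passing them down and letting the failure be misreported by lower layers. A sketch of the check a submitter makes, assuming a struct block_device in scope:

#include <linux/blkdev.h>

/* Only issue REQ_DISCARD when the backing queue advertises support. */
static bool can_discard(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        return q && blk_queue_discard(q);
}
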
index 7dfdb5c746d6f31960902350c33b7457485caadb..089d62751f7ff2a3aedf7e441cb88bec0d06b8a7 100644 (file)
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
                return;
        }
 
+       /*
+        * If the bio is discard, return an error, but do not
+        * degrade the array.
+        */
+       if (bio->bi_rw & REQ_DISCARD) {
+               bio_endio(bio, -EOPNOTSUPP);
+               return;
+       }
+
        for (i = 0; i < ms->nr_mirrors; i++)
                if (test_bit(i, &error))
                        fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
index 864b03f477276f9c01bee8fd34a25e2303af1116..8b204ae216ab62d354c814277dc413c34f0bf9a4 100644 (file)
@@ -1432,8 +1432,6 @@ out:
                full_bio->bi_private = pe->full_bio_private;
                atomic_inc(&full_bio->bi_remaining);
        }
-       free_pending_exception(pe);
-
        increment_pending_exceptions_done_count();
 
        up_write(&s->lock);
@@ -1450,6 +1448,8 @@ out:
        }
 
        retry_origin_bios(s, origin_bios);
+
+       free_pending_exception(pe);
 }
 
 static void commit_callback(void *context, int success)
index ec1444f49de14ac185ae39cfb214deee3ba66998..73f28802dc7abc3cb46dc38c8ef6fb5bb521e66b 100644 (file)
@@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md)
        return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
        struct mapped_device *md;
        unsigned minor = MINOR(dev);
@@ -2582,12 +2582,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
        spin_lock(&_minor_lock);
 
        md = idr_find(&_minor_idr, minor);
-       if (md && (md == MINOR_ALLOCED ||
-                  (MINOR(disk_devt(dm_disk(md))) != minor) ||
-                  dm_deleting_md(md) ||
-                  test_bit(DMF_FREEING, &md->flags))) {
-               md = NULL;
-               goto out;
+       if (md) {
+               if ((md == MINOR_ALLOCED ||
+                    (MINOR(disk_devt(dm_disk(md))) != minor) ||
+                    dm_deleting_md(md) ||
+                    test_bit(DMF_FREEING, &md->flags))) {
+                       md = NULL;
+                       goto out;
+               }
+               dm_get(md);
        }
 
 out:
@@ -2595,16 +2598,6 @@ out:
 
        return md;
 }
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
-       struct mapped_device *md = dm_find_md(dev);
-
-       if (md)
-               dm_get(md);
-
-       return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
index c8d2bac4e28be4a65edb63d613b0336b7ff35783..cadf9cc02b2561ade72800e9a0986d7e23b09163 100644 (file)
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
        return err ? err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
-__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
 
 static ssize_t
 errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
        return err ?: len;
 }
 static struct md_sysfs_entry md_resync_start =
-__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
+__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
+               resync_start_show, resync_start_store);
 
 /*
  * The array state can be:
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        return err ?: len;
 }
 static struct md_sysfs_entry md_array_state =
-__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
+__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
 
 static ssize_t
 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@ out_unlock:
 }
 
 static struct md_sysfs_entry md_metadata =
-__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
 
 static ssize_t
 action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
 }
 
 static struct md_sysfs_entry md_scan_mode =
-__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
 
 static ssize_t
 last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page)
        return sprintf(page, "%llu / %llu\n", resync, max_sectors);
 }
 
-static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
+static struct md_sysfs_entry md_sync_completed =
+       __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
 
 static ssize_t
 min_sync_show(struct mddev *mddev, char *page)
index cfbf9617e4658bd6aa84f45c840bda6f56a8402a..ebb280a14325e1d937986926b40592c3b1847168 100644 (file)
@@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
        if (r)
                return r;
 
-       return count > 1;
+       *result = count > 1;
+
+       return 0;
 }
 
 static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
index 4153da5d40111844616e8247a78e16c561602395..d34e238afa54c24ccaefbc7c6d58974dc2104be6 100644 (file)
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                if (test_bit(WriteMostly, &rdev->flags)) {
                        /* Don't balance among write-mostly, just
                         * use the first as a last resort */
-                       if (best_disk < 0) {
+                       if (best_dist_disk < 0) {
                                if (is_badblock(rdev, this_sector, sectors,
                                                &first_bad, &bad_sectors)) {
                                        if (first_bad < this_sector)
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                                        best_good_sectors = first_bad - this_sector;
                                } else
                                        best_good_sectors = sectors;
-                               best_disk = disk;
+                               best_dist_disk = disk;
+                               best_pending_disk = disk;
                        }
                        continue;
                }
index e75d48c0421a41788c9159ef7e74d22ad93d9695..cd2f96b2c57263628ef0816af3b114ad9437b740 100644 (file)
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
                schedule_timeout_uninterruptible(1);
        }
        /* Need to check if array will still be degraded after recovery/resync
-        * We don't need to check the 'failed' flag as when that gets set,
-        * recovery aborts.
+        * Note that in the case of > 1 drive failures it's possible we're
+        * rebuilding one drive while leaving another faulty drive in the array.
         */
-       for (i = 0; i < conf->raid_disks; i++)
-               if (conf->disks[i].rdev == NULL)
+       rcu_read_lock();
+       for (i = 0; i < conf->raid_disks; i++) {
+               struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+
+               if (rdev == NULL || test_bit(Faulty, &rdev->flags))
                        still_degraded = 1;
+       }
+       rcu_read_unlock();
 
        bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
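
The degraded check now runs under rcu_read_lock() because conf->disks[i].rdev can be cleared or replaced while resync runs, and it also treats a present-but-Faulty device as degraded. A minimal sketch of the read-side pattern on a hypothetical RCU-protected pointer:

#include <linux/rcupdate.h>

struct my_dev {
        unsigned long flags;
};

static unsigned long my_dev_flags(struct my_dev __rcu **slot)
{
        struct my_dev *dev;
        unsigned long flags = 0;

        rcu_read_lock();
        dev = rcu_dereference(*slot);   /* snapshot the pointer once */
        if (dev)
                flags = dev->flags;     /* must not sleep in this section */
        rcu_read_unlock();

        return flags;
}

The hunk above uses ACCESS_ONCE() for the one-time snapshot, which serves the same purpose in this context.
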
 
index 9306219d5675442b186b0ab6381fdd246ad09e60..6ad049a08e4d9d27880e62b69a8d249207fa5a13 100644 (file)
@@ -341,6 +341,8 @@ void mei_stop(struct mei_device *dev)
 
        dev->dev_state = MEI_DEV_POWER_DOWN;
        mei_reset(dev);
+       /* move device to disabled state unconditionally */
+       dev->dev_state = MEI_DEV_DISABLED;
 
        mutex_unlock(&dev->device_lock);
 
index e9f1d8d8461353cd88831c154ae6dc43fe994772..c53f14a7ce546533c300313a54078e1bad327bf9 100644 (file)
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev)
                    PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
                        ret = PTR_ERR(pwrseq->reset_gpios[i]);
 
-                       while (--i)
+                       while (i--)
                                gpiod_put(pwrseq->reset_gpios[i]);
 
                        goto clk_put;
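
The error path must release every GPIO obtained before the one that failed. With the old while (--i) the loop pre-decrements and stops as soon as i reaches 0, so descriptor 0 is leaked; while (i--) tests first and then decrements, visiting i-1 down to 0 exactly once each. A standalone demonstration of the difference:

#include <stdio.h>

static void release(int idx)
{
        printf("release %d\n", idx);
}

int main(void)
{
        int acquired = 3;       /* indices 0, 1, 2 were acquired, 3 failed */
        int i;

        i = acquired;
        while (--i)             /* buggy: prints 2, 1 and leaks index 0 */
                release(i);

        i = acquired;
        while (i--)             /* fixed: prints 2, 1, 0 */
                release(i);

        return 0;
}
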
index 6af0a28ba37dd6fe54cc44a4164734597b53886d..e8a4218b57267f508eb871f216de112e93707d31 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/err.h>
 
 #include <linux/clk.h>
-#include <linux/clk/sunxi.h>
-
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
@@ -229,6 +227,8 @@ struct sunxi_mmc_host {
        /* clock management */
        struct clk      *clk_ahb;
        struct clk      *clk_mmc;
+       struct clk      *clk_sample;
+       struct clk      *clk_output;
 
        /* irq */
        spinlock_t      lock;
@@ -653,26 +653,31 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
 
        /* determine delays */
        if (rate <= 400000) {
-               oclk_dly = 0;
-               sclk_dly = 7;
+               oclk_dly = 180;
+               sclk_dly = 42;
        } else if (rate <= 25000000) {
-               oclk_dly = 0;
-               sclk_dly = 5;
+               oclk_dly = 180;
+               sclk_dly = 75;
        } else if (rate <= 50000000) {
                if (ios->timing == MMC_TIMING_UHS_DDR50) {
-                       oclk_dly = 2;
-                       sclk_dly = 4;
+                       oclk_dly = 60;
+                       sclk_dly = 120;
                } else {
-                       oclk_dly = 3;
-                       sclk_dly = 5;
+                       oclk_dly = 90;
+                       sclk_dly = 150;
                }
+       } else if (rate <= 100000000) {
+               oclk_dly = 6;
+               sclk_dly = 24;
+       } else if (rate <= 200000000) {
+               oclk_dly = 3;
+               sclk_dly = 12;
        } else {
-               /* rate > 50000000 */
-               oclk_dly = 2;
-               sclk_dly = 4;
+               return -EINVAL;
        }
 
-       clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly);
+       clk_set_phase(host->clk_sample, sclk_dly);
+       clk_set_phase(host->clk_output, oclk_dly);
 
        return sunxi_mmc_oclk_onoff(host, 1);
 }
@@ -913,6 +918,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                return PTR_ERR(host->clk_mmc);
        }
 
+       host->clk_output = devm_clk_get(&pdev->dev, "output");
+       if (IS_ERR(host->clk_output)) {
+               dev_err(&pdev->dev, "Could not get output clock\n");
+               return PTR_ERR(host->clk_output);
+       }
+
+       host->clk_sample = devm_clk_get(&pdev->dev, "sample");
+       if (IS_ERR(host->clk_sample)) {
+               dev_err(&pdev->dev, "Could not get sample clock\n");
+               return PTR_ERR(host->clk_sample);
+       }
+
        host->reset = devm_reset_control_get(&pdev->dev, "ahb");
 
        ret = clk_prepare_enable(host->clk_ahb);
@@ -927,11 +944,23 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                goto error_disable_clk_ahb;
        }
 
+       ret = clk_prepare_enable(host->clk_output);
+       if (ret) {
+               dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
+               goto error_disable_clk_mmc;
+       }
+
+       ret = clk_prepare_enable(host->clk_sample);
+       if (ret) {
+               dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
+               goto error_disable_clk_output;
+       }
+
        if (!IS_ERR(host->reset)) {
                ret = reset_control_deassert(host->reset);
                if (ret) {
                        dev_err(&pdev->dev, "reset err %d\n", ret);
-                       goto error_disable_clk_mmc;
+                       goto error_disable_clk_sample;
                }
        }
 
@@ -950,6 +979,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
 error_assert_reset:
        if (!IS_ERR(host->reset))
                reset_control_assert(host->reset);
+error_disable_clk_sample:
+       clk_disable_unprepare(host->clk_sample);
+error_disable_clk_output:
+       clk_disable_unprepare(host->clk_output);
 error_disable_clk_mmc:
        clk_disable_unprepare(host->clk_mmc);
 error_disable_clk_ahb:
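
The driver drops the private clk_sunxi_mmc_phase_control() helper and instead takes two extra clocks ("sample" and "output") from the device tree, programming their delays in degrees through the generic clk_set_phase() API. A consumer-side sketch with illustrative phase values; the clock names match the ones requested above, everything else is simplified:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int setup_mmc_phases(struct device *dev)
{
        struct clk *sample = devm_clk_get(dev, "sample");
        struct clk *output = devm_clk_get(dev, "output");

        if (IS_ERR(sample) || IS_ERR(output))
                return -ENODEV;

        clk_prepare_enable(sample);             /* error handling trimmed */
        clk_prepare_enable(output);

        clk_set_phase(sample, 120);             /* when input data is sampled */
        clk_set_phase(output, 90);              /* when output data is driven */

        return 0;
}
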
index 5b76a173cd95d6d59c41c47d15c081ff39473c53..5897d8d8fa5a962d896e6726edde207b99575d39 100644 (file)
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI
 
 config MTD_NAND_HISI504
        tristate "Support for NAND controller on Hisilicon SoC Hip04"
+       depends on HAS_DMA
        help
          Enables support for NAND controller on Hisilicon SoC Hip04.
 
index 96b0b1d27df1b23846d09e81be2065ea59c66b7b..10b1f7a4fe50511e9fdac06242343f14df9cf71f 100644 (file)
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
        nand_writel(info, NDCR, ndcr | int_mask);
 }
 
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
+{
+       if (info->ecc_bch) {
+               int timeout;
+
+               /*
+                * According to the datasheet, when reading from NDDB
+                * with BCH enabled, after each 32-byte read, we
+                * have to make sure that the NDSR.RDDREQ bit is set.
+                *
+                * Drain the FIFO, 8 32-bit reads at a time, and skip
+                * the polling on the last read.
+                */
+               while (len > 8) {
+                       __raw_readsl(info->mmio_base + NDDB, data, 8);
+
+                       for (timeout = 0;
+                            !(nand_readl(info, NDSR) & NDSR_RDDREQ);
+                            timeout++) {
+                               if (timeout >= 5) {
+                                       dev_err(&info->pdev->dev,
+                                               "Timeout on RDDREQ while draining the FIFO\n");
+                                       return;
+                               }
+
+                               mdelay(1);
+                       }
+
+                       data += 32;
+                       len -= 8;
+               }
+       }
+
+       __raw_readsl(info->mmio_base + NDDB, data, len);
+}
+
 static void handle_data_pio(struct pxa3xx_nand_info *info)
 {
        unsigned int do_bytes = min(info->data_size, info->chunk_size);
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
                                      DIV_ROUND_UP(info->oob_size, 4));
                break;
        case STATE_PIO_READING:
-               __raw_readsl(info->mmio_base + NDDB,
-                            info->data_buff + info->data_buff_pos,
-                            DIV_ROUND_UP(do_bytes, 4));
+               drain_fifo(info,
+                          info->data_buff + info->data_buff_pos,
+                          DIV_ROUND_UP(do_bytes, 4));
 
                if (info->oob_size > 0)
-                       __raw_readsl(info->mmio_base + NDDB,
-                                    info->oob_buff + info->oob_buff_pos,
-                                    DIV_ROUND_UP(info->oob_size, 4));
+                       drain_fifo(info,
+                                  info->oob_buff + info->oob_buff_pos,
+                                  DIV_ROUND_UP(info->oob_size, 4));
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev)
        int ret, irq, cs;
 
        pdata = dev_get_platdata(&pdev->dev);
+       if (pdata->num_cs <= 0)
+               return -ENODEV;
        info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
                            sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
        if (!info)
index 84673ebcf428846fadf26ae881c39172fd45d612..df51d6025a9017413500046edb78401abe0dfdb3 100644 (file)
@@ -157,7 +157,7 @@ config IPVLAN
       making it transparent to the connected L2 switch.
 
       Ipvlan devices can be added using the "ip" command from the
-      iproute2 package starting with the iproute2-X.Y.ZZ release:
+      iproute2 package starting with the iproute2-3.19 release:
 
       "ip link add link <main-dev> [ NAME ] type ipvlan"
 
index 4ce6ca5f3d365a48ab554c7477b157437fff9357..dc6b78e5342f937e6b3b2d498d4694bc0d0fe6c0 100644 (file)
@@ -40,7 +40,7 @@ config DEV_APPLETALK
 
 config LTPC
        tristate "Apple/Farallon LocalTalk PC support"
-       depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+       depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
        help
          This allows you to use the AppleTalk PC card to connect to LocalTalk
          networks. The card is also known as the Farallon PhoneNet PC card.
index 98d73aab52fe962888a675d1293ae69f9731f7d0..58808f6514520c869631b356d6476f8cbc060a14 100644 (file)
@@ -131,7 +131,7 @@ config CAN_RCAR
 
 config CAN_XILINXCAN
        tristate "Xilinx CAN"
-       depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+       depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
        depends on COMMON_CLK && HAS_IOMEM
        ---help---
          Xilinx CAN driver. This driver supports both soft AXI CAN IP and
index 3c82e02e3daee633b65274abce48a4ae71257d5a..b0f69248cb71cd5642f9e34f67d291ee8a842dd2 100644 (file)
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
 
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
 
index 2928f7003041d92c099d31d19621f1a429ea0848..e97a08ce0b90c298577fed09c96dc7a711076e5e 100644 (file)
@@ -14,6 +14,8 @@
  * Copyright (C) 2015 Valeo S.A.
  */
 
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
 #include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -466,10 +468,11 @@ struct kvaser_usb {
 struct kvaser_usb_net_priv {
        struct can_priv can;
 
-       atomic_t active_tx_urbs;
-       struct usb_anchor tx_submitted;
+       spinlock_t tx_contexts_lock;
+       int active_tx_contexts;
        struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
 
+       struct usb_anchor tx_submitted;
        struct completion start_comp, stop_comp;
 
        struct kvaser_usb *dev;
@@ -584,8 +587,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
                while (pos <= actual_len - MSG_HEADER_LEN) {
                        tmp = buf + pos;
 
-                       if (!tmp->len)
-                               break;
+                       /* Handle messages crossing the USB endpoint max packet
+                        * size boundary. Check kvaser_usb_read_bulk_callback()
+                        * for further details.
+                        */
+                       if (tmp->len == 0) {
+                               pos = round_up(pos,
+                                              dev->bulk_in->wMaxPacketSize);
+                               continue;
+                       }
 
                        if (pos + tmp->len > actual_len) {
                                dev_err(dev->udev->dev.parent,
@@ -686,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
        struct kvaser_usb_net_priv *priv;
        struct sk_buff *skb;
        struct can_frame *cf;
+       unsigned long flags;
        u8 channel, tid;
 
        channel = msg->u.tx_acknowledge_header.channel;
@@ -729,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
        stats->tx_packets++;
        stats->tx_bytes += context->dlc;
-       can_get_echo_skb(priv->netdev, context->echo_index);
 
-       context->echo_index = MAX_TX_URBS;
-       atomic_dec(&priv->active_tx_urbs);
+       spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
+       can_get_echo_skb(priv->netdev, context->echo_index);
+       context->echo_index = MAX_TX_URBS;
+       --priv->active_tx_contexts;
        netif_wake_queue(priv->netdev);
+
+       spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 }
 
 static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -787,7 +801,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
                netdev_err(netdev, "Error transmitting URB\n");
                usb_unanchor_urb(urb);
                usb_free_urb(urb);
-               kfree(buf);
                return err;
        }
 
@@ -796,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
        return 0;
 }
 
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
-       int i;
-
-       usb_kill_anchored_urbs(&priv->tx_submitted);
-       atomic_set(&priv->active_tx_urbs, 0);
-
-       for (i = 0; i < MAX_TX_URBS; i++)
-               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-}
-
 static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
                                                 const struct kvaser_usb_error_summary *es,
                                                 struct can_frame *cf)
@@ -1317,8 +1319,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
        while (pos <= urb->actual_length - MSG_HEADER_LEN) {
                msg = urb->transfer_buffer + pos;
 
-               if (!msg->len)
-                       break;
+               /* The Kvaser firmware can only read and write messages that
+                * do not cross the USB endpoint's wMaxPacketSize boundary.
+                * If a follow-up command crosses such a boundary, the firmware
+                * puts a placeholder zero-length command in its place and then
+                * aligns the real command to the next max packet size.
+                *
+                * Handle such cases, or we're going to miss a significant
+                * number of events under heavy rx load on the bus.
+                */
+               if (msg->len == 0) {
+                       pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+                       continue;
+               }
 
                if (pos + msg->len > urb->actual_length) {
                        dev_err(dev->udev->dev.parent, "Format error\n");
@@ -1326,7 +1339,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
                }
 
                kvaser_usb_handle_message(dev, msg);
-
                pos += msg->len;
        }
 
@@ -1498,6 +1510,24 @@ error:
        return err;
 }
 
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+       int i;
+
+       priv->active_tx_contexts = 0;
+       for (i = 0; i < MAX_TX_URBS; i++)
+               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+       usb_kill_anchored_urbs(&priv->tx_submitted);
+       kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
 static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
 {
        int i;
@@ -1615,9 +1645,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        struct urb *urb;
        void *buf;
        struct kvaser_msg *msg;
-       int i, err;
-       int ret = NETDEV_TX_OK;
+       int i, err, ret = NETDEV_TX_OK;
        u8 *msg_tx_can_flags = NULL;            /* GCC */
+       unsigned long flags;
 
        if (can_dropped_invalid_skb(netdev, skb))
                return NETDEV_TX_OK;
@@ -1634,7 +1664,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        if (!buf) {
                stats->tx_dropped++;
                dev_kfree_skb(skb);
-               goto nobufmem;
+               goto freeurb;
        }
 
        msg = buf;
@@ -1671,22 +1701,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        if (cf->can_id & CAN_RTR_FLAG)
                *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
+       spin_lock_irqsave(&priv->tx_contexts_lock, flags);
        for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
                if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
                        context = &priv->tx_contexts[i];
+
+                       context->echo_index = i;
+                       can_put_echo_skb(skb, netdev, context->echo_index);
+                       ++priv->active_tx_contexts;
+                       if (priv->active_tx_contexts >= MAX_TX_URBS)
+                               netif_stop_queue(netdev);
+
                        break;
                }
        }
+       spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
        /* This should never happen; it implies a flow control bug */
        if (!context) {
                netdev_warn(netdev, "cannot find free context\n");
+
+               kfree(buf);
                ret =  NETDEV_TX_BUSY;
-               goto releasebuf;
+               goto freeurb;
        }
 
        context->priv = priv;
-       context->echo_index = i;
        context->dlc = cf->can_dlc;
 
        msg->u.tx_can.tid = context->echo_index;
@@ -1698,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                          kvaser_usb_write_bulk_callback, context);
        usb_anchor_urb(urb, &priv->tx_submitted);
 
-       can_put_echo_skb(skb, netdev, context->echo_index);
-
-       atomic_inc(&priv->active_tx_urbs);
-
-       if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
-               netif_stop_queue(netdev);
-
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (unlikely(err)) {
+               spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
                can_free_echo_skb(netdev, context->echo_index);
+               context->echo_index = MAX_TX_URBS;
+               --priv->active_tx_contexts;
+               netif_wake_queue(netdev);
+
+               spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
-               atomic_dec(&priv->active_tx_urbs);
                usb_unanchor_urb(urb);
 
                stats->tx_dropped++;
@@ -1719,16 +1758,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                else
                        netdev_warn(netdev, "Failed tx_urb %d\n", err);
 
-               goto releasebuf;
+               goto freeurb;
        }
 
-       usb_free_urb(urb);
-
-       return NETDEV_TX_OK;
+       ret = NETDEV_TX_OK;
 
-releasebuf:
-       kfree(buf);
-nobufmem:
+freeurb:
        usb_free_urb(urb);
        return ret;
 }
@@ -1840,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
        struct kvaser_usb *dev = usb_get_intfdata(intf);
        struct net_device *netdev;
        struct kvaser_usb_net_priv *priv;
-       int i, err;
+       int err;
 
        err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
        if (err)
@@ -1854,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
 
        priv = netdev_priv(netdev);
 
+       init_usb_anchor(&priv->tx_submitted);
        init_completion(&priv->start_comp);
        init_completion(&priv->stop_comp);
 
-       init_usb_anchor(&priv->tx_submitted);
-       atomic_set(&priv->active_tx_urbs, 0);
-
-       for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
-               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-
        priv->dev = dev;
        priv->netdev = netdev;
        priv->channel = channel;
 
+       spin_lock_init(&priv->tx_contexts_lock);
+       kvaser_usb_reset_tx_urb_contexts(priv);
+
        priv->can.state = CAN_STATE_STOPPED;
        priv->can.clock.freq = CAN_USB_CLOCK;
        priv->can.bittiming_const = &kvaser_usb_bittiming_const;
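
Both zero-length hunks above implement the same fix: when a command header reports length zero, it is firmware padding inserted so that the next command does not straddle a wMaxPacketSize boundary, and the parser must skip to that boundary rather than stop. A minimal userspace sketch of that walk, assuming a two-byte header for illustration (walk_buffer(), print_msg() and MSG_HEADER_LEN here are illustrative, not the driver's own definitions):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MSG_HEADER_LEN 2   /* illustrative; the driver has its own constant */

struct msg_hdr {
	uint8_t len;       /* total command length, 0 for firmware padding */
	uint8_t id;
};

/* Round x up to the next multiple of 'to' (a power of two), like the
 * kernel's round_up() used in the patch. */
static size_t round_up_pow2(size_t x, size_t to)
{
	return (x + to - 1) & ~(to - 1);
}

/* Walk a bulk-in buffer the way the patched read path does: a zero-length
 * header is firmware padding, so jump to the next wMaxPacketSize boundary
 * and keep going instead of stopping.  'handle' is a hypothetical
 * per-command callback. */
static void walk_buffer(const uint8_t *buf, size_t actual_len,
			size_t max_packet, void (*handle)(const struct msg_hdr *))
{
	size_t pos = 0;

	while (pos + MSG_HEADER_LEN <= actual_len) {
		const struct msg_hdr *msg = (const struct msg_hdr *)(buf + pos);

		if (msg->len == 0) {
			/* Defensive guard: real padding never starts exactly
			 * on a packet boundary. */
			size_t next = round_up_pow2(pos, max_packet);

			if (next == pos)
				next += max_packet;
			pos = next;
			continue;
		}

		if (pos + msg->len > actual_len)
			break;                  /* the driver reports a format error */

		handle(msg);
		pos += msg->len;
	}
}

static void print_msg(const struct msg_hdr *msg)
{
	printf("cmd id=%u len=%u\n", (unsigned)msg->id, (unsigned)msg->len);
}

int main(void)
{
	/* 8-byte "packets": one 4-byte command, zero-length padding, then a
	 * command the firmware pushed to the next packet boundary. */
	uint8_t buf[12] = { 4, 1, 0, 0,   0, 0, 0, 0,   4, 2, 0, 0 };

	walk_buffer(buf, sizeof(buf), 8, print_msg);
	return 0;
}

The key design point is the continue in place of the old break: commands that the firmware pushed past the padding are still delivered, which is what prevents lost events under heavy rx load.
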
index 962c3f027383a7a1e28077b4227b9c40f7542518..0bac0f14edc3cd73727b4a0d9f0776d857c4ba8c 100644 (file)
@@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
 
                pdev->usb_if = ppdev->usb_if;
                pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
+
+               /* do a copy of the ctrlmode[_supported] too */
+               dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
+               dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
        }
 
        pdev->usb_if->dev[dev->ctrl_idx] = dev;
index ee9f650d50264bb74771783b65e860b2aabb3d46..7b7053d3c5fad20e07a75eb5ea987743b51484ba 100644 (file)
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)  \
 {                                                                      \
        u32 indir, dir;                                                 \
        spin_lock(&priv->indir_lock);                                   \
-       indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        dir = __raw_readl(priv->name + off);                            \
+       indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        spin_unlock(&priv->indir_lock);                                 \
        return (u64)indir << 32 | dir;                                  \
 }                                                                      \
index 7769c05543f17fcc8432cac6145ebdd70c4fc5da..ec6eac1f8c95ab79d33209e272a31f71484b0f62 100644 (file)
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
     link->open++;
 
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ax_open(dev);
 } /* axnet_open */
index 9fb7b9d4fd6c6595f7642d859678bc3097998750..2777289a26c0419f855926ef028942074ca62a2f 100644 (file)
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
 
     info->phy_id = info->eth_phy;
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ei_open(dev);
 } /* pcnet_open */
index 760c72c6e2acd50ba8472e4b4dd77170c2c381d6..6725dc00750bd6da367396bceb33adaac10842d0 100644 (file)
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
        u16 pktlength;
        u16 pktstatus;
 
-       while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+       while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+              (count < limit))  {
                pktstatus = rxstatus >> 16;
                pktlength = rxstatus & 0xffff;
 
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
        struct altera_tse_private *priv =
                        container_of(napi, struct altera_tse_private, napi);
        int rxcomplete = 0;
-       int txcomplete = 0;
        unsigned long int flags;
 
-       txcomplete = tse_tx_complete(priv);
+       tse_tx_complete(priv);
 
        rxcomplete = tse_rx(priv, budget);
 
-       if (rxcomplete >= budget || txcomplete > 0)
-               return rxcomplete;
+       if (rxcomplete < budget) {
 
-       napi_gro_flush(napi, false);
-       __napi_complete(napi);
+               napi_gro_flush(napi, false);
+               __napi_complete(napi);
 
-       netdev_dbg(priv->dev,
-                  "NAPI Complete, did %d packets with budget %d\n",
-                  txcomplete+rxcomplete, budget);
+               netdev_dbg(priv->dev,
+                          "NAPI Complete, did %d packets with budget %d\n",
+                          rxcomplete, budget);
 
-       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-       priv->dmaops->enable_rxirq(priv);
-       priv->dmaops->enable_txirq(priv);
-       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-       return rxcomplete + txcomplete;
+               spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+               priv->dmaops->enable_rxirq(priv);
+               priv->dmaops->enable_txirq(priv);
+               spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+       }
+       return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct altera_tse_private *priv;
-       unsigned long int flags;
 
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
        }
        priv = netdev_priv(dev);
 
-       /* turn off desc irqs and enable napi rx */
-       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+       spin_lock(&priv->rxdma_irq_lock);
+       /* reset IRQs */
+       priv->dmaops->clear_rxirq(priv);
+       priv->dmaops->clear_txirq(priv);
+       spin_unlock(&priv->rxdma_irq_lock);
 
        if (likely(napi_schedule_prep(&priv->napi))) {
+               spin_lock(&priv->rxdma_irq_lock);
                priv->dmaops->disable_rxirq(priv);
                priv->dmaops->disable_txirq(priv);
+               spin_unlock(&priv->rxdma_irq_lock);
                __napi_schedule(&priv->napi);
        }
 
-       /* reset IRQs */
-       priv->dmaops->clear_rxirq(priv);
-       priv->dmaops->clear_txirq(priv);
-
-       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
        return IRQ_HANDLED;
 }
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        }
 
        if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-                                &priv->rx_fifo_depth)) {
+                                &priv->tx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
                ret = -ENXIO;
                goto err_free_netdev;
index b93d4404d975571f0f4033f06f4de15b576156d3..885b02b5be07f6732fc0540684cb7875aeec1140 100644 (file)
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
        }
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
+       struct net_device *netdev = pdata->netdev;
+       unsigned int i;
+       int ret;
+
+       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+                              netdev->name, pdata);
+       if (ret) {
+               netdev_alert(netdev, "error requesting irq %d\n",
+                            pdata->dev_irq);
+               return ret;
+       }
+
+       if (!pdata->per_channel_irq)
+               return 0;
+
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               snprintf(channel->dma_irq_name,
+                        sizeof(channel->dma_irq_name) - 1,
+                        "%s-TxRx-%u", netdev_name(netdev),
+                        channel->queue_index);
+
+               ret = devm_request_irq(pdata->dev, channel->dma_irq,
+                                      xgbe_dma_isr, 0,
+                                      channel->dma_irq_name, channel);
+               if (ret) {
+                       netdev_alert(netdev, "error requesting irq %d\n",
+                                    channel->dma_irq);
+                       goto err_irq;
+               }
+       }
+
+       return 0;
+
+err_irq:
+       /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+       for (i--, channel--; i < pdata->channel_count; i--, channel--)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
+       unsigned int i;
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       if (!pdata->per_channel_irq)
+               return;
+
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
                return -EINVAL;
        }
 
-       phy_stop(pdata->phydev);
-
        spin_lock_irqsave(&pdata->lock, flags);
 
        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata, 0);
 
-       /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);
 
+       xgbe_napi_disable(pdata, 0);
+
+       phy_stop(pdata->phydev);
+
        pdata->power_down = 1;
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
        phy_start(pdata->phydev);
 
-       /* Enable Tx/Rx */
+       xgbe_napi_enable(pdata, 0);
+
        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);
 
        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);
 
-       xgbe_napi_enable(pdata, 0);
        netif_tx_start_all_queues(netdev);
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct net_device *netdev = pdata->netdev;
+       int ret;
 
        DBGPR("-->xgbe_start\n");
 
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        phy_start(pdata->phydev);
 
+       xgbe_napi_enable(pdata, 1);
+
+       ret = xgbe_request_irqs(pdata);
+       if (ret)
+               goto err_napi;
+
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
 
        xgbe_init_tx_timers(pdata);
 
-       xgbe_napi_enable(pdata, 1);
        netif_tx_start_all_queues(netdev);
 
        DBGPR("<--xgbe_start\n");
 
        return 0;
+
+err_napi:
+       xgbe_napi_disable(pdata, 1);
+
+       phy_stop(pdata->phydev);
+
+       hw_if->exit(pdata);
+
+       return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
        DBGPR("-->xgbe_stop\n");
 
-       phy_stop(pdata->phydev);
-
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata, 1);
 
        xgbe_stop_tx_timers(pdata);
 
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
 
+       xgbe_free_irqs(pdata);
+
+       xgbe_napi_disable(pdata, 1);
+
+       phy_stop(pdata->phydev);
+
+       hw_if->exit(pdata);
+
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_channel *channel;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int i;
-
        DBGPR("-->xgbe_restart_dev\n");
 
        /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
                return;
 
        xgbe_stop(pdata);
-       synchronize_irq(pdata->dev_irq);
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++)
-                       synchronize_irq(channel->dma_irq);
-       }
 
        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);
 
-       /* Issue software reset to device */
-       hw_if->exit(pdata);
-
        xgbe_start(pdata);
 
        DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
-       struct xgbe_channel *channel = NULL;
-       unsigned int i = 0;
        int ret;
 
        DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-       /* Request interrupts */
-       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-                              netdev->name, pdata);
-       if (ret) {
-               netdev_alert(netdev, "error requesting irq %d\n",
-                            pdata->dev_irq);
-               goto err_rings;
-       }
-
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++) {
-                       snprintf(channel->dma_irq_name,
-                                sizeof(channel->dma_irq_name) - 1,
-                                "%s-TxRx-%u", netdev_name(netdev),
-                                channel->queue_index);
-
-                       ret = devm_request_irq(pdata->dev, channel->dma_irq,
-                                              xgbe_dma_isr, 0,
-                                              channel->dma_irq_name, channel);
-                       if (ret) {
-                               netdev_alert(netdev,
-                                            "error requesting irq %d\n",
-                                            channel->dma_irq);
-                               goto err_irq;
-                       }
-               }
-       }
-
        ret = xgbe_start(pdata);
        if (ret)
-               goto err_start;
+               goto err_rings;
 
        DBGPR("<--xgbe_open\n");
 
        return 0;
 
-err_start:
-       hw_if->exit(pdata);
-
-err_irq:
-       if (pdata->per_channel_irq) {
-               /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-               for (i--, channel--; i < pdata->channel_count; i--, channel--)
-                       devm_free_irq(pdata->dev, channel->dma_irq, channel);
-       }
-
-       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
        desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1424,16 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
-       struct xgbe_channel *channel;
-       unsigned int i;
 
        DBGPR("-->xgbe_close\n");
 
        /* Stop the device */
        xgbe_stop(pdata);
 
-       /* Issue software reset to device */
-       hw_if->exit(pdata);
-
        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);
 
-       /* Release the interrupts */
-       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++)
-                       devm_free_irq(pdata->dev, channel->dma_irq, channel);
-       }
-
        /* Free the channel and ring structures */
        xgbe_free_channels(pdata);
 
index 869d97fcf7810ff9abb7a2cc6c7ade6e49ceb2df..b927021c6c4030c5f63abd9644aac7169d8b6034 100644 (file)
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;
 
-       if (!efi_enabled(EFI_BOOT)) {
+       if (pdata->clk) {
                clk_prepare_enable(pdata->clk);
                clk_disable_unprepare(pdata->clk);
                clk_prepare_enable(pdata->clk);
index 4de62b210c85bab8e3d172234f24381da7a66276..635a83be7e5ec5bec5670ceebca692abd9257260 100644 (file)
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev)
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgene_enet_acpi_match[] = {
        { "APMC0D05", },
+       { "APMC0D30", },
+       { "APMC0D31", },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
 #ifdef CONFIG_OF
 static struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",},
+       {.compatible = "apm,xgene1-sgenet",},
+       {.compatible = "apm,xgene1-xgenet",},
        {},
 };
 
index 21206d33b638cf3a76e67abeb0520d71a0310b29..a7f2cc3e485eebfae962fe24cfc1142021a74cde 100644 (file)
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
 {
        struct bcm_enet_priv *priv;
        struct net_device *dev;
-       int tx_work_done, rx_work_done;
+       int rx_work_done;
 
        priv = container_of(napi, struct bcm_enet_priv, napi);
        dev = priv->net_dev;
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
                         ENETDMAC_IR, priv->tx_chan);
 
        /* reclaim sent skb */
-       tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+       bcm_enet_tx_reclaim(dev, 0);
 
        spin_lock(&priv->rx_lock);
        rx_work_done = bcm_enet_receive_queue(dev, budget);
        spin_unlock(&priv->rx_lock);
 
-       if (rx_work_done >= budget || tx_work_done > 0) {
-               /* rx/tx queue is not yet empty/clean */
+       if (rx_work_done >= budget) {
+               /* rx queue is not yet empty/clean */
                return rx_work_done;
        }
 
index 5b308a4a4d0eccc35c641967fcf2459ec736f094..783543ad1fcfa1a4976090e0797f7f15f29a1724 100644 (file)
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
-       STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-       STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-       STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+       STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+       STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+       STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
                s = &bcm_sysport_gstrings_stats[i];
                switch (s->type) {
                case BCM_SYSPORT_STAT_NETDEV:
+               case BCM_SYSPORT_STAT_SOFT:
                        continue;
                case BCM_SYSPORT_STAT_MIB_RX:
                case BCM_SYSPORT_STAT_MIB_TX:
index fc19417d82a505dc61c9f25a940f891522763e00..7e3d87a88c76a81e2c36b65559d8b2b0bcf34217 100644 (file)
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
        BCM_SYSPORT_STAT_RUNT,
        BCM_SYSPORT_STAT_RXCHK,
        BCM_SYSPORT_STAT_RBUF,
+       BCM_SYSPORT_STAT_SOFT,
 };
 
 /* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
 #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
 #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
 #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
 
 #define STAT_RXCHK(str, m, ofs) { \
        .stat_string = str, \
index 676ffe09318073e33b707d46423bdcf780451750..0469f72c6e7e8e01147446a528d5771a147b5cc6 100644 (file)
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
        slot->skb = skb;
        slot->dma_addr = dma_addr;
 
-       if (slot->dma_addr & 0xC0000000)
-               bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
        return 0;
 }
 
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
                                  ring->mmio_base);
                        goto err_dma_free;
                }
-               if (ring->dma_base & 0xC0000000)
-                       bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
                ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
                                                      BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
                        err = -ENOMEM;
                        goto err_dma_free;
                }
-               if (ring->dma_base & 0xC0000000)
-                       bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
                ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
                                                      BGMAC_DMA_RING_RX);
index 7155e1d2c208c7253b846ee954086fef5fd574da..996e215fc3246c4fba6f1714ddceb69b88f179be 100644 (file)
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
 
+       /* Set PCIe reset type to fundamental for EEH recovery */
+       pdev->needs_freset = 1;
+
        /* AER (Advanced Error reporting) configuration */
        rc = pci_enable_pcie_error_reporting(pdev);
        if (!rc)
@@ -12766,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
                NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
-       if (!CHIP_IS_E1x(bp)) {
+       if (!chip_is_e1x) {
                dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
                                    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
                dev->hw_enc_features =
index ff83c46bc38961561441814813f24d9b33da42ac..6befde61c203461a27ac0298d619f4f78b7c366e 100644 (file)
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
        BCMGENET_STAT_MIB_TX,
        BCMGENET_STAT_RUNT,
        BCMGENET_STAT_MISC,
+       BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
        .stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
                        UMAC_RBUF_OVFL_CNT),
        STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-       STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-       STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-       STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+       STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+       STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+       STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN     ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
                s = &bcmgenet_gstrings_stats[i];
                switch (s->type) {
                case BCMGENET_STAT_NETDEV:
+               case BCMGENET_STAT_SOFT:
                        continue;
                case BCMGENET_STAT_MIB_RX:
                case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-                                 struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+                                         struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
+       unsigned int pkts_compl = 0;
        unsigned int bds_compl;
        unsigned int c_index;
 
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                tx_cb_ptr = ring->cbs + last_c_index;
                bds_compl = 0;
                if (tx_cb_ptr->skb) {
+                       pkts_compl++;
                        bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                last_c_index &= (num_tx_bds - 1);
        }
 
-       if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-               ring->int_disable(priv, ring);
-
-       if (netif_tx_queue_stopped(txq))
-               netif_tx_wake_queue(txq);
+       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               if (netif_tx_queue_stopped(txq))
+                       netif_tx_wake_queue(txq);
+       }
 
        ring->c_index = c_index;
+
+       return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
                                struct bcmgenet_tx_ring *ring)
 {
+       unsigned int released;
        unsigned long flags;
 
        spin_lock_irqsave(&ring->lock, flags);
-       __bcmgenet_tx_reclaim(dev, ring);
+       released = __bcmgenet_tx_reclaim(dev, ring);
        spin_unlock_irqrestore(&ring->lock, flags);
+
+       return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcmgenet_tx_ring *ring =
+               container_of(napi, struct bcmgenet_tx_ring, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+       if (work_done == 0) {
+               napi_complete(napi);
+               ring->int_enable(ring->priv, ring);
+
+               return 0;
+       }
+
+       return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        bcmgenet_tdma_ring_writel(priv, ring->index,
                                  ring->prod_index, TDMA_PROD_INDEX);
 
-       if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+       if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
-               ring->int_enable(priv, ring);
-       }
 
 out:
        spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        struct device *kdev = &priv->pdev->dev;
        int ret;
        u32 reg, cpu_mask_clear;
+       int index;
 
        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intr_disable(priv);
 
-       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
 
        dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
+       for (index = 0; index < priv->hw_params->tx_queues; index++)
+               bcmgenet_intrl2_1_writel(priv, (1 << index),
+                                        INTRL2_CPU_MASK_CLEAR);
+
        /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
 
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        unsigned int first_bd;
 
        spin_lock_init(&ring->lock);
+       ring->priv = priv;
+       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
        ring->index = index;
        if (index == DESC_INDEX) {
                ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  TDMA_WRITE_PTR);
        bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
                                  DMA_END_ADDR);
+
+       napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+                                 unsigned int index)
+{
+       struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+       napi_disable(&ring->napi);
+       netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        return ret;
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
        kfree(priv->tx_cbs);
 }
 
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+       int i;
+
+       bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+       for (i = 0; i < priv->hw_params->tx_queues; i++)
+               bcmgenet_fini_tx_ring(priv, i);
+
+       __bcmgenet_fini_dma(priv);
+}
+
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
        priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
                               GFP_KERNEL);
        if (!priv->tx_cbs) {
-               bcmgenet_fini_dma(priv);
+               __bcmgenet_fini_dma(priv);
                return -ENOMEM;
        }
 
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
                        struct bcmgenet_priv, napi);
        unsigned int work_done;
 
-       /* tx reclaim */
-       bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
        work_done = bcmgenet_desc_rx(priv, budget);
 
        /* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
        struct bcmgenet_priv *priv = dev_id;
+       struct bcmgenet_tx_ring *ring;
        unsigned int index;
 
        /* Save irq status for bottom-half processing. */
        priv->irq1_stat =
                bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-               ~priv->int1_mask;
+               ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
        /* clear interrupts */
        bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
                  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
        /* Check the MBDONE interrupts.
         * packet is done, reclaim descriptors
         */
-       if (priv->irq1_stat & 0x0000ffff) {
-               index = 0;
-               for (index = 0; index < 16; index++) {
-                       if (priv->irq1_stat & (1 << index))
-                               bcmgenet_tx_reclaim(priv->dev,
-                                                   &priv->tx_rings[index]);
+       for (index = 0; index < priv->hw_params->tx_queues; index++) {
+               if (!(priv->irq1_stat & BIT(index)))
+                       continue;
+
+               ring = &priv->tx_rings[index];
+
+               if (likely(napi_schedule_prep(&ring->napi))) {
+                       ring->int_disable(priv, ring);
+                       __napi_schedule(&ring->napi);
                }
        }
+
        return IRQ_HANDLED;
 }
 
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
        if (priv->irq0_stat &
                        (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-               /* Tx reclaim */
-               bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+               struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+               if (likely(napi_schedule_prep(&ring->napi))) {
+                       ring->int_disable(priv, ring);
+                       __napi_schedule(&ring->napi);
+               }
        }
        if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
                                UMAC_IRQ_PHY_DET_F |
index b36ddec0cc0a3c5d1c64f9c2818a4b44464c41f8..0d370d168aee0ea24924fbc3afc011f3a8697841 100644 (file)
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
 
 struct bcmgenet_tx_ring {
        spinlock_t      lock;           /* ring lock */
+       struct napi_struct napi;        /* NAPI per tx queue */
        unsigned int    index;          /* ring index */
        unsigned int    queue;          /* queue index */
        struct enet_cb  *cbs;           /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
                           struct bcmgenet_tx_ring *);
        void (*int_disable)(struct bcmgenet_priv *priv,
                            struct bcmgenet_tx_ring *);
+       struct bcmgenet_priv *priv;
 };
 
 /* device context */
index 149a0d70c10883f3f769904ef6ae2ca6b2f66f8e..b97122926d3aa91210a8945d45f268d370c86ee4 100644 (file)
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
                return -EINVAL;
 
+       reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
        if (wol->wolopts & WAKE_MAGICSECURE) {
                bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
                                     UMAC_MPD_PW_MS);
                bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
                                     UMAC_MPD_PW_LS);
-               reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
                reg |= MPD_PW_EN;
-               bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+       } else {
+               reg &= ~MPD_PW_EN;
        }
+       bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
 
        /* Flag the device and relevant IRQ as wakeup capable */
        if (wol->wolopts) {
index ad76b8e35a00e188e39d00f4c5f70d97c3df5363..81d41539fcbab8e015d28e4b7729bc4812f60f0a 100644 (file)
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = {
 };
 
 #if defined(CONFIG_OF)
-static struct macb_config pc302gem_config = {
+static const struct macb_config pc302gem_config = {
        .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
        .dma_burst_length = 16,
 };
 
-static struct macb_config sama5d3_config = {
+static const struct macb_config sama5d3_config = {
        .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
        .dma_burst_length = 16,
 };
 
-static struct macb_config sama5d4_config = {
+static const struct macb_config sama5d4_config = {
        .caps = 0,
        .dma_burst_length = 4,
 };
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp)
        if (bp->pdev->dev.of_node) {
                match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
                if (match && match->data) {
-                       config = (const struct macb_config *)match->data;
+                       config = match->data;
 
                        bp->caps = config->caps;
                        /*
index 31dc080f2437b6b02dde206b1e7c5343100fdde9..ff85619a97325fc0f1fa64a8896cbb8cc417d3cc 100644 (file)
 
 /* Bitfields in MID */
 #define MACB_IDNUM_OFFSET                      16
-#define MACB_IDNUM_SIZE                                16
+#define MACB_IDNUM_SIZE                                12
 #define MACB_REV_OFFSET                                0
 #define MACB_REV_SIZE                          16
 
index 9062a843424688beabaa21e46b3e210387658c81..c308429dd9c7fa0aebf2cee3b951f71f3863d939 100644 (file)
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
 }
 
 static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
-                                  int addr_len)
+                                  u8 v6)
 {
-       return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
-                               ipv6_clip_hash(ctbl, addr);
+       return v6 ? ipv6_clip_hash(ctbl, addr) :
+                       ipv4_clip_hash(ctbl, addr);
 }
 
 static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
-       int addr_len;
-       int ret = 0;
+       int ret = -1;
 
        if (!ctbl)
                return 0;
 
-       if (v6)
-               addr_len = 16;
-       else
-               addr_len = 4;
-
-       hash = clip_addr_hash(ctbl, addr, addr_len);
+       hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-               if (addr_len == cte->addr_len &&
-                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+               if (cte->addr6.sin6_family == AF_INET6 && v6)
+                       ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+                                    sizeof(struct in6_addr));
+               else if (cte->addr.sin_family == AF_INET && !v6)
+                       ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+                                    sizeof(struct in_addr));
+               if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
                spin_lock_init(&ce->lock);
                atomic_set(&ce->refcnt, 0);
                atomic_dec(&ctbl->nfree);
-               ce->addr_len = addr_len;
-               memcpy(ce->addr, lip, addr_len);
                list_add_tail(&ce->list, &ctbl->hash_list[hash]);
                if (v6) {
+                       ce->addr6.sin6_family = AF_INET6;
+                       memcpy(ce->addr6.sin6_addr.s6_addr,
+                              lip, sizeof(struct in6_addr));
                        ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
                        if (ret) {
                                write_unlock_bh(&ctbl->lock);
                                return ret;
                        }
+               } else {
+                       ce->addr.sin_family = AF_INET;
+                       memcpy((char *)(&ce->addr.sin_addr), lip,
+                              sizeof(struct in_addr));
                }
        } else {
                write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
-       int addr_len;
-
-       if (v6)
-               addr_len = 16;
-       else
-               addr_len = 4;
+       int ret = -1;
 
-       hash = clip_addr_hash(ctbl, addr, addr_len);
+       hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-               if (addr_len == cte->addr_len &&
-                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+               if (cte->addr6.sin6_family == AF_INET6 && v6)
+                       ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+                                    sizeof(struct in6_addr));
+               else if (cte->addr.sin_family == AF_INET && !v6)
+                       ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+                                    sizeof(struct in_addr));
+               if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
        for (i = 0 ; i < ctbl->clipt_size;  ++i) {
                list_for_each_entry(ce, &ctbl->hash_list[i], list) {
                        ip[0] = '\0';
-                       if (ce->addr_len == 16)
-                               sprintf(ip, "%pI6c", ce->addr);
-                       else
-                               sprintf(ip, "%pI4c", ce->addr);
+                       sprintf(ip, "%pISc", &ce->addr);
                        seq_printf(seq, "%-25s   %u\n", ip,
                                   atomic_read(&ce->refcnt));
                }
index 2eaba0161cf8104eb8cbf9756112174fd1275b38..35eb43c6bcbbe37e5f934a767154bc4f4fe2f5c7 100644 (file)
@@ -14,8 +14,10 @@ struct clip_entry {
        spinlock_t lock;        /* Hold while modifying clip reference */
        atomic_t refcnt;
        struct list_head list;
-       u32 addr[4];
-       int addr_len;
+       union {
+               struct sockaddr_in addr;
+               struct sockaddr_in6 addr6;
+       };
 };
 
 struct clip_tbl {
index d6cda17efe6ef475a5579d8248e01a3158bbc272..97842d03675b327d65f564b351f24d12bf72c18b 100644 (file)
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 #define T4_MEMORY_WRITE        0
 #define T4_MEMORY_READ 1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
-                __be32 *buf, int dir);
+                void *buf, int dir);
 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
                                  u32 len, __be32 *buf)
 {
index 4d643b65265e8ee0ad12ed886cbb2b77b9d2557b..1abdfa123c6cf8d42a4788400539b40b05ed84eb 100644 (file)
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *     @addr: address within indicated memory type
  *     @len: amount of memory to transfer
- *     @buf: host memory buffer
+ *     @hbuf: host memory buffer
  *     @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *     Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *     caller's responsibility to perform appropriate byte order conversions.
  */
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
-                u32 len, __be32 *buf, int dir)
+                u32 len, void *hbuf, int dir)
 {
        u32 pos, offset, resid, memoffset;
        u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+       u32 *buf;
 
        /* Argument sanity checks ...
         */
-       if (addr & 0x3)
+       if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
                return -EINVAL;
+       buf = (u32 *)hbuf;
 
        /* It's convenient to be able to handle lengths which aren't a
         * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 
        /* Transfer data to/from the adapter as long as there's an integral
         * number of 32-bit transfers to complete.
+        *
+        * A note on Endianness issues:
+        *
+        * The "register" reads and writes below from/to the PCI-E Memory
+        * Window invoke the standard adapter Big-Endian to PCI-E Link
+        * Little-Endian "swizzle."  As a result, if we have the following
+        * data in adapter memory:
+        *
+        *     Memory:  ... | b0 | b1 | b2 | b3 | ...
+        *     Address:      i+0  i+1  i+2  i+3
+        *
+        * Then a read of the adapter memory via the PCI-E Memory Window
+        * will yield:
+        *
+        *     x = readl(i)
+        *         31                  0
+        *         [ b3 | b2 | b1 | b0 ]
+        *
+        * If this value is stored into local memory on a Little-Endian system
+        * it will show up correctly in local memory as:
+        *
+        *     ( ..., b0, b1, b2, b3, ... )
+        *
+        * But on a Big-Endian system, the store will show up in memory
+        * incorrectly swizzled as:
+        *
+        *     ( ..., b3, b2, b1, b0, ... )
+        *
+        * So we need to account for this in the reads and writes to the
+        * PCI-E Memory Window below by undoing the register read/write
+        * swizzles.
         */
        while (len > 0) {
                if (dir == T4_MEMORY_READ)
-                       *buf++ = (__force __be32) t4_read_reg(adap,
-                                                       mem_base + offset);
+                       *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+                                               mem_base + offset));
                else
                        t4_write_reg(adap, mem_base + offset,
-                                    (__force u32) *buf++);
+                                    (__force u32)cpu_to_le32(*buf++));
                offset += sizeof(__be32);
                len -= sizeof(__be32);
 
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         */
        if (resid) {
                union {
-                       __be32 word;
+                       u32 word;
                        char byte[4];
                } last;
                unsigned char *bp;
                int i;
 
                if (dir == T4_MEMORY_READ) {
-                       last.word = (__force __be32) t4_read_reg(adap,
-                                                       mem_base + offset);
+                       last.word = le32_to_cpu(
+                                       (__force __le32)t4_read_reg(adap,
+                                               mem_base + offset));
                        for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
                                bp[i] = last.byte[i];
                } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                        for (i = resid; i < 4; i++)
                                last.byte[i] = 0;
                        t4_write_reg(adap, mem_base + offset,
-                                    (__force u32) last.word);
+                                    (__force u32)cpu_to_le32(last.word));
                }
        }
 
@@ -1086,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                }
 
                /* Installed successfully, update the cached header too. */
-               memcpy(card_fw, fs_fw, sizeof(*card_fw));
+               *card_fw = *fs_fw;
                card_fw_usable = 1;
                *reset = 0;     /* already reset as part of load_fw */
        }
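
The t4_memory_rw() hunks above route every PCI-E Memory Window transfer through le32_to_cpu()/cpu_to_le32(), so the host buffer ends up byte-for-byte identical to adapter memory on both little- and big-endian hosts. A small stand-alone sketch of the byte-ordering argument (plain C; window_read() is an illustrative stand-in, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Pretend "register read": returns bytes b0..b3 of device memory packed
 * little-endian into a host integer, as the PCI-E window read would. */
static uint32_t window_read(const uint8_t *dev_mem)
{
        return (uint32_t)dev_mem[0] | ((uint32_t)dev_mem[1] << 8) |
               ((uint32_t)dev_mem[2] << 16) | ((uint32_t)dev_mem[3] << 24);
}

int main(void)
{
        uint8_t dev_mem[4] = { 0xb0, 0xb1, 0xb2, 0xb3 };
        uint8_t host_buf[4];
        uint32_t v = window_read(dev_mem);
        int i;

        /* Store the value back out as little-endian bytes, i.e. the effect
         * of le32_to_cpu() followed by a native store: the buffer matches
         * device byte order regardless of host endianness. */
        for (i = 0; i < 4; i++)
                host_buf[i] = (v >> (8 * i)) & 0xff;

        for (i = 0; i < 4; i++)
                printf("%02x ", host_buf[i]);
        printf("\n");
        return 0;
}
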
index 9cbe038a388ea62a6f4552e7f088dc5816dc5b5c..a5179bfcdc2c1b6124a92f33f9c5f57363751a02 100644 (file)
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
        }
 
        if (ENIC_TEST_INTR(pba, notify_intr)) {
-               vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                enic_notify_check(enic);
+               vnic_intr_return_all_credits(&enic->intr[notify_intr]);
        }
 
        if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);
 
-       vnic_intr_return_all_credits(&enic->intr[intr]);
        enic_notify_check(enic);
+       vnic_intr_return_all_credits(&enic->intr[intr]);
 
        return IRQ_HANDLED;
 }
index 3b42556f7f8d66929f1435445df06072c65ae6aa..ed41559bae771b8d58e49f21a46f7a79d9f20cac 100644 (file)
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
                               (unsigned int)tp->rx_ring[i].buffer1,
                               (unsigned int)tp->rx_ring[i].buffer2,
                               buf[0], buf[1], buf[2]);
-                       for (j = 0; buf[j] != 0xee && j < 1600; j++)
+                       for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
                                if (j < 100)
                                        pr_cont(" %02x", buf[j]);
                        pr_cont(" j=%d\n", j);
index 9bb6220663b21a505f2332f028ce3a1e13b77f86..78e1ce09b1ab1deadad177472593e020a8f9d3c2 100644 (file)
@@ -1189,13 +1189,12 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
        struct  fec_enet_private *fep;
-       struct bufdesc *bdp, *bdp_t;
+       struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
        struct fec_enet_priv_tx_q *txq;
        struct netdev_queue *nq;
        int     index = 0;
-       int     i, bdnum;
        int     entries_free;
 
        fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                if (bdp == txq->cur_tx)
                        break;
 
-               bdp_t = bdp;
-               bdnum = 1;
-               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-               skb = txq->tx_skbuff[index];
-               while (!skb) {
-                       bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
-                       index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-                       skb = txq->tx_skbuff[index];
-                       bdnum++;
-               }
-               if (skb_shinfo(skb)->nr_frags &&
-                   (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
-                       break;
+               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 
-               for (i = 0; i < bdnum; i++) {
-                       if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-                               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                                                bdp->cbd_datlen, DMA_TO_DEVICE);
-                       bdp->cbd_bufaddr = 0;
-                       if (i < bdnum - 1)
-                               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-               }
+               skb = txq->tx_skbuff[index];
                txq->tx_skbuff[index] = NULL;
+               if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                                       bdp->cbd_datlen, DMA_TO_DEVICE);
+               bdp->cbd_bufaddr = 0;
+               if (!skb) {
+                       bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+                       continue;
+               }
 
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
                        vlan_packet_rcvd = true;
 
-                       skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
-                                                      data, (2 * ETH_ALEN));
+                       memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
                        skb_pull(skb, VLAN_HLEN);
                }
 
@@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id)
        writel(int_events, fep->hwp + FEC_IEVENT);
        fec_enet_collect_events(fep, int_events);
 
-       if (fep->work_tx || fep->work_rx) {
+       if ((fep->work_tx || fep->work_rx) && fep->link) {
                ret = IRQ_HANDLED;
 
                if (napi_schedule_prep(&fep->napi)) {
@@ -3383,7 +3370,6 @@ fec_drv_remove(struct platform_device *pdev)
                regulator_disable(fep->reg_phy);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
-       fec_enet_clk_enable(ndev, false);
        of_node_put(fep->phy_node);
        free_netdev(ndev);
 
index 43df78882e484e065706bd04c322fa8276d4c424..7bf3682cdf478b1597cf04e071b525eed31adb69 100644 (file)
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np,
        return 0;
 }
 
+static int gfar_of_group_count(struct device_node *np)
+{
+       struct device_node *child;
+       int num = 0;
+
+       for_each_available_child_of_node(np, child)
+               if (!of_node_cmp(child->name, "queue-group"))
+                       num++;
+
+       return num;
+}
+
 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 {
        const char *model;
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                num_rx_qs = 1;
        } else { /* MQ_MG_MODE */
                /* get the actual number of supported groups */
-               unsigned int num_grps = of_get_available_child_count(np);
+               unsigned int num_grps = gfar_of_group_count(np);
 
                if (num_grps == 0 || num_grps > MAXGROUPS) {
                        dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 
        /* Parse and initialize group specific information */
        if (priv->mode == MQ_MG_MODE) {
-               for_each_child_of_node(np, child) {
+               for_each_available_child_of_node(np, child) {
+                       if (of_node_cmp(child->name, "queue-group"))
+                               continue;
+
                        err = gfar_parse_group(child, priv, model);
                        if (err)
                                goto err_grp_init;
@@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev)
        struct phy_device *phydev = priv->phydev;
 
        if (unlikely(phydev->link != priv->oldlink ||
-                    phydev->duplex != priv->oldduplex ||
-                    phydev->speed != priv->oldspeed))
+                    (phydev->link && (phydev->duplex != priv->oldduplex ||
+                                      phydev->speed != priv->oldspeed))))
                gfar_update_link_state(priv);
 }
 
index e8a1adb7a96255bf8da1baa87b29514527c2764d..c05e50759621137fa3f9749a55c347381d54ed55 100644 (file)
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
        device_remove_file(&dev->dev, &dev_attr_remove_port);
 }
 
+static int ehea_reboot_notifier(struct notifier_block *nb,
+                               unsigned long action, void *unused)
+{
+       if (action == SYS_RESTART) {
+               pr_info("Reboot: freeing all eHEA resources\n");
+               ibmebus_unregister_driver(&ehea_driver);
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+       .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+                            unsigned long action, void *data)
+{
+       int ret = NOTIFY_BAD;
+       struct memory_notify *arg = data;
+
+       mutex_lock(&dlpar_mem_lock);
+
+       switch (action) {
+       case MEM_CANCEL_OFFLINE:
+               pr_info("memory offlining canceled");
+               /* Fall through: re-add canceled memory block */
+
+       case MEM_ONLINE:
+               pr_info("memory is going online");
+               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+               if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+                       goto out_unlock;
+               ehea_rereg_mrs();
+               break;
+
+       case MEM_GOING_OFFLINE:
+               pr_info("memory is going offline");
+               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+               if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+                       goto out_unlock;
+               ehea_rereg_mrs();
+               break;
+
+       default:
+               break;
+       }
+
+       ehea_update_firmware_handles();
+       ret = NOTIFY_OK;
+
+out_unlock:
+       mutex_unlock(&dlpar_mem_lock);
+       return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+       .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+       int i;
+
+       if (ehea_fw_handles.arr)
+               for (i = 0; i < ehea_fw_handles.num_entries; i++)
+                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+                                            ehea_fw_handles.arr[i].fwh,
+                                            FORCE_FREE);
+
+       if (ehea_bcmc_regs.arr)
+               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+                                             ehea_bcmc_regs.arr[i].port_id,
+                                             ehea_bcmc_regs.arr[i].reg_type,
+                                             ehea_bcmc_regs.arr[i].macaddr,
+                                             0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+       int ret = 0;
+
+       if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+               return 0;
+
+       ret = ehea_create_busmap();
+       if (ret) {
+               pr_info("ehea_create_busmap failed\n");
+               goto out;
+       }
+
+       ret = register_reboot_notifier(&ehea_reboot_nb);
+       if (ret) {
+               pr_info("register_reboot_notifier failed\n");
+               goto out;
+       }
+
+       ret = register_memory_notifier(&ehea_mem_nb);
+       if (ret) {
+               pr_info("register_memory_notifier failed\n");
+               goto out2;
+       }
+
+       ret = crash_shutdown_register(ehea_crash_handler);
+       if (ret) {
+               pr_info("crash_shutdown_register failed\n");
+               goto out3;
+       }
+
+       return 0;
+
+out3:
+       unregister_memory_notifier(&ehea_mem_nb);
+out2:
+       unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+       return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+       if (atomic_read(&ehea_memory_hooks_registered))
+               return;
+
+       unregister_reboot_notifier(&ehea_reboot_nb);
+       if (crash_shutdown_unregister(ehea_crash_handler))
+               pr_info("failed unregistering crash handler\n");
+       unregister_memory_notifier(&ehea_mem_nb);
+}
+
 static int ehea_probe_adapter(struct platform_device *dev)
 {
        struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
        int ret;
        int i;
 
+       ret = ehea_register_memory_hooks();
+       if (ret)
+               return ret;
+
        if (!dev || !dev->dev.of_node) {
                pr_err("Invalid ibmebus device probed\n");
                return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
        return 0;
 }
 
-static void ehea_crash_handler(void)
-{
-       int i;
-
-       if (ehea_fw_handles.arr)
-               for (i = 0; i < ehea_fw_handles.num_entries; i++)
-                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
-                                            ehea_fw_handles.arr[i].fwh,
-                                            FORCE_FREE);
-
-       if (ehea_bcmc_regs.arr)
-               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
-                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
-                                             ehea_bcmc_regs.arr[i].port_id,
-                                             ehea_bcmc_regs.arr[i].reg_type,
-                                             ehea_bcmc_regs.arr[i].macaddr,
-                                             0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
-                             unsigned long action, void *data)
-{
-       int ret = NOTIFY_BAD;
-       struct memory_notify *arg = data;
-
-       mutex_lock(&dlpar_mem_lock);
-
-       switch (action) {
-       case MEM_CANCEL_OFFLINE:
-               pr_info("memory offlining canceled");
-               /* Readd canceled memory block */
-       case MEM_ONLINE:
-               pr_info("memory is going online");
-               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-               if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-                       goto out_unlock;
-               ehea_rereg_mrs();
-               break;
-       case MEM_GOING_OFFLINE:
-               pr_info("memory is going offline");
-               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-               if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-                       goto out_unlock;
-               ehea_rereg_mrs();
-               break;
-       default:
-               break;
-       }
-
-       ehea_update_firmware_handles();
-       ret = NOTIFY_OK;
-
-out_unlock:
-       mutex_unlock(&dlpar_mem_lock);
-       return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
-       .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
-                               unsigned long action, void *unused)
-{
-       if (action == SYS_RESTART) {
-               pr_info("Reboot: freeing all eHEA resources\n");
-               ibmebus_unregister_driver(&ehea_driver);
-       }
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
-       .notifier_call = ehea_reboot_notifier,
-};
-
 static int check_module_parm(void)
 {
        int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
        if (ret)
                goto out;
 
-       ret = ehea_create_busmap();
-       if (ret)
-               goto out;
-
-       ret = register_reboot_notifier(&ehea_reboot_nb);
-       if (ret)
-               pr_info("failed registering reboot notifier\n");
-
-       ret = register_memory_notifier(&ehea_mem_nb);
-       if (ret)
-               pr_info("failed registering memory remove notifier\n");
-
-       ret = crash_shutdown_register(ehea_crash_handler);
-       if (ret)
-               pr_info("failed registering crash handler\n");
-
        ret = ibmebus_register_driver(&ehea_driver);
        if (ret) {
                pr_err("failed registering eHEA device driver on ebus\n");
-               goto out2;
+               goto out;
        }
 
        ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
        if (ret) {
                pr_err("failed to register capabilities attribute, ret=%d\n",
                       ret);
-               goto out3;
+               goto out2;
        }
 
        return ret;
 
-out3:
-       ibmebus_unregister_driver(&ehea_driver);
 out2:
-       unregister_memory_notifier(&ehea_mem_nb);
-       unregister_reboot_notifier(&ehea_reboot_nb);
-       crash_shutdown_unregister(ehea_crash_handler);
+       ibmebus_unregister_driver(&ehea_driver);
 out:
        return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
-       int ret;
-
        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
-       unregister_reboot_notifier(&ehea_reboot_nb);
-       ret = crash_shutdown_unregister(ehea_crash_handler);
-       if (ret)
-               pr_info("failed unregistering crash handler\n");
-       unregister_memory_notifier(&ehea_mem_nb);
+       ehea_unregister_memory_hooks();
        kfree(ehea_fw_handles.arr);
        kfree(ehea_bcmc_regs.arr);
        ehea_destroy_busmap();
index 21978cc019e7c86dab83968ba994c0e9051c8e33..cd7675ac5bf9ed8b8658996d2d27190c6b20245f 100644 (file)
@@ -1136,6 +1136,8 @@ restart_poll:
        ibmveth_replenish_task(adapter);
 
        if (frames_processed < budget) {
+               napi_complete(napi);
+
                /* We think we are done - reenable interrupts,
                 * then check once more to make sure we are done.
                 */
@@ -1144,8 +1146,6 @@ restart_poll:
 
                BUG_ON(lpar_rc != H_SUCCESS);
 
-               napi_complete(napi);
-
                if (ibmveth_rxq_pending_buffer(adapter) &&
                    napi_reschedule(napi)) {
                        lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
        return ret;
 }
 
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+       struct ibmveth_adapter *adapter = netdev_priv(dev);
+       struct sockaddr *addr = p;
+       u64 mac_address;
+       int rc;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+       rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+       if (rc) {
+               netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+               return rc;
+       }
+
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       return 0;
+}
+
 static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_open               = ibmveth_open,
        .ndo_stop               = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_fix_features       = ibmveth_fix_features,
        .ndo_set_features       = ibmveth_set_features,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = ibmveth_set_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ibmveth_poll_controller,
 #endif
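
The ibmveth poll change calls napi_complete() before re-enabling the device interrupt and then re-checks for late arrivals with napi_reschedule(); completing NAPI only after interrupts are back on can miss a wakeup. A schematic of that ordering, assuming hypothetical example_* helpers rather than the ibmveth functions:

#include <linux/netdevice.h>

/* Schematic adapter with only the field this sketch touches; the
 * example_* helpers are illustrative stand-ins, not ibmveth code. */
struct example_adapter {
        struct napi_struct napi;
};

static int example_clean_rx(struct example_adapter *a, int budget);
static bool example_rx_pending(struct example_adapter *a);
static void example_enable_irq(struct example_adapter *a);
static void example_disable_irq(struct example_adapter *a);

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_adapter *adapter =
                container_of(napi, struct example_adapter, napi);
        int done = example_clean_rx(adapter, budget);

        if (done < budget) {
                /* Complete NAPI first, then unmask the interrupt ... */
                napi_complete(napi);
                example_enable_irq(adapter);

                /* ... and close the race: if packets arrived in the window,
                 * pull the context back into polling mode. */
                if (example_rx_pending(adapter) && napi_reschedule(napi))
                        example_disable_irq(adapter);
        }
        return done;
}
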
index 11a9ffebf8d88acdfd5b738baab62bd2b34aa30a..6aea65dae5ed654b5da2e7a8885a02a92c75710d 100644 (file)
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
         * The grst delay value is in 100ms units, and we'll wait a
         * couple counts longer to be sure we don't just miss the end.
         */
-       grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
-                       >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+       grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+                   I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+                   I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
        for (cnt = 0; cnt < grst_del + 2; cnt++) {
                reg = rd32(hw, I40E_GLGEN_RSTAT);
                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-       if (!status)
+       if (!status && filter_index)
                *filter_index = resp->index;
 
        return status;
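
This hunk, and the i40e_dcb.c hunk that follows, add the parentheses that C's precedence rules require: >> binds tighter than &, so val & MASK >> SHIFT masks with an already-shifted constant instead of shifting the masked field. A small demonstration with made-up mask values:

#include <stdio.h>

#define FIELD_MASK  0x0000ff00u
#define FIELD_SHIFT 8

int main(void)
{
        unsigned int val = 0x1234abcdu;

        /* ">>" binds tighter than "&": without parentheses the mask is
         * shifted first and the wrong bits are extracted. */
        printf("unparenthesised: 0x%x\n", val & FIELD_MASK >> FIELD_SHIFT);
        printf("intended:        0x%x\n", (val & FIELD_MASK) >> FIELD_SHIFT);
        return 0;
}
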
index 183dcb63ce98e14e5b504bed9874911b35ae18d3..a11c70ca5a2811c84cc094ac425b93ec0b840d5d 100644 (file)
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
        u32 val;
 
        val = rd32(hw, I40E_PRTDCB_GENC);
-       *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+       *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
                       I40E_PRTDCB_GENC_PFCLDA_SHIFT);
 }
 
index 61236f983971a1d55955586cd5a6ee288f5c60e7..c17ee77100d3651e254265ae192bd4d3e54c3659 100644 (file)
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        if (!cmd_buf)
                return count;
        bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-       if (bytes_not_copied < 0)
+       if (bytes_not_copied < 0) {
+               kfree(cmd_buf);
                return bytes_not_copied;
+       }
        if (bytes_not_copied > 0)
                count -= bytes_not_copied;
        cmd_buf[count] = '\0';
index cbe281be1c9f0c1956c4e7d2467b42474fad70c2..dadda3c5d658b950cf64d21c838f5ffd5176f0f6 100644 (file)
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        /* Number of queues per enabled TC */
-       num_tc_qps = vsi->alloc_queue_pairs/numtc;
+       /* In MFP case we can have a much lower count of MSIx
+        * vectors available and so we need to lower the used
+        * q count.
+        */
+       qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+       num_tc_qps = qcount / numtc;
        num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
 
        /* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
        u16 qoffset, qcount;
        int i, n;
 
-       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
-               return;
+       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+               /* Reset the TC information */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       rx_ring = vsi->rx_rings[i];
+                       tx_ring = vsi->tx_rings[i];
+                       rx_ring->dcb_tc = 0;
+                       tx_ring->dcb_tc = 0;
+               }
+       }
 
        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
                if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
        int i;
 
+       i40e_stop_misc_vector(pf);
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               synchronize_irq(pf->msix_entries[0].vector);
+               free_irq(pf->msix_entries[0].vector, pf);
+       }
+
        i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
        /* Wait for the PF's Tx queues to be disabled */
        ret = i40e_pf_wait_txq_disabled(pf);
-       if (!ret)
+       if (ret) {
+               /* Schedule PF reset to recover */
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               i40e_service_event_schedule(pf);
+       } else {
                i40e_pf_unquiesce_all_vsi(pf);
+       }
+
 exit:
        return ret;
 }
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
        int i, v;
 
        /* If we're down or resetting, just bail */
-       if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
                return;
 
        /* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);
+       i40e_fdir_teardown(pf);
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
        if (pf->vsi[pf->lan_vsi])
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
-       i40e_stop_misc_vector(pf);
-       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-               synchronize_irq(pf->msix_entries[0].vector);
-               free_irq(pf->msix_entries[0].vector, pf);
-       }
-
        /* shutdown and destroy the HMC */
        if (pf->hw.hmc.hmc_obj) {
                ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
        wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
+       i40e_clear_interrupt_scheme(pf);
+
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
                pci_set_power_state(pdev, PCI_D3hot);
index 3e70f2e45a4768986a0a90ee21bdf98790db9b53..5defe0d635141ed5c886cb1086995f2b2d3d31a1 100644 (file)
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 {
        i40e_status status;
        enum i40e_nvmupd_cmd upd_cmd;
+       bool retry_attempt = false;
 
        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
 
+retry:
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                *errno = -ESRCH;
                break;
        }
+
+       /* In some circumstances, a multi-write transaction takes longer
+        * than the default 3 minute timeout on the write semaphore.  If
+        * the write failed with an EBUSY status, this is likely the problem,
+        * so here we try to reacquire the semaphore then retry the write.
+        * We only do one retry, then give up.
+        */
+       if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+           !retry_attempt) {
+               i40e_status old_status = status;
+               u32 old_asq_status = hw->aq.asq_last_status;
+               u32 gtime;
+
+               gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+               if (gtime >= hw->nvm.hw_semaphore_timeout) {
+                       i40e_debug(hw, I40E_DEBUG_ALL,
+                                  "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+                                  gtime, hw->nvm.hw_semaphore_timeout);
+                       i40e_release_nvm(hw);
+                       status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+                       if (status) {
+                               i40e_debug(hw, I40E_DEBUG_ALL,
+                                          "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+                                          hw->aq.asq_last_status);
+                               status = old_status;
+                               hw->aq.asq_last_status = old_asq_status;
+                       } else {
+                               retry_attempt = true;
+                               goto retry;
+                       }
+               }
+       }
+
        return status;
 }
 
index 2206d2d36f0fdf2e9f249c7624e156f8c1b17118..bbf1b1247ac471bb712ed1397956db83cf80e4f3 100644 (file)
@@ -585,6 +585,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-       u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-                       ? ring->next_to_use
-                       : ring->next_to_use + ring->count);
-       return ntu - ring->next_to_clean;
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
 }
 
 /**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        struct i40e_pf *pf = tx_ring->vsi->back;
        bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending >= I40E_MIN_DESC_PENDING)) {
+       if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
-       } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-                  (tx_pending < I40E_MIN_DESC_PENDING) &&
-                  (tx_pending > 0)) {
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
                if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
                        dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
                                 tx_pending, tx_ring->queue_index);
                pf->tx_sluggish_count++;
        } else {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
        return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -2139,6 +2145,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
        return __i40e_maybe_stop_tx(tx_ring, size);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                              const u8 hdr_len)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 1;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math, if we have too many frags per segment */
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                   I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               size = hdr_len;
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               if (size < skb_shinfo(skb)->gso_size) {
+                                       linearize = true;
+                                       break;
+                               }
+                               j = 1;
+                               size -= skb_shinfo(skb)->gso_size;
+                               if (size)
+                                       j++;
+                               size += hdr_len;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
+       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+               if (skb_linearize(skb))
+                       goto out_drop;
+
        skb_tx_timestamp(skb);
 
        /* always enable CRC insertion offload */
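
i40e_get_tx_pending() now derives the in-flight descriptor count from the hardware head write-back (stored just past the descriptor area) and the tail register, handling ring wrap-around explicitly. The arithmetic in isolation (illustrative helper, not the driver function):

#include <stdio.h>
#include <stdint.h>

/* Outstanding descriptors between hardware head and software tail on a
 * circular ring of ring_count entries; 0 when head has caught up. */
static uint32_t ring_pending(uint32_t head, uint32_t tail, uint32_t ring_count)
{
        if (head == tail)
                return 0;
        return head < tail ? tail - head : tail + ring_count - head;
}

int main(void)
{
        printf("%u\n", ring_pending(5, 9, 16));   /* 4 in flight */
        printf("%u\n", ring_pending(14, 2, 16));  /* wrapped: 4  */
        printf("%u\n", ring_pending(7, 7, 16));   /* idle: 0     */
        return 0;
}
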
index 18b00231d2f117d714e7e1399aecba0061ead41a..dff0baeb1ecc092e53ef22ea373be46e79df13a2 100644 (file)
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_DATA_PER_TXD  8192
 
index 29004382f462ce717fd27b5b004139f5ef0efd1a..708891571dae328299e2b31e83cbf40b42726473 100644 (file)
@@ -125,6 +125,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-       u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-                       ? ring->next_to_use
-                       : ring->next_to_use + ring->count);
-       return ntu - ring->next_to_clean;
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
 }
 
 /**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        bool ret = false;
 
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending >= I40E_MIN_DESC_PENDING)) {
+       if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
-       } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-                  !(tx_pending < I40E_MIN_DESC_PENDING) ||
-                  !(tx_pending > 0)) {
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
        return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
+
 
        } else {
                network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                              const u8 hdr_len)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 1;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math, if we have too many frags per segment */
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                   I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               size = hdr_len;
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               if (size < skb_shinfo(skb)->gso_size) {
+                                       linearize = true;
+                                       break;
+                               }
+                               j = 1;
+                               size -= skb_shinfo(skb)->gso_size;
+                               if (size)
+                                       j++;
+                               size += hdr_len;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
+       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+               if (skb_linearize(skb))
+                       goto out_drop;
+
        skb_tx_timestamp(skb);
 
        /* always enable CRC insertion offload */
index 4e15903b2b6ded2a054981999f71e2406b1e80a2..c950a038237c2c63dc66b9bf7be887556b80a5de 100644 (file)
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_DATA_PER_TXD  8192
 
index 2a210c4efb895728ec6ad12eaef9ec8f9ff7fd08..ebce5bb24df98b77eec38a1fc6c720987768934b 100644 (file)
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
        /* Schedule multicast task to populate multicast list */
        queue_work(mdev->workqueue, &priv->rx_mode_task);
 
-       mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
-
 #ifdef CONFIG_MLX4_EN_VXLAN
        if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                queue_delayed_work(mdev->workqueue, &priv->service_task,
                                   SERVICE_TASK_DELAY);
 
+       mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
        return 0;
 
 out:
index 2d8ee66138e8ad48cb72daa773a67c1f421e4cac..a61009f4b2df728e05a4e54def4bb1336949f03e 100644 (file)
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 {
        u32 loopback_ok = 0;
        int i;
-
+       bool gro_enabled;
 
         priv->loopback_ok = 0;
        priv->validate_loopback = 1;
+       gro_enabled = priv->dev->features & NETIF_F_GRO;
 
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+       priv->dev->features &= ~NETIF_F_GRO;
 
        /* xmit */
        if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 mlx4_en_test_loopback_exit:
 
        priv->validate_loopback = 0;
+
+       if (gro_enabled)
+               priv->dev->features |= NETIF_F_GRO;
+
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
        return !loopback_ok;
 }
index 2a8268e6be15d0b8682b8ad47bb4bb4ac071b243..ebbe244e80dde55068d924905e08d1d631821811 100644 (file)
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
        unsigned long rx_chksum_none;
        unsigned long rx_chksum_complete;
        unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS         9
+#define NUM_PORT_STATS         10
 };
 
 struct mlx4_en_perf_stats {
index 2bb8553bd9054b25456ec694ee25696e93ebde25..eda29dbbfcd259824f0a0fbec3876975f215d2e2 100644 (file)
@@ -412,7 +412,6 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params)
index 486e3d26cd4a9ef4bb6a23995b85ac50cd413776..d97ca88c55b59e039af66991e4ad413f82984158 100644 (file)
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
-       int port;
+       int port, err = 0;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};
 
-                               mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                               err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                               if (err)
+                                       goto out;
                        }
                }
 
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
-       return 0;
+out:
+       return err;
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
index 44e8d7d255474d30bf48577723caf9032af43854..57a6e6cd74fc3c9c99708530b431cd0e5b768f8c 100644 (file)
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
        if (mac->phydev)
                phy_start(mac->phydev);
 
-       init_timer(&mac->tx->clean_timer);
-       mac->tx->clean_timer.function = pasemi_mac_tx_timer;
-       mac->tx->clean_timer.data = (unsigned long)mac->tx;
-       mac->tx->clean_timer.expires = jiffies+HZ;
-       add_timer(&mac->tx->clean_timer);
+       setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+                   (unsigned long)mac->tx);
+       mod_timer(&mac->tx->clean_timer, jiffies + HZ);
 
        return 0;
 
index 6e426ae9469228ed55586bca15a8eef1dcb5e5c4..0a5e204a0179a35c15f52a3dea58729c30c2c31f 100644 (file)
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
 
 } __attribute__ ((aligned(64)));
 
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
 struct rcv_desc {
        __le16 reference_handle;
        __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
 #define NETXEN_IMAGE_START     0x43000 /* compressed image */
 #define NETXEN_SECONDARY_START 0x200000        /* backup images */
 #define NETXEN_PXE_START       0x3E0000        /* PXE boot rom */
-#define NETXEN_USER_START      0x3E8000        /* Firmare info */
+#define NETXEN_USER_START      0x3E8000        /* Firmware info */
 #define NETXEN_FIXED_START     0x3F0000        /* backup of crbinit */
 #define NETXEN_USER_START_OLD  NETXEN_PXE_START /* very old flash */
 
index fa4317611fd63fe81df2e23e47fa307b8c5c5348..f221126a5c4e6789cb2630a07dc58b02f0676239 100644 (file)
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
 #define QLCNIC_BRDCFG_START    0x4000          /* board config */
 #define QLCNIC_BOOTLD_START    0x10000         /* bootld */
 #define QLCNIC_IMAGE_START     0x43000         /* compressed image */
-#define QLCNIC_USER_START      0x3E8000        /* Firmare info */
+#define QLCNIC_USER_START      0x3E8000        /* Firmware info */
 
 #define QLCNIC_FW_VERSION_OFFSET       (QLCNIC_USER_START+0x408)
 #define QLCNIC_FW_SIZE_OFFSET          (QLCNIC_USER_START+0x40c)
index ad0020af2193da8749534c25242047212cccc1a4..c70ab40d86989974d54c9161bf7acd8558d93c74 100644 (file)
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
        int rc = -EINVAL;
 
        if (!rtl_fw_format_ok(tp, rtl_fw)) {
-               netif_err(tp, ifup, dev, "invalid firwmare\n");
+               netif_err(tp, ifup, dev, "invalid firmware\n");
                goto out;
        }
 
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
        RTL_W8(ChipCmd, CmdReset);
 
        rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
-       netdev_reset_queue(tp->dev);
 }
 
 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        u32 status, len;
        u32 opts[2];
        int frags;
-       bool stop_queue;
 
        if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        txd->opts2 = cpu_to_le32(opts[1]);
 
-       netdev_sent_queue(dev, skb->len);
-
        skb_tx_timestamp(skb);
 
        /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        tp->cur_tx += frags + 1;
 
-       stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+       RTL_W8(TxPoll, NPQ);
 
-       if (!skb->xmit_more || stop_queue ||
-           netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
-               RTL_W8(TxPoll, NPQ);
-
-               mmiowb();
-       }
+       mmiowb();
 
-       if (stop_queue) {
+       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
                 * not miss a ring update when it notices a stopped queue.
                 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
        unsigned int dirty_tx, tx_left;
-       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        dirty_tx = tp->dirty_tx;
        smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
                rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
                                     tp->TxDescArray + entry);
                if (status & LastFrag) {
-                       pkts_compl++;
-                       bytes_compl += tx_skb->skb->len;
+                       u64_stats_update_begin(&tp->tx_stats.syncp);
+                       tp->tx_stats.packets++;
+                       tp->tx_stats.bytes += tx_skb->skb->len;
+                       u64_stats_update_end(&tp->tx_stats.syncp);
                        dev_kfree_skb_any(tx_skb->skb);
                        tx_skb->skb = NULL;
                }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
        }
 
        if (tp->dirty_tx != dirty_tx) {
-               netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
-               u64_stats_update_begin(&tp->tx_stats.syncp);
-               tp->tx_stats.packets += pkts_compl;
-               tp->tx_stats.bytes += bytes_compl;
-               u64_stats_update_end(&tp->tx_stats.syncp);
-
                tp->dirty_tx = dirty_tx;
                /* Sync with rtl8169_start_xmit:
                 * - publish dirty_tx ring index (write barrier)
index 4da8bd263997a17baf89b5fe7a3d2198f186827b..736d5d1624a142e902d6023cf3ee801c5169fa14 100644 (file)
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
-       .shift_rd0      = 1,
 };
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        msleep(2); /* max frame time at 10 Mbps < 1250 us */
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);
+
+       /* Set MAC address again */
+       update_mac_address(ndev);
 }
 
 /* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
+               /* TACT bit must be checked before all the following reads */
+               rmb();
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        limit = boguscnt;
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+               /* RACT bit must be checked before all the following reads */
+               rmb();
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;
 
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
                /* In case of almost all GETHER/ETHERs, the Receive Frame State
                 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
-                * bit 0. However, in case of the R8A7740, R8A779x, and
-                * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+                * bit 0. However, in case of the R8A7740 and R7S72100
+                * the RFS bits are from bit 25 to bit 16. So, the
                 * driver needs right shifting by 16.
                 */
                if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        skb_checksum_none_assert(skb);
                        rxdesc->addr = dma_addr;
                }
+               wmb(); /* RACT bit must be set after all the above writes */
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
                /* fix the values for the next receiving if RDE is set */
-               if (intr_status & EESR_RDE) {
+               if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
                        u32 count = (sh_eth_read(ndev, RDFAR) -
                                     sh_eth_read(ndev, RDLAR)) >> 4;
 
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        spin_unlock_irqrestore(&mdp->lock, flags);
 
-       if (skb_padto(skb, ETH_ZLEN))
+       if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
 
        entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        txdesc->buffer_length = skb->len;
 
+       wmb(); /* TACT bit must be set after all the above writes */
        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
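The sh_eth change above swaps skb_padto() for skb_put_padto(), so the zero padding becomes part of skb->len and is actually transmitted. A hedged sketch of how a transmit path typically uses it (hypothetical function, not the driver's):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	/* pad short frames to 60 bytes; on failure the skb is already freed */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* ... map skb->data (now including the padding) and queue it ... */
	return NETDEV_TX_OK;
}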
index 34389b6aa67cbd26263366ca1bff769f1b27c68a..9fb6948e14c64ef424c032811364aaefc608d4cb 100644 (file)
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
        if (enable)
-               val |= 1 << rocker_port->lport;
+               val |= 1ULL << rocker_port->lport;
        else
-               val &= ~(1 << rocker_port->lport);
+               val &= ~(1ULL << rocker_port->lport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
 }
 
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
 
        alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
        rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+       if (!rocker->ports)
+               return -ENOMEM;
        for (i = 0; i < rocker->port_count; i++) {
                err = rocker_probe_port(rocker, i);
                if (err)
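The rocker hunks above force 64-bit shifts for the per-port enable mask and add a missing NULL check after kmalloc(). A small sketch of the shift issue, with made-up helper names:

#include <linux/types.h>

static u64 example_port_enable_mask(u64 val, unsigned int lport, bool enable)
{
	/* 1ULL keeps the shift in 64 bits; a plain "1 << lport" would be a
	 * 32-bit shift and misbehave for ports at bit 32 and above.
	 */
	if (enable)
		val |= 1ULL << lport;
	else
		val &= ~(1ULL << lport);
	return val;
}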
index 6b33127ab352a43ed6a787af7eedde554241e1b3..3449893aea8d402fb2fc56582df92a04aa157c10 100644 (file)
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
     smc->packets_waiting = 0;
 
     smc_reset(dev);
-    init_timer(&smc->media);
-    smc->media.function = media_check;
-    smc->media.data = (u_long) dev;
-    smc->media.expires = jiffies + HZ;
-    add_timer(&smc->media);
+    setup_timer(&smc->media, media_check, (u_long)dev);
+    mod_timer(&smc->media, jiffies + HZ);
 
     return 0;
 } /* smc_open */
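The smc91c92_cs hunk above collapses the open-coded timer initialisation into setup_timer()/mod_timer(). A minimal sketch of that conversion, using illustrative names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_priv {
	struct timer_list media;
};

static void example_media_check(unsigned long data)
{
	struct example_priv *p = (struct example_priv *)data;

	/* ... poll the link state ..., then re-arm for the next second */
	mod_timer(&p->media, jiffies + HZ);
}

static void example_open(struct example_priv *p)
{
	/* replaces init_timer() plus manual .function/.data/.expires setup */
	setup_timer(&p->media, example_media_check, (unsigned long)p);
	mod_timer(&p->media, jiffies + HZ);
}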
index 88a55f95fe09bc544b0acf0e28bae507086788f8..8678e39aba08cfe0d5b3b0578348b258812ec4ec 100644 (file)
@@ -91,6 +91,11 @@ static const char version[] =
 
 #include "smc91x.h"
 
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/assabet.h>
+#include <mach/neponset.h>
+#endif
+
 #ifndef SMC_NOWAIT
 # define SMC_NOWAIT            0
 #endif
@@ -2243,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
        const struct of_device_id *match = NULL;
        struct smc_local *lp;
        struct net_device *ndev;
-       struct resource *res;
+       struct resource *res, *ires;
        unsigned int __iomem *addr;
        unsigned long irq_flags = SMC_IRQ_FLAGS;
-       unsigned long irq_resflags;
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2338,25 +2342,23 @@ static int smc_drv_probe(struct platform_device *pdev)
                goto out_free_netdev;
        }
 
-       ndev->irq = platform_get_irq(pdev, 0);
-       if (ndev->irq <= 0) {
+       ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!ires) {
                ret = -ENODEV;
                goto out_release_io;
        }
-       /*
-        * If this platform does not specify any special irqflags, or if
-        * the resource supplies a trigger, override the irqflags with
-        * the trigger flags from the resource.
-        */
-       irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
-       if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
-               irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
+
+       ndev->irq = ires->start;
+
+       if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
+               irq_flags = ires->flags & IRQF_TRIGGER_MASK;
 
        ret = smc_request_attrib(pdev, ndev);
        if (ret)
                goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
-       neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+       if (machine_is_assabet() && machine_has_neponset())
+               neponset_ncr_set(NCR_ENET_OSC_EN);
 #endif
        platform_set_drvdata(pdev, ndev);
        ret = smc_enable_device(pdev);
index be67baf5f6778d08df4eaa06216914b77ab8f2b5..3a18501d1068c36816554f953e367ff1439c2a36 100644 (file)
  * Define your architecture specific bus configuration parameters here.
  */
 
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
-    defined(CONFIG_MACH_MAINSTONE) ||\
-    defined(CONFIG_MACH_ZYLONITE) ||\
-    defined(CONFIG_MACH_LITTLETON) ||\
-    defined(CONFIG_MACH_ZYLONITE2) ||\
-    defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2) ||\
-    defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
 
 #include <asm/mach-types.h>
 
 /* We actually can't write halfwords properly if not word aligned */
 static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
-       if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
-               unsigned int v = val << 16;
-               v |= readl(ioaddr + (reg & ~2)) & 0xffff;
-               writel(v, ioaddr + (reg & ~2));
-       } else {
-               writew(val, ioaddr + reg);
-       }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, (l))
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS          (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      0
-#define SMC_CAN_USE_32BIT      0
-#define SMC_NOWAIT             1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT           2
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS          (-1)    /* from resource */
-
-#elif  defined(CONFIG_MACH_LOGICPD_PXA270) ||  \
-       defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT       0
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#elif  defined(CONFIG_ARCH_INNOKOM) || \
-       defined(CONFIG_ARCH_PXA_IDP) || \
-       defined(CONFIG_ARCH_RAMSES) || \
-       defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      1
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-#define SMC_USE_PXA_DMA                1
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_inl(a, r)          readl((a) + (r))
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_outl(v, a, r)      writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)  writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS          (-1)    /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
-       if (reg & 2) {
+       if ((machine_is_mainstone() || machine_is_stargate2() ||
+            machine_is_pxa_idp()) && reg & 2) {
                unsigned int v = val << 16;
                v |= readl(ioaddr + (reg & ~2)) & 0xffff;
                writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define RPC_LSA_DEFAULT         RPC_LED_100_10
 #define RPC_LSB_DEFAULT         RPC_LED_TX_RX
 
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT       0
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_NOWAIT             1
-
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS          IRQF_TRIGGER_HIGH
-
 #elif defined(CONFIG_COLDFIRE)
 
 #define SMC_CAN_USE_8BIT       0
index 55e89b3838f1cb60df3f2f751ba254eddbef8fa2..a0ea84fe6519badffb8b5cabf0e9892d135ea081 100644 (file)
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
-                       init_timer(&priv->eee_ctrl_timer);
-                       priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-                       priv->eee_ctrl_timer.data = (unsigned long)priv;
-                       priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
-                       add_timer(&priv->eee_ctrl_timer);
+                       setup_timer(&priv->eee_ctrl_timer,
+                                   stmmac_eee_ctrl_timer,
+                                   (unsigned long)priv);
+                       mod_timer(&priv->eee_ctrl_timer,
+                                 STMMAC_LPI_T(eee_timer));
 
                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
index fb846ebba1d9b0860acf920356aedfaf9d967f27..f9b42f11950f74dd38c41a9c535a14f94c51318b 100644 (file)
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
        struct stmmac_priv *priv = NULL;
        struct plat_stmmacenet_data *plat_dat = NULL;
        const char *mac = NULL;
+       int irq, wol_irq, lpi_irq;
+
+       /* Get IRQ information early so that a deferred probe can be
+        * requested, if needed, before we get too far with resource allocation.
+        */
+       irq = platform_get_irq_byname(pdev, "macirq");
+       if (irq < 0) {
+               if (irq != -EPROBE_DEFER) {
+                       dev_err(dev,
+                               "MAC IRQ configuration information not found\n");
+               }
+               return irq;
+       }
+
+       /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
+        * The external wake up irq can be passed through the platform code
+        * named as "eth_wake_irq"
+        *
+        * If the wake up interrupt is not passed from the platform,
+        * the driver will continue to use the mac irq (ndev->irq)
+        */
+       wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (wol_irq < 0) {
+               if (wol_irq == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               wol_irq = irq;
+       }
+
+       lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+       if (lpi_irq == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
                return PTR_ERR(priv);
        }
 
+       /* Copy IRQ values to the priv structure, which is now available */
+       priv->dev->irq = irq;
+       priv->wol_irq = wol_irq;
+       priv->lpi_irq = lpi_irq;
+
        /* Get MAC address if available (DT) */
        if (mac)
                memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
 
-       /* Get the MAC information */
-       priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
-       if (priv->dev->irq < 0) {
-               if (priv->dev->irq != -EPROBE_DEFER) {
-                       netdev_err(priv->dev,
-                                  "MAC IRQ configuration information not found\n");
-               }
-               return priv->dev->irq;
-       }
-
-       /*
-        * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
-        * The external wake up irq can be passed through the platform code
-        * named as "eth_wake_irq"
-        *
-        * In case the wake up interrupt is not passed from the platform
-        * so the driver will continue to use the mac irq (ndev->irq)
-        */
-       priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-       if (priv->wol_irq < 0) {
-               if (priv->wol_irq == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               priv->wol_irq = priv->dev->irq;
-       }
-
-       priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-       if (priv->lpi_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
-
        platform_set_drvdata(pdev, priv->dev);
 
        pr_debug("STMMAC platform driver registration completed");
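The stmmac platform hunks above fetch every named IRQ before any resources are allocated, so that -EPROBE_DEFER can be returned cheaply. A trimmed sketch of that probe ordering (error handling shortened, names illustrative):

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq, wol_irq;

	irq = platform_get_irq_byname(pdev, "macirq");
	if (irq < 0)
		return irq;			/* may be -EPROBE_DEFER */

	wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (wol_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (wol_irq < 0)
		wol_irq = irq;			/* fall back to the MAC irq */

	/* only now ioremap registers, allocate the netdev, and so on */
	return 0;
}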
index 4b51f903fb733cba9b9b8a3fe9539fe3bc811c84..0c5842aeb807014c632a2d713b366133d7021f56 100644 (file)
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
                *flow_type = IP_USER_FLOW;
                break;
        default:
-               return 0;
+               return -EINVAL;
        }
 
-       return 1;
+       return 0;
 }
 
 static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
        class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
                TCAM_V4KEY0_CLASS_CODE_SHIFT;
        ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
        if (ret < 0) {
                netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
                            parent->index);
-               ret = -EINVAL;
                goto out;
        }
 
index 7d8dd0d2182ef9f8d94d1e84b3c7a45f3364347c..a1bbaf6352ba379d209c7fc5cac33c8bfcbcb347 100644 (file)
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
        cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                           port_mask, ALE_VLAN, slave->port_vlan, 0);
        cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-               priv->host_port, ALE_VLAN, slave->port_vlan);
+               priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cpsw_suspend(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
        }
        return 0;
 }
+#endif
 
-static const struct dev_pm_ops cpsw_pm_ops = {
-       .suspend        = cpsw_suspend,
-       .resume         = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
 static const struct of_device_id cpsw_of_mtable[] = {
        { .compatible = "ti,cpsw", },
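The cpsw and davinci_mdio hunks above wrap the sleep callbacks in CONFIG_PM_SLEEP and declare the ops through the PM macros. A minimal sketch of the same pattern with placeholder names:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* ... stop the hardware ... */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* ... restart the hardware ... */
	return 0;
}
#endif

/* wires suspend/resume only when CONFIG_PM_SLEEP is set, so the #ifdef
 * above does not leave dangling references or unused-function warnings
 */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);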
index 98655b44b97e2d7690ef2fa28156730697098d2b..c00084d689f3ba99fe846c2e50f5b21daec73189 100644 (file)
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int davinci_mdio_suspend(struct device *dev)
 {
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-       .suspend_late   = davinci_mdio_suspend,
-       .resume_early   = davinci_mdio_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
 };
 
 #if IS_ENABLED(CONFIG_OF)
index a495931a66a1f217216cdb90a6b6989881039f32..0e0fbb5842b3d25e0d18ebaedfaeff60b006c1b7 100644 (file)
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
        }
 
        if (rx_count < budget) {
+               napi_complete(napi);
                w5100_write(priv, W5100_IMR, IR_S0);
                mmiowb();
-               napi_complete(napi);
        }
 
        return rx_count;
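The w5100 hunk above (and the identical w5300 change that follows) reorders the poll completion so napi_complete() runs before the device interrupt is unmasked. A hedged sketch of that ordering; the helpers stand in for the drivers' real receive and interrupt-enable paths:

#include <linux/netdevice.h>

static int example_rx(struct napi_struct *napi, int budget);
static void example_unmask_irq(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = example_rx(napi, budget);

	if (work < budget) {
		/* leave polling state first ... */
		napi_complete(napi);
		/* ... then re-enable the device interrupt, so an IRQ firing
		 * here schedules NAPI again instead of being lost
		 */
		example_unmask_irq(napi);
	}
	return work;
}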
index 09322d9db5785ccb622a3ee3ec854442576bd537..4b310002258d0742031f4bcb36d395dd992d0a3d 100644 (file)
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
        }
 
        if (rx_count < budget) {
+               napi_complete(napi);
                w5300_write(priv, W5300_IMR, IR_S0);
                mmiowb();
-               napi_complete(napi);
        }
 
        return rx_count;
index f7e0f0f7c2e27dd19b2cbc674644cd4678074c2c..9e16a2819d4850938389c924e18f56184fedd946 100644 (file)
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
        int i;
        static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
-       if (dev->flags & IFF_ALLMULTI) {
+       if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
                for (i = 0; i < ETH_ALEN; i++) {
                        __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
                        __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
index e40fdfccc9c10df4ea8676a1dd59275d5d9c6b88..27ecc5c4fa2665cd42ac1ca81717255f85507113 100644 (file)
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
        } /* else everything is zero */
 }
 
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                struct iov_iter *from, int noblock)
 {
-       int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+       int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
        unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                        linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
        }
 
-       skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+       skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
                                linear, noblock, &err);
        if (!skb)
                goto err;
index 9e3af54c90102a2c113596d326d893670b7e6c24..32efbd48f32642ddabb21126384b0c21e160a403 100644 (file)
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_CDR_RATE_PROPERTY     "amd,serdes-cdr-rate"
 #define XGBE_PHY_PQ_SKEW_PROPERTY      "amd,serdes-pq-skew"
 #define XGBE_PHY_TX_AMP_PROPERTY       "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY      "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY      "amd,serdes-dfe-tap-enable"
 
 #define XGBE_PHY_SPEEDS                        3
 #define XGBE_PHY_SPEED_1000            0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_10000_BLWC               0
 #define SPEED_10000_CDR                        0x7
 #define SPEED_10000_PLL                        0x1
-#define SPEED_10000_PQ                 0x1e
+#define SPEED_10000_PQ                 0x12
 #define SPEED_10000_RATE               0x0
 #define SPEED_10000_TXAMP              0xa
 #define SPEED_10000_WORD               0x7
+#define SPEED_10000_DFE_TAP_CONFIG     0x1
+#define SPEED_10000_DFE_TAP_ENABLE     0x7f
 
 #define SPEED_2500_BLWC                        1
 #define SPEED_2500_CDR                 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_2500_RATE                        0x1
 #define SPEED_2500_TXAMP               0xf
 #define SPEED_2500_WORD                        0x1
+#define SPEED_2500_DFE_TAP_CONFIG      0x3
+#define SPEED_2500_DFE_TAP_ENABLE      0x0
 
 #define SPEED_1000_BLWC                        1
 #define SPEED_1000_CDR                 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_1000_RATE                        0x3
 #define SPEED_1000_TXAMP               0xf
 #define SPEED_1000_WORD                        0x1
+#define SPEED_1000_DFE_TAP_CONFIG      0x3
+#define SPEED_1000_DFE_TAP_ENABLE      0x0
 
 /* SerDes RxTx register offsets */
+#define RXTX_REG6                      0x0018
 #define RXTX_REG20                     0x0050
+#define RXTX_REG22                     0x0058
 #define RXTX_REG114                    0x01c8
+#define RXTX_REG129                    0x0204
 
 /* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX     8
+#define RXTX_REG6_RESETB_RXD_WIDTH     1
 #define RXTX_REG20_BLWC_ENA_INDEX      2
 #define RXTX_REG20_BLWC_ENA_WIDTH      1
 #define RXTX_REG114_PQ_REG_INDEX       9
 #define RXTX_REG114_PQ_REG_WIDTH       7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
 
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
        SPEED_10000_TXAMP,
 };
 
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+       SPEED_1000_DFE_TAP_CONFIG,
+       SPEED_2500_DFE_TAP_CONFIG,
+       SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+       SPEED_1000_DFE_TAP_ENABLE,
+       SPEED_2500_DFE_TAP_ENABLE,
+       SPEED_10000_DFE_TAP_ENABLE,
+};
+
 enum amd_xgbe_phy_an {
        AMD_XGBE_AN_READY = 0,
        AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
        u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
        u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
        u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+       u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+       u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
 
        /* Auto-negotiation state machine support */
        struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
                status = XSIR0_IOREAD(priv, SIR0_STATUS);
                if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
                    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-                       return;
+                       goto rx_reset;
        }
 
        netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
                   status);
+
+rx_reset:
+       /* Perform Rx reset for the DFE changes */
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
 }
 
 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                       sizeof(priv->serdes_tx_amp));
        }
 
+       if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_DFE_CFG_PROPERTY,
+                                                    priv->serdes_dfe_tap_cfg,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_DFE_CFG_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_dfe_tap_cfg,
+                      amd_xgbe_phy_serdes_dfe_tap_cfg,
+                      sizeof(priv->serdes_dfe_tap_cfg));
+       }
+
+       if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_DFE_ENA_PROPERTY,
+                                                    priv->serdes_dfe_tap_ena,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_DFE_ENA_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_dfe_tap_ena,
+                      amd_xgbe_phy_serdes_dfe_tap_ena,
+                      sizeof(priv->serdes_dfe_tap_ena));
+       }
+
        phydev->priv = priv;
 
        if (!priv->adev || acpi_disabled)
index cdcac6aa4260b32927d7c903e024b42e5d17861e..52cd8db2c57daad2767dec72149f4cdabbcf6917 100644 (file)
@@ -235,6 +235,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
 }
 
+/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ *                  speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+       unsigned int idx;
+
+       idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+       return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+               (settings[idx].setting & features);
+}
+
 /**
  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
  * @phydev: the target phy_device struct
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int status;
-               unsigned int idx;
 
                /* Read phy status to properly get the right settings */
                status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-               idx = phy_find_setting(phydev->speed, phydev->duplex);
-               if (!(lp & adv & settings[idx].setting))
+               if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
                        goto eee_exit_err;
 
                if (clk_stop_enable) {
index 0e62274e884a89de170d668795b40c3620d41046..7d394846afc214900f9e85baa2130f6f6121ac33 100644 (file)
@@ -43,9 +43,7 @@
 
 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 {
-       struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
-       return team_port_exists(dev) ? port : NULL;
+       return rcu_dereference(dev->rx_handler_data);
 }
 
 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
@@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
        if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-       rcu_read_lock();
-       list_for_each_entry_rcu(port, &team->port_list, list)
+       mutex_lock(&team->lock);
+       list_for_each_entry(port, &team->port_list, list)
                if (team->ops.port_change_dev_addr)
                        team->ops.port_change_dev_addr(team, port);
-       rcu_read_unlock();
+       mutex_unlock(&team->lock);
        return 0;
 }
 
index 3bd9678315ad651beccd96ca1b5d74bcd1e68c1e..7ba8d0885f120156c47f44884212a2fd73f604b9 100644 (file)
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
            * Linksys USB200M
            * Netgear FA120
            * Sitecom LN-029
+           * Sitecom LN-028
            * Intellinet USB 2.0 Ethernet
            * ST Lab USB 2.0 Ethernet
            * TrendNet TU2-ET100
index bf49792062a2b40c2f1bd2f5a06e6eff8954ab90..1173a24feda38c3af236c84acaf8982f39c0e0b1 100644 (file)
@@ -978,6 +978,10 @@ static const struct usb_device_id  products [] = {
        // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
        USB_DEVICE (0x0df6, 0x0056),
        .driver_info =  (unsigned long) &ax88178_info,
+}, {
+       // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+       USB_DEVICE (0x0df6, 0x061c),
+       .driver_info =  (unsigned long) &ax88178_info,
 }, {
        // corega FEther USB2-TX
        USB_DEVICE (0x07aa, 0x0017),
index 3eed708a6182e761fb79d197eadfbcbc028e5afc..fe48f4c513730fa485aaf31dcf8be8ed119736f8 100644 (file)
@@ -300,9 +300,18 @@ static const struct driver_info    cx82310_info = {
        .tx_fixup       = cx82310_tx_fixup,
 };
 
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+                      USB_DEVICE_ID_MATCH_DEV_INFO, \
+       .idVendor = (vend), \
+       .idProduct = (prod), \
+       .bDeviceClass = (cl), \
+       .bDeviceSubClass = (sc), \
+       .bDeviceProtocol = (pr)
+
 static const struct usb_device_id products[] = {
        {
-               USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
+               USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
                .driver_info = (unsigned long) &cx82310_info
        },
        { },
index 9cdfb3fe9c156ba775d41a9d6d343ddbb5fc9b60..778e91531fac7f35480208ba35f6ae3e6c9ad5b2 100644 (file)
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
                }
                cprev = cnow;
        }
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tiocmget->waitq, &wait);
 
        return ret;
index 3d18bb0eee8528ece6509ec3848c3044ff5804ce..1bfe0fcaccf5ba31bf125f898ec6c624f506206e 100644 (file)
@@ -134,6 +134,11 @@ static const struct usb_device_id  products [] = {
 }, {
        USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
        .driver_info =  (unsigned long) &prolific_info,
+}, {
+       USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
+                                        * Host-to-Host Cable
+                                        */
+       .driver_info =  (unsigned long) &prolific_info,
 },
 
        { },            // END
index f1ff3666f090d886e6b8ecc99bc6ac09d8687ef6..59b0e9754ae39cbc38812d407688f66f5e79b539 100644 (file)
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 {
        int i;
 
-       for (i = 0; i < vi->max_queue_pairs; i++)
+       for (i = 0; i < vi->max_queue_pairs; i++) {
+               napi_hash_del(&vi->rq[i].napi);
                netif_napi_del(&vi->rq[i].napi);
+       }
 
        kfree(vi->rq);
        kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
        cancel_delayed_work_sync(&vi->refill);
 
        if (netif_running(vi->dev)) {
-               for (i = 0; i < vi->max_queue_pairs; i++) {
+               for (i = 0; i < vi->max_queue_pairs; i++)
                        napi_disable(&vi->rq[i].napi);
-                       napi_hash_del(&vi->rq[i].napi);
-                       netif_napi_del(&vi->rq[i].napi);
-               }
        }
 
        remove_vq_common(vi);
index 1e0a775ea882995d88127e4d3c2a8f3f0afb8d60..f8528a4cf54f2b0bc78b78bd705a82577530bdeb 100644 (file)
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                        goto drop;
 
                flags &= ~VXLAN_HF_RCO;
-               vni &= VXLAN_VID_MASK;
+               vni &= VXLAN_VNI_MASK;
        }
 
        /* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                flags &= ~VXLAN_GBP_USED_BITS;
        }
 
-       if (flags || (vni & ~VXLAN_VID_MASK)) {
+       if (flags || vni & ~VXLAN_VNI_MASK) {
                /* If there are any unprocessed flags remaining treat
                 * this as a malformed packet. This behavior diverges from
                 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
index 83c39e2858bf70a1673cf2c6d9813a92f25ce4d3..88d121d43c08bedf2efc3265964188cf2b7f94a7 100644 (file)
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
        spin_lock_irqsave(&cosa->lock, flags);
        add_wait_queue(&chan->rxwaitq, &wait);
        while (!chan->rx_status) {
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&cosa->lock, flags);
                schedule();
                spin_lock_irqsave(&cosa->lock, flags);
                if (signal_pending(current) && chan->rx_status == 0) {
                        chan->rx_status = 1;
                        remove_wait_queue(&chan->rxwaitq, &wait);
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        mutex_unlock(&chan->rlock);
                        return -ERESTARTSYS;
                }
        }
        remove_wait_queue(&chan->rxwaitq, &wait);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        kbuf = chan->rxdata;
        count = chan->rxsize;
        spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
        spin_lock_irqsave(&cosa->lock, flags);
        add_wait_queue(&chan->txwaitq, &wait);
        while (!chan->tx_status) {
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&cosa->lock, flags);
                schedule();
                spin_lock_irqsave(&cosa->lock, flags);
                if (signal_pending(current) && chan->tx_status == 0) {
                        chan->tx_status = 1;
                        remove_wait_queue(&chan->txwaitq, &wait);
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        chan->tx_status = 1;
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
                }
        }
        remove_wait_queue(&chan->txwaitq, &wait);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        up(&chan->wsem);
        spin_unlock_irqrestore(&cosa->lock, flags);
        kfree(kbuf);
index ccbdb05b28cd7e2dc457afe9443e35ac2ca3fc21..75345c1e8c3487468a23611a3072d525cdc8b8ca 100644 (file)
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
        case 0x432a: /* BCM4321 */
        case 0x432d: /* BCM4322 */
        case 0x4352: /* BCM43222 */
+       case 0x435a: /* BCM43228 */
        case 0x4333: /* BCM4331 */
        case 0x43a2: /* BCM4360 */
        case 0x43b3: /* BCM4352 */
index 50cdf7090198b3662b83488ff52e3868162401c5..8eff2753abadeb2704f87f88f7c5eabd75bf091b 100644 (file)
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
        void *dcmd_buf = NULL, *wr_pointer;
        u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
 
-       brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
-                 cmdhdr->len);
+       if (len < sizeof(*cmdhdr)) {
+               brcmf_err("vendor command too short: %d\n", len);
+               return -EINVAL;
+       }
 
        vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
        ifp = vif->ifp;
 
-       len -= sizeof(struct brcmf_vndr_dcmd_hdr);
+       brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
+
+       if (cmdhdr->offset > len) {
+               brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
+               return -EINVAL;
+       }
+
+       len -= cmdhdr->offset;
        ret_len = cmdhdr->len;
        if (ret_len > 0 || len > 0) {
                if (len > BRCMF_DCMD_MAXLEN) {
index c3817fae16c04207136e5d45e8cc65bd3a125429..06f6cc08f451cadc7696c5b1810735288a85b1a9 100644 (file)
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
        .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,  \
        .base_params = &iwl1000_base_params,                    \
        .eeprom_params = &iwl1000_eeprom_params,                \
-       .led_mode = IWL_LED_BLINK
+       .led_mode = IWL_LED_BLINK,                              \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl1000_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
        .base_params = &iwl1000_base_params,                    \
        .eeprom_params = &iwl1000_eeprom_params,                \
        .led_mode = IWL_LED_RF_STATE,                           \
-       .rx_with_siso_diversity = true
+       .rx_with_siso_diversity = true,                         \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl100_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
index 21e5d0843a62a84a0f21ff337d1b674750fa3999..890b95f497d6eca97949a5e3989926d5b4d17d38 100644 (file)
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
        .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,          \
        .base_params = &iwl2000_base_params,                    \
        .eeprom_params = &iwl20x0_eeprom_params,                \
-       .led_mode = IWL_LED_RF_STATE
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+
 
 const struct iwl_cfg iwl2000_2bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
        .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,  \
        .base_params = &iwl2030_base_params,                    \
        .eeprom_params = &iwl20x0_eeprom_params,                \
-       .led_mode = IWL_LED_RF_STATE
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl2030_2bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
        .base_params = &iwl2000_base_params,                    \
        .eeprom_params = &iwl20x0_eeprom_params,                \
        .led_mode = IWL_LED_RF_STATE,                           \
-       .rx_with_siso_diversity = true
+       .rx_with_siso_diversity = true,                         \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl105_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
        .base_params = &iwl2030_base_params,                    \
        .eeprom_params = &iwl20x0_eeprom_params,                \
        .led_mode = IWL_LED_RF_STATE,                           \
-       .rx_with_siso_diversity = true
+       .rx_with_siso_diversity = true,                         \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl135_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
index 332bbede39e5b0fc6bb25b7ab30bbbd22c929b8b..724194e234141e707dac6eb8ec57563435957aae 100644 (file)
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
        .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION,  \
        .base_params = &iwl5000_base_params,                    \
        .eeprom_params = &iwl5000_eeprom_params,                \
-       .led_mode = IWL_LED_BLINK
+       .led_mode = IWL_LED_BLINK,                              \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5300_agn_cfg = {
        .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
        .base_params = &iwl5000_base_params,                    \
        .eeprom_params = &iwl5000_eeprom_params,                \
        .led_mode = IWL_LED_BLINK,                              \
-       .internal_wimax_coex = true
+       .internal_wimax_coex = true,                            \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5150_agn_cfg = {
        .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
index 8f2c3c8c6b843f78f346225d371ee3ad3df54f23..21b2630763dc933db72f54fd7fadba9244d6b449 100644 (file)
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
        .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,  \
        .base_params = &iwl6000_g2_base_params,                 \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .led_mode = IWL_LED_RF_STATE
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6005_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
        .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,  \
        .base_params = &iwl6000_g2_base_params,                 \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .led_mode = IWL_LED_RF_STATE
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6030_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
        .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,  \
        .base_params = &iwl6000_g2_base_params,                 \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .led_mode = IWL_LED_RF_STATE
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6035_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
        .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,  \
        .base_params = &iwl6000_base_params,                    \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .led_mode = IWL_LED_BLINK
+       .led_mode = IWL_LED_BLINK,                              \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6000i_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
        .base_params = &iwl6050_base_params,                    \
        .eeprom_params = &iwl6000_eeprom_params,                \
        .led_mode = IWL_LED_BLINK,                              \
-       .internal_wimax_coex = true
+       .internal_wimax_coex = true,                            \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6050_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
        .base_params = &iwl6050_base_params,                    \
        .eeprom_params = &iwl6000_eeprom_params,                \
        .led_mode = IWL_LED_BLINK,                              \
-       .internal_wimax_coex = true
+       .internal_wimax_coex = true,                            \
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6150_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
index 1ec4d55155f7d72fecdbe19db62dd36546bb05e0..7810c41cf9a7300e0d3b9bc16bca22a44188b463 100644 (file)
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        if (!vif->bss_conf.assoc)
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-       if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
+       if (mvmvif->phy_ctxt &&
+           IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
                               mvmvif->phy_ctxt->id))
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
index d530ef3da1071e5a421db3f4c072504b3571d8e3..542ee74f290aec1a3f356b3c69718b5029c4e708 100644 (file)
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        if (!vif->bss_conf.assoc)
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-       if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+       if (mvmvif->phy_ctxt &&
+           data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
        IWL_DEBUG_COEX(data->mvm,
index 1ff7ec08532d113aa5be85cb59eed8100e3f7eed..09654e73a533f7733c2a5b1e47bf6251d6267585 100644 (file)
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-               if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+               if ((mvm->fw->ucode_capa.capa[0] &
+                    IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+                   (mvm->fw->ucode_capa.api[0] &
+                    IWL_UCODE_TLV_API_LQ_SS_PARAMS))
                        hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
                                IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
        }
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       iwl_mvm_cancel_scan(mvm);
+       /* Due to a race condition, it's possible that mac80211 asks
+        * us to stop a hw_scan when it's already stopped.  This can
+        * happen, for instance, if we stopped the scan ourselves,
+        * called ieee80211_scan_completed() and the userspace called
+        * cancel scan before ieee80211_scan_work() could run.
+        * To handle that, simply return if the scan is not running.
+        */
+       /* FIXME: for now, we ignore this race for UMAC scans, since
+        * they don't set the scan_status.
+        */
+       if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
+           (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+               iwl_mvm_cancel_scan(mvm);
 
        mutex_unlock(&mvm->mutex);
 }
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
        int ret;
 
        mutex_lock(&mvm->mutex);
+
+       /* Due to a race condition, it's possible that mac80211 asks
+        * us to stop a sched_scan when it's already stopped.  This
+        * can happen, for instance, if we stopped the scan ourselves,
+        * called ieee80211_sched_scan_stopped() and the userspace called
+        * stop sched scan before ieee80211_sched_scan_stopped_work()
+        * could run.  To handle this, simply return if the scan is
+        * not running.
+        */
+       /* FIXME: for now, we ignore this race for UMAC scans, since
+        * they don't set the scan_status.
+        */
+       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+           !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+               mutex_unlock(&mvm->mutex);
+               return 0;
+       }
+
        ret = iwl_mvm_scan_offload_stop(mvm, false);
        mutex_unlock(&mvm->mutex);
        iwl_mvm_wait_for_async_handlers(mvm);
 
        return ret;
-
 }
 
 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
index 194bd1f939ca3c6a42e9c4a9d6cc7eb843c2d256..efa9688a4cf11150f6d1e92c71e0a367d90fea87 100644 (file)
@@ -134,9 +134,12 @@ enum rs_column_mode {
 #define MAX_NEXT_COLUMNS 7
 #define MAX_COLUMN_CHECKS 3
 
+struct rs_tx_column;
+
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta,
-                                    struct iwl_scale_tbl_info *tbl);
+                                    struct iwl_scale_tbl_info *tbl,
+                                    const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
        enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl)
+                        struct iwl_scale_tbl_info *tbl,
+                        const struct rs_tx_column *next_col)
 {
-       return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+       return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl)
+                         struct iwl_scale_tbl_info *tbl,
+                         const struct rs_tx_column *next_col)
 {
        if (!sta->ht_cap.ht_supported)
                return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl)
+                         struct iwl_scale_tbl_info *tbl,
+                         const struct rs_tx_column *next_col)
 {
        if (!sta->ht_cap.ht_supported)
                return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl)
+                        struct iwl_scale_tbl_info *tbl,
+                        const struct rs_tx_column *next_col)
 {
        struct rs_rate *rate = &tbl->rate;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
                for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
                        allow_func = next_col->checks[j];
-                       if (allow_func && !allow_func(mvm, sta, tbl))
+                       if (allow_func && !allow_func(mvm, sta, tbl, next_col))
                                break;
                }
 
index 7e9aa3cb325401fcf4105e278294b11657a90df1..c47c8051da7770f2a2a89c9b33868151c62f0ff6 100644 (file)
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
        if (mvm->scan_status == IWL_MVM_SCAN_NONE)
                return 0;
 
-       if (iwl_mvm_is_radio_killed(mvm))
+       if (iwl_mvm_is_radio_killed(mvm)) {
+               ret = 0;
                goto out;
+       }
 
        if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
            (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
                IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
                               sched ? "offloaded " : "", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-               return ret;
+               goto out;
        }
 
        IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
                       sched ? "offloaded " : "");
 
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-       if (ret)
-               return ret;
-
+out:
        /*
         * Clear the scan status so the next scan requests will succeed. This
         * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
        if (mvm->scan_status == IWL_MVM_SCAN_OS)
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
-out:
        mvm->scan_status = IWL_MVM_SCAN_NONE;
 
        if (notify) {
@@ -1177,7 +1176,7 @@ out:
                        ieee80211_scan_completed(mvm->hw, true);
        }
 
-       return 0;
+       return ret;
 }
 
 static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
index 54fafbf9a711fb9bacb3149fcb9f8ce1826c4b90..f8d6f306dd76d276b82056c9fb1a74956e78ac3c 100644 (file)
@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
         * request
         */
        list_for_each_entry(te_data, &mvm->time_event_list, list) {
-               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
-                   te_data->running) {
+               if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
                        is_p2p = true;
                        goto remove_te;
@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
         * request
         */
        list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
-               if (te_data->running) {
-                       mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-                       goto remove_te;
-               }
+               mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+               goto remove_te;
        }
 
 remove_te:
index 4a4c6586a8d2dcda2b6f49a5b767bcc48304e138..8908be6dbc48233db9183247e1928bfab6eaa491 100644 (file)
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
-       genlmsg_unicast(&init_net, skb, dst_portid);
+       if (genlmsg_unicast(&init_net, skb, dst_portid))
+               goto err_free_txskb;
 
        /* Enqueue the packet */
        skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        return;
 
 nla_put_failure:
+       nlmsg_free(skb);
+err_free_txskb:
        printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
        ieee80211_free_txskb(hw, my_skb);
        data->tx_failed++;
index 1d46774607116af89f96c6e835cb84775ddaa8ca..074f716020aae4e28d3e8340da846514065a94db 100644 (file)
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                }
 
                return true;
-       } else if (0x86DD == ether_type) {
-               return true;
+       } else if (ETH_P_IPV6 == ether_type) {
+               /* TODO: Handle any IPv6 cases that need special handling.
+                * For now, always return false
+                */
+               goto end;
        }
 
 end:
index f38227afe0998a668c40e2194def6128895bb077..3aa8648080c8dee1133b9faf0b6ac0c2ad538a9b 100644 (file)
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;
-       struct xenvif_stats *vif_stats;
 
        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
-                       vif_stats = &vif->queues[queue_index].stats;
+                       void *vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
index f7a31d2cb3f1819fdf7ebdeb40e0f6bf44aabe0a..997cf0901ac2dc50d92f26ea2c5c4e51511b8362 100644 (file)
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             s8       st);
+static void push_tx_responses(struct xenvif_queue *queue);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -657,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
        do {
                spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+               push_tx_responses(queue);
                spin_unlock_irqrestore(&queue->response_lock, flags);
                if (cons == end)
                        break;
@@ -1343,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 {
        unsigned int offset = skb_headlen(skb);
        skb_frag_t frags[MAX_SKB_FRAGS];
-       int i;
+       int i, f;
        struct ubuf_info *uarg;
        struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
 
@@ -1383,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
                frags[i].page_offset = 0;
                skb_frag_size_set(&frags[i], len);
        }
-       /* swap out with old one */
-       memcpy(skb_shinfo(skb)->frags,
-              frags,
-              i * sizeof(skb_frag_t));
-       skb_shinfo(skb)->nr_frags = i;
-       skb->truesize += i * PAGE_SIZE;
 
-       /* remove traces of mapped pages and frag_list */
+       /* Copied all the bits from the frag list -- free it. */
        skb_frag_list_init(skb);
+       xenvif_skb_zerocopy_prepare(queue, nskb);
+       kfree_skb(nskb);
+
+       /* Release all the original (foreign) frags. */
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+               skb_frag_unref(skb, f);
        uarg = skb_shinfo(skb)->destructor_arg;
        /* increase inflight counter to offset decrement in callback */
        atomic_inc(&queue->inflight_packets);
        uarg->callback(uarg, true);
        skb_shinfo(skb)->destructor_arg = NULL;
 
-       xenvif_skb_zerocopy_prepare(queue, nskb);
-       kfree_skb(nskb);
+       /* Fill the skb with the new (local) frags. */
+       memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+       skb_shinfo(skb)->nr_frags = i;
+       skb->truesize += i * PAGE_SIZE;
 
        return 0;
 }
@@ -1652,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
        unsigned long flags;
 
        pending_tx_info = &queue->pending_tx_info[pending_idx];
+
        spin_lock_irqsave(&queue->response_lock, flags);
+
        make_tx_response(queue, &pending_tx_info->req, status);
-       index = pending_index(queue->pending_prod);
+
+       /* Release the pending index before pushing the Tx response so
+        * it's available before a new Tx request is pushed by the
+        * frontend.
+        */
+       index = pending_index(queue->pending_prod++);
        queue->pending_ring[index] = pending_idx;
-       /* TX shouldn't use the index before we give it back here */
-       mb();
-       queue->pending_prod++;
+
+       push_tx_responses(queue);
+
        spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
@@ -1669,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
        RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;
-       int notify;
 
        resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id     = txp->id;
@@ -1679,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue,
                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
        queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+       int notify;
+
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
index 38d1c51f58b108dbee2b2a2c83f970f60471c569..7bcaeec876c0c3a5ea80a01d13b13b186bb2227f 100644 (file)
@@ -84,8 +84,7 @@ config OF_RESOLVE
        bool
 
 config OF_OVERLAY
-       bool
-       depends on OF
+       bool "Device Tree overlays"
        select OF_DYNAMIC
        select OF_RESOLVE
 
index 0a8aeb8523fe7d54a66207f25eb145e88addb90d..adb8764861c02893d1e965bf077c0c9706abdadb 100644 (file)
@@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
                                                const char *path)
 {
        struct device_node *child;
-       int len = strchrnul(path, '/') - path;
-       int term;
+       int len;
+       const char *end;
 
+       end = strchr(path, ':');
+       if (!end)
+               end = strchrnul(path, '/');
+
+       len = end - path;
        if (!len)
                return NULL;
 
-       term = strchrnul(path, ':') - path;
-       if (term < len)
-               len = term;
-
        __for_each_child_of_node(parent, child) {
                const char *name = strrchr(child->full_name, '/');
                if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
 
        /* The path could begin with an alias */
        if (*path != '/') {
-               char *p = strchrnul(path, '/');
-               int len = separator ? separator - path : p - path;
+               int len;
+               const char *p = separator;
+
+               if (!p)
+                       p = strchrnul(path, '/');
+               len = p - path;
 
                /* of_aliases must not be NULL */
                if (!of_aliases)
@@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
                path++; /* Increment past '/' delimiter */
                np = __of_find_node_by_path(np, path);
                path = strchrnul(path, '/');
+               if (separator && separator < path)
+                       break;
        }
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
        return np;
@@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
                        name = of_get_property(of_chosen, "linux,stdout-path", NULL);
                if (IS_ENABLED(CONFIG_PPC) && !name)
                        name = of_get_property(of_aliases, "stdout", NULL);
-               if (name)
+               if (name) {
                        of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
+                       add_preferred_console("stdout-path", 0, NULL);
+               }
        }
 
        if (!of_aliases)
index 352b4f28f82cd729fb842a210e7460ff9f123833..dee9270ba5471e730518b0ddc647469c8a52776f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/idr.h>
 
 #include "of_private.h"
 
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
                struct device_node *target, struct device_node *child)
 {
        const char *cname;
-       struct device_node *tchild, *grandchild;
+       struct device_node *tchild;
        int ret = 0;
 
        cname = kbasename(child->full_name);
index 0cf9a236d438a78c63e0949f9f010f4ba2f199f7..aba8946cac46dddd171aed6ddf6e35f58be152b7 100644 (file)
@@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void)
                 "option path test failed\n");
        of_node_put(np);
 
+       np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
+       selftest(np && !strcmp("test/option", options),
+                "option path test, subcase #1 failed\n");
+       of_node_put(np);
+
        np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
        selftest(np, "NULL option path test failed\n");
        of_node_put(np);
@@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void)
                 "option alias path test failed\n");
        of_node_put(np);
 
+       np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
+                                      &options);
+       selftest(np && !strcmp("test/alias/option", options),
+                "option alias path test, subcase #1 failed\n");
+       of_node_put(np);
+
        np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
        selftest(np, "NULL option alias path test failed\n");
        of_node_put(np);
@@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void)
        rc = of_property_match_string(np, "phandle-list-names", "first");
        selftest(rc == 0, "first expected:0 got:%i\n", rc);
        rc = of_property_match_string(np, "phandle-list-names", "second");
-       selftest(rc == 1, "second expected:0 got:%i\n", rc);
+       selftest(rc == 1, "second expected:1 got:%i\n", rc);
        rc = of_property_match_string(np, "phandle-list-names", "third");
-       selftest(rc == 2, "third expected:0 got:%i\n", rc);
+       selftest(rc == 2, "third expected:2 got:%i\n", rc);
        rc = of_property_match_string(np, "phandle-list-names", "fourth");
        selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
        rc = of_property_match_string(np, "missing-property", "blah");
@@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void)
        struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
        struct of_changeset chgset;
 
-       of_changeset_init(&chgset);
        n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
        selftest(n1, "testcase setup failure\n");
        n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
@@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path)
        return pdev != NULL;
 }
 
-#if IS_ENABLED(CONFIG_I2C)
+#if IS_BUILTIN(CONFIG_I2C)
 
 /* get the i2c client device instantiated at the path */
 static struct i2c_client *of_path_to_i2c_client(const char *path)
@@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void)
                return;
 }
 
-#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
+#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
 
 struct selftest_i2c_bus_data {
        struct platform_device  *pdev;
@@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = {
        .id_table = selftest_i2c_dev_id,
 };
 
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
 
 struct selftest_i2c_mux_data {
        int nchans;
@@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void)
                        "could not register selftest i2c bus driver\n"))
                return ret;
 
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
        ret = i2c_add_driver(&selftest_i2c_mux_driver);
        if (selftest(ret == 0,
                        "could not register selftest i2c mux driver\n"))
@@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void)
 
 static void of_selftest_overlay_i2c_cleanup(void)
 {
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
        i2c_del_driver(&selftest_i2c_mux_driver);
 #endif
        platform_driver_unregister(&selftest_i2c_bus_driver);
@@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void)
        of_selftest_overlay_10();
        of_selftest_overlay_11();
 
-#if IS_ENABLED(CONFIG_I2C)
+#if IS_BUILTIN(CONFIG_I2C)
        if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
                goto out;
 
index 1ec694a52379ea4c753cc1b397c931bc940bb1cc..464bf492ee2ae375e8fe92e0a5446dbdf0461f9d 100644 (file)
@@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
        if (err)
                return err;
 
-       resource_list_for_each_entry(win, res, list) {
+       resource_list_for_each_entry(win, res) {
                struct resource *parent, *res = win->res;
 
                switch (resource_type(res)) {
index aab55474dd0d6a4bd661ccbb160f1c2e8dc3c69c..ee082c0366ecca0d9978f9b23589646a0999a174 100644 (file)
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
        return false;
 }
 
-static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
                              int offset)
 {
        struct xgene_pcie_port *port = bus->sysdata;
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
                return NULL;
 
        xgene_pcie_set_rtdid_reg(bus, devfn);
-       return xgene_pcie_get_cfg_base(bus);
+       return xgene_pcie_get_cfg_base(bus) + offset;
 }
 
 static struct pci_ops xgene_pcie_ops = {
index aa012fb3834b48dbc0001565d453b2dc267009c1..312f23a8429cd9331b45afaf72a278e84bd41d83 100644 (file)
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev,
        struct pci_dev *pdev = to_pci_dev(dev);
        char *driver_override, *old = pdev->driver_override, *cp;
 
-       if (count > PATH_MAX)
+       /* We need to keep extra room for a newline */
+       if (count >= (PAGE_SIZE - 1))
                return -EINVAL;
 
        driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev,
 {
        struct pci_dev *pdev = to_pci_dev(dev);
 
-       return sprintf(buf, "%s\n", pdev->driver_override);
+       return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
 }
 static DEVICE_ATTR_RW(driver_override);
 
index 5afe03e28b911223c8909229d513a7c0e99fef4e..2062c224e32fbe7ee73da7b283205040fbd7164c 100644 (file)
 #define BYT_DIR_MASK           (BIT(1) | BIT(2))
 #define BYT_TRIG_MASK          (BIT(26) | BIT(25) | BIT(24))
 
+#define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \
+                                BYT_PIN_MUX)
+#define BYT_VAL_RESTORE_MASK   (BYT_DIR_MASK | BYT_LEVEL)
+
 #define BYT_NGPIO_SCORE                102
 #define BYT_NGPIO_NCORE                28
 #define BYT_NGPIO_SUS          44
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = {
        },
 };
 
+struct byt_gpio_pin_context {
+       u32 conf0;
+       u32 val;
+};
+
 struct byt_gpio {
        struct gpio_chip                chip;
        struct platform_device          *pdev;
        spinlock_t                      lock;
        void __iomem                    *reg_base;
        struct pinctrl_gpio_range       *range;
+       struct byt_gpio_pin_context     *saved_context;
 };
 
 #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip)
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
        return vg->reg_base + reg_offset + reg;
 }
 
-static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+{
+       void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+       unsigned long flags;
+       u32 value;
+
+       spin_lock_irqsave(&vg->lock, flags);
+       value = readl(reg);
+       value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+       writel(value, reg);
+       spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
 {
        /* SCORE pin 92-93 */
        if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
                offset >= 92 && offset <= 93)
-               return true;
+               return 1;
 
        /* SUS pin 11-21 */
        if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
                offset >= 11 && offset <= 21)
-               return true;
+               return 1;
 
-       return false;
+       return 0;
 }
 
 static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
        struct byt_gpio *vg = to_byt_gpio(chip);
        void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
-       u32 value;
-       bool special;
+       u32 value, gpio_mux;
 
        /*
         * In most cases, func pin mux 000 means GPIO function.
         * But, some pins may have func pin mux 001 represents
-        * GPIO function. Only allow user to export pin with
-        * func pin mux preset as GPIO function by BIOS/FW.
+        * GPIO function.
+        *
+        * Because there are devices out there where some pins were not
+        * configured correctly we allow changing the mux value from
+        * request (but print out warning about that).
         */
        value = readl(reg) & BYT_PIN_MUX;
-       special = is_special_pin(vg, offset);
-       if ((special && value != 1) || (!special && value)) {
-               dev_err(&vg->pdev->dev,
-                       "pin %u cannot be used as GPIO.\n", offset);
-               return -EINVAL;
+       gpio_mux = byt_get_gpio_mux(vg, offset);
+       if (WARN_ON(gpio_mux != value)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&vg->lock, flags);
+               value = readl(reg) & ~BYT_PIN_MUX;
+               value |= gpio_mux;
+               writel(value, reg);
+               spin_unlock_irqrestore(&vg->lock, flags);
+
+               dev_warn(&vg->pdev->dev,
+                        "pin %u forcibly re-configured as GPIO\n", offset);
        }
 
        pm_runtime_get(&vg->pdev->dev);
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
        struct byt_gpio *vg = to_byt_gpio(chip);
-       void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
-       u32 value;
-
-       /* clear interrupt triggering */
-       value = readl(reg);
-       value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
-       writel(value, reg);
 
+       byt_gpio_clear_triggering(vg, offset);
        pm_runtime_put(&vg->pdev->dev);
 }
 
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
        value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
                   BYT_TRIG_LVL);
 
-       switch (type) {
-       case IRQ_TYPE_LEVEL_HIGH:
-               value |= BYT_TRIG_LVL;
-       case IRQ_TYPE_EDGE_RISING:
-               value |= BYT_TRIG_POS;
-               break;
-       case IRQ_TYPE_LEVEL_LOW:
-               value |= BYT_TRIG_LVL;
-       case IRQ_TYPE_EDGE_FALLING:
-               value |= BYT_TRIG_NEG;
-               break;
-       case IRQ_TYPE_EDGE_BOTH:
-               value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
-               break;
-       }
        writel(value, reg);
 
+       if (type & IRQ_TYPE_EDGE_BOTH)
+               __irq_set_handler_locked(d->irq, handle_edge_irq);
+       else if (type & IRQ_TYPE_LEVEL_MASK)
+               __irq_set_handler_locked(d->irq, handle_level_irq);
+
        spin_unlock_irqrestore(&vg->lock, flags);
 
        return 0;
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
        struct irq_data *data = irq_desc_get_irq_data(desc);
        struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
        struct irq_chip *chip = irq_data_get_irq_chip(data);
-       u32 base, pin, mask;
+       u32 base, pin;
        void __iomem *reg;
-       u32 pending;
+       unsigned long pending;
        unsigned virq;
-       int looplimit = 0;
 
        /* check from GPIO controller which pin triggered the interrupt */
        for (base = 0; base < vg->chip.ngpio; base += 32) {
-
                reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
-
-               while ((pending = readl(reg))) {
-                       pin = __ffs(pending);
-                       mask = BIT(pin);
-                       /* Clear before handling so we can't lose an edge */
-                       writel(mask, reg);
-
+               pending = readl(reg);
+               for_each_set_bit(pin, &pending, 32) {
                        virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
                        generic_handle_irq(virq);
-
-                       /* In case bios or user sets triggering incorretly a pin
-                        * might remain in "interrupt triggered" state.
-                        */
-                       if (looplimit++ > 32) {
-                               dev_err(&vg->pdev->dev,
-                                       "Gpio %d interrupt flood, disabling\n",
-                                       base + pin);
-
-                               reg = byt_gpio_reg(&vg->chip, base + pin,
-                                                  BYT_CONF0_REG);
-                               mask = readl(reg);
-                               mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
-                                         BYT_TRIG_LVL);
-                               writel(mask, reg);
-                               mask = readl(reg); /* flush */
-                               break;
-                       }
                }
        }
        chip->irq_eoi(data);
 }
 
+static void byt_irq_ack(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct byt_gpio *vg = to_byt_gpio(gc);
+       unsigned offset = irqd_to_hwirq(d);
+       void __iomem *reg;
+
+       reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
+       writel(BIT(offset % 32), reg);
+}
+
 static void byt_irq_unmask(struct irq_data *d)
 {
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct byt_gpio *vg = to_byt_gpio(gc);
+       unsigned offset = irqd_to_hwirq(d);
+       unsigned long flags;
+       void __iomem *reg;
+       u32 value;
+
+       spin_lock_irqsave(&vg->lock, flags);
+
+       reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+       value = readl(reg);
+
+       switch (irqd_get_trigger_type(d)) {
+       case IRQ_TYPE_LEVEL_HIGH:
+               value |= BYT_TRIG_LVL;
+       case IRQ_TYPE_EDGE_RISING:
+               value |= BYT_TRIG_POS;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               value |= BYT_TRIG_LVL;
+       case IRQ_TYPE_EDGE_FALLING:
+               value |= BYT_TRIG_NEG;
+               break;
+       case IRQ_TYPE_EDGE_BOTH:
+               value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
+               break;
+       }
+
+       writel(value, reg);
+
+       spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
 {
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct byt_gpio *vg = to_byt_gpio(gc);
+
+       byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
 }
 
 static struct irq_chip byt_irqchip = {
        .name = "BYT-GPIO",
+       .irq_ack = byt_irq_ack,
        .irq_mask = byt_irq_mask,
        .irq_unmask = byt_irq_unmask,
        .irq_set_type = byt_irq_type,
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
        void __iomem *reg;
        u32 base, value;
+       int i;
+
+       /*
+        * Clear interrupt triggers for all pins that are GPIOs and
+        * do not use direct IRQ mode. This will prevent spurious
+        * interrupts from misconfigured pins.
+        */
+       for (i = 0; i < vg->chip.ngpio; i++) {
+               value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
+               if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
+                   !(value & BYT_DIRECT_IRQ_EN)) {
+                       byt_gpio_clear_triggering(vg, i);
+                       dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+               }
+       }
 
        /* clear interrupt status trigger registers */
        for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev)
        gc->can_sleep = false;
        gc->dev = dev;
 
+#ifdef CONFIG_PM_SLEEP
+       vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
+                                      sizeof(*vg->saved_context), GFP_KERNEL);
+#endif
+
        ret = gpiochip_add(gc);
        if (ret) {
                dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int byt_gpio_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct byt_gpio *vg = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < vg->chip.ngpio; i++) {
+               void __iomem *reg;
+               u32 value;
+
+               reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+               value = readl(reg) & BYT_CONF0_RESTORE_MASK;
+               vg->saved_context[i].conf0 = value;
+
+               reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+               value = readl(reg) & BYT_VAL_RESTORE_MASK;
+               vg->saved_context[i].val = value;
+       }
+
+       return 0;
+}
+
+static int byt_gpio_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct byt_gpio *vg = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < vg->chip.ngpio; i++) {
+               void __iomem *reg;
+               u32 value;
+
+               reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+               value = readl(reg);
+               if ((value & BYT_CONF0_RESTORE_MASK) !=
+                    vg->saved_context[i].conf0) {
+                       value &= ~BYT_CONF0_RESTORE_MASK;
+                       value |= vg->saved_context[i].conf0;
+                       writel(value, reg);
+                       dev_info(dev, "restored pin %d conf0 %#08x", i, value);
+               }
+
+               reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+               value = readl(reg);
+               if ((value & BYT_VAL_RESTORE_MASK) !=
+                    vg->saved_context[i].val) {
+                       u32 v;
+
+                       v = value & ~BYT_VAL_RESTORE_MASK;
+                       v |= vg->saved_context[i].val;
+                       if (v != value) {
+                               writel(v, reg);
+                               dev_dbg(dev, "restored pin %d val %#08x\n",
+                                       i, v);
+                       }
+               }
+       }
+
+       return 0;
+}
+#endif
+
 static int byt_gpio_runtime_suspend(struct device *dev)
 {
        return 0;
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops byt_gpio_pm_ops = {
-       .runtime_suspend = byt_gpio_runtime_suspend,
-       .runtime_resume = byt_gpio_runtime_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
+       SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume,
+                          NULL)
 };
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
index 3034fd03bced3ff7587f8bfd248c7bb212a0c16c..82f691eeeec4d82cd5e75b7a96be719befbcd57f 100644 (file)
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                     int value)
 {
+       chv_gpio_set(chip, offset, value);
        return pinctrl_gpio_direction_output(chip->base + offset);
 }
 
index f4cd0b9b2438b3548fafa4746b6c6a6cf8874bb4..a4814066ea0876d7f52063f5ba2cfb5ca5a6f6d3 100644 (file)
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d)
        /* the interrupt is already cleared before by reading ISR */
 }
 
-static unsigned int gpio_irq_startup(struct irq_data *d)
+static int gpio_irq_request_res(struct irq_data *d)
 {
        struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
        unsigned        pin = d->hwirq;
        int ret;
 
        ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-       if (ret) {
+       if (ret)
                dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
                        d->hwirq);
-               return ret;
-       }
-       gpio_irq_unmask(d);
-       return 0;
+
+       return ret;
 }
 
-static void gpio_irq_shutdown(struct irq_data *d)
+static void gpio_irq_release_res(struct irq_data *d)
 {
        struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
        unsigned        pin = d->hwirq;
 
-       gpio_irq_mask(d);
        gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
 }
 
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void)
 static struct irq_chip gpio_irqchip = {
        .name           = "GPIO",
        .irq_ack        = gpio_irq_ack,
-       .irq_startup    = gpio_irq_startup,
-       .irq_shutdown   = gpio_irq_shutdown,
+       .irq_request_resources = gpio_irq_request_res,
+       .irq_release_resources = gpio_irq_release_res,
        .irq_disable    = gpio_irq_mask,
        .irq_mask       = gpio_irq_mask,
        .irq_unmask     = gpio_irq_unmask,
index 24c5d88f943f7809910507727ff50d4a43532530..3c68a8e5e0dd4d4bbb8d6fae4166a6bf92e521a2 100644 (file)
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
        .pins = sun4i_a10_pins,
        .npins = ARRAY_SIZE(sun4i_a10_pins),
        .irq_banks = 1,
+       .irq_read_needs_mux = true,
 };
 
 static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
index 3d07443377362f0b84f59d971dcf73ecdf1d236c..f8e171b7669380d3d1f018124e04488fd4682f8f 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "../core.h"
+#include "../../gpio/gpiolib.h"
 #include "pinctrl-sunxi.h"
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip;
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
 static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
-
        u32 reg = sunxi_data_reg(offset);
        u8 index = sunxi_data_offset(offset);
-       u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+       u32 set_mux = pctl->desc->irq_read_needs_mux &&
+                       test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+       u32 val;
+
+       if (set_mux)
+               sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
+
+       val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+
+       if (set_mux)
+               sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
 
        return val;
 }
index 5a51523a34599daba95571cb0d8b2597fdf4f45c..e248e81a0f9e0f8446cbd634040c93632b5e1268 100644 (file)
@@ -77,6 +77,9 @@
 #define IRQ_LEVEL_LOW          0x03
 #define IRQ_EDGE_BOTH          0x04
 
+#define SUN4I_FUNC_INPUT       0
+#define SUN4I_FUNC_IRQ         6
+
 struct sunxi_desc_function {
        const char      *name;
        u8              muxval;
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc {
        int                             npins;
        unsigned                        pin_base;
        unsigned                        irq_banks;
+       bool                            irq_read_needs_mux;
 };
 
 struct sunxi_pinctrl_function {
index 638e797037da973f7756b416a47f37c3ae314025..97527614141bf4a406528503d404ad8efd64695f 100644 (file)
@@ -735,6 +735,31 @@ config INTEL_IPS
          functionality.  If in doubt, say Y here; it will only load on
          supported platforms.
 
+config INTEL_IMR
+       bool "Intel Isolated Memory Region support"
+       default n
+       depends on X86_INTEL_QUARK && IOSF_MBI
+       ---help---
+         This option provides a means to manipulate Isolated Memory Regions.
+         IMRs are a set of registers that define read and write access masks
+         to prohibit certain system agents from accessing memory with 1 KiB
+         granularity.
+
+         IMRs make it possible to control read/write access to an address
+         by hardware agents inside the SoC. Read and write masks can be
+         defined for:
+               - eSRAM flush
+               - Dirty CPU snoop (write only)
+               - RMU access
+               - PCI Virtual Channel 0/Virtual Channel 1
+               - SMM mode
+               - Non SMM mode
+
+         Quark contains a set of eight IMR registers and makes use of those
+         registers during its bootup process.
+
+         If you are running on a Galileo/Quark say Y here.
+
 config IBM_RTL
        tristate "Device driver to enable PRTL support"
        depends on X86 && PCI
index 782e82289571b219ee0faa695c9d17cefb832c69..f980ff7166e98e93546f7f79657087983a5388ce 100644 (file)
@@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
-               if (__check_region(&ioport_resource, *port, length(port, end)))
+               if (!request_region(*port, length(port, end), "pnp"))
                        return 0;
+               release_region(*port, length(port, end));
        }
 
        /* check if the resource is reserved */
@@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
-               if (check_mem_region(*addr, length(addr, end)))
+               if (!request_mem_region(*addr, length(addr, end), "pnp"))
                        return 0;
+               release_mem_region(*addr, length(addr, end));
        }
 
        /* check if the resource is reserved */
index b899947d839d87b03608d1f9bf4b4208cf57aa01..a4a8a6dc60c470bf6911632a8b14f790cc415ef4 100644 (file)
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
        }
 
        if (rdev->ena_pin) {
-               ret = regulator_ena_gpio_ctrl(rdev, true);
-               if (ret < 0)
-                       return ret;
-               rdev->ena_gpio_state = 1;
+               if (!rdev->ena_gpio_state) {
+                       ret = regulator_ena_gpio_ctrl(rdev, true);
+                       if (ret < 0)
+                               return ret;
+                       rdev->ena_gpio_state = 1;
+               }
        } else if (rdev->desc->ops->enable) {
                ret = rdev->desc->ops->enable(rdev);
                if (ret < 0)
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
        trace_regulator_disable(rdev_get_name(rdev));
 
        if (rdev->ena_pin) {
-               ret = regulator_ena_gpio_ctrl(rdev, false);
-               if (ret < 0)
-                       return ret;
-               rdev->ena_gpio_state = 0;
+               if (rdev->ena_gpio_state) {
+                       ret = regulator_ena_gpio_ctrl(rdev, false);
+                       if (ret < 0)
+                               return ret;
+                       rdev->ena_gpio_state = 0;
+               }
 
        } else if (rdev->desc->ops->disable) {
                ret = rdev->desc->ops->disable(rdev);
@@ -3444,13 +3448,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
        if (attr == &dev_attr_requested_microamps.attr)
                return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
 
-       /* all the other attributes exist to support constraints;
-        * don't show them if there are no constraints, or if the
-        * relevant supporting methods are missing.
-        */
-       if (!rdev->constraints)
-               return 0;
-
        /* constraints need specific supporting methods */
        if (attr == &dev_attr_min_microvolts.attr ||
            attr == &dev_attr_max_microvolts.attr)
@@ -3633,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
                                 config->ena_gpio, ret);
                        goto wash;
                }
-
-               if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
-                       rdev->ena_gpio_state = 1;
-
-               if (config->ena_gpio_invert)
-                       rdev->ena_gpio_state = !rdev->ena_gpio_state;
        }
 
        /* set regulator constraints */
@@ -3807,9 +3798,11 @@ int regulator_suspend_finish(void)
        list_for_each_entry(rdev, &regulator_list, list) {
                mutex_lock(&rdev->mutex);
                if (rdev->use_count > 0  || rdev->constraints->always_on) {
-                       error = _regulator_do_enable(rdev);
-                       if (error)
-                               ret = error;
+                       if (!_regulator_is_enabled(rdev)) {
+                               error = _regulator_do_enable(rdev);
+                               if (error)
+                                       ret = error;
+                       }
                } else {
                        if (!have_full_constraints())
                                goto unlock;
index bc6100103f7f476e7381482a404f33ea01851fad..f0489cb9018b4e7895937d330d077e100783fc16 100644 (file)
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
        config.regmap = chip->regmap;
        config.of_node = dev->of_node;
 
+       /* Mask all interrupt sources to deassert interrupt line */
+       error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0);
+       if (!error)
+               error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0);
+       if (error) {
+               dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error);
+               return error;
+       }
+
        rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
        if (IS_ERR(rdev)) {
                dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
index 1f93b752a81cdc36a824459ebf66354aea275af8..3fd44353cc80eea153fe1118c81180b5e2d21b67 100644 (file)
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(0),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG2",
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(1),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG3",
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_BUCK4_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(2),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG4",
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(3),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG5",
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(4),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG6",
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(5),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG7",
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(6),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "LDO_REG8",
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = {
                .vsel_mask = RK808_LDO_VSEL_MASK,
                .enable_reg = RK808_LDO_EN_REG,
                .enable_mask = BIT(7),
+               .enable_time = 400,
                .owner = THIS_MODULE,
        }, {
                .name = "SWITCH_REG1",
index e2cffe01b8072263330b28164130cf2fd23953b5..fb991ec764235d55443a9f99414efc3511cad7a2 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
index 92f6af6da6991f0f15400b07572b4338458ff8ca..73354ee278771ac0be6fc9fcaa40b96f9964bc9b 100644 (file)
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
        void *bufs_va;
        int err = 0, i;
        size_t total_buf_space;
+       bool notify;
 
        vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
        if (!vrp)
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
                }
        }
 
+       /*
+        * Prepare to kick but don't notify yet - we can't do this before
+        * device is ready.
+        */
+       notify = virtqueue_kick_prepare(vrp->rvq);
+
+       /* From this point on, we can notify and get callbacks. */
+       virtio_device_ready(vdev);
+
        /* tell the remote processor it can start sending messages */
-       virtqueue_kick(vrp->rvq);
+       /*
+        * this might be concurrent with callbacks, but we are only
+        * doing notify, not a full kick here, so that's ok.
+        */
+       if (notify)
+               virtqueue_notify(vrp->rvq);
 
        dev_info(&vdev->dev, "rpmsg host is online\n");
 
index 70a5d94cc766af5302dfaf945dfb4a1a36b48236..b4f7744f67510b87be03400d270c7328e6c77912 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/suspend.h>
 #include <linux/uaccess.h>
 
 #include "rtc-at91rm9200.h"
@@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs;
 static int irq;
 static DEFINE_SPINLOCK(at91_rtc_lock);
 static u32 at91_rtc_shadow_imr;
+static bool suspended;
+static DEFINE_SPINLOCK(suspended_lock);
+static unsigned long cached_events;
+static u32 at91_rtc_imr;
 
 static void at91_rtc_write_ier(u32 mask)
 {
@@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
        unsigned int rtsr;
        unsigned long events = 0;
+       int ret = IRQ_NONE;
 
+       spin_lock(&suspended_lock);
        rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
        if (rtsr) {             /* this interrupt is shared!  Is it ours? */
                if (rtsr & AT91_RTC_ALARM)
@@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 
                at91_rtc_write(AT91_RTC_SCCR, rtsr);    /* clear status reg */
 
-               rtc_update_irq(rtc, 1, events);
+               if (!suspended) {
+                       rtc_update_irq(rtc, 1, events);
 
-               dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__,
-                       events >> 8, events & 0x000000FF);
+                       dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n",
+                               __func__, events >> 8, events & 0x000000FF);
+               } else {
+                       cached_events |= events;
+                       at91_rtc_write_idr(at91_rtc_imr);
+                       pm_system_wakeup();
+               }
 
-               return IRQ_HANDLED;
+               ret = IRQ_HANDLED;
        }
-       return IRQ_NONE;                /* not handled */
+       spin_unlock(&suspended_lock);
+
+       return ret;
 }
 
 static const struct at91_rtc_config at91rm9200_config = {
@@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
                                        AT91_RTC_CALEV);
 
        ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
-                               IRQF_SHARED,
-                               "at91_rtc", pdev);
+                              IRQF_SHARED | IRQF_COND_SUSPEND,
+                              "at91_rtc", pdev);
        if (ret) {
                dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
                return ret;
@@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev)
 
 /* AT91RM9200 RTC Power management control */
 
-static u32 at91_rtc_imr;
-
 static int at91_rtc_suspend(struct device *dev)
 {
        /* this IRQ is shared with DBGU and other hardware which isn't
@@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev)
        at91_rtc_imr = at91_rtc_read_imr()
                        & (AT91_RTC_ALARM|AT91_RTC_SECEV);
        if (at91_rtc_imr) {
-               if (device_may_wakeup(dev))
+               if (device_may_wakeup(dev)) {
+                       unsigned long flags;
+
                        enable_irq_wake(irq);
-               else
+
+                       spin_lock_irqsave(&suspended_lock, flags);
+                       suspended = true;
+                       spin_unlock_irqrestore(&suspended_lock, flags);
+               } else {
                        at91_rtc_write_idr(at91_rtc_imr);
+               }
        }
        return 0;
 }
 
 static int at91_rtc_resume(struct device *dev)
 {
+       struct rtc_device *rtc = dev_get_drvdata(dev);
+
        if (at91_rtc_imr) {
-               if (device_may_wakeup(dev))
+               if (device_may_wakeup(dev)) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&suspended_lock, flags);
+
+                       if (cached_events) {
+                               rtc_update_irq(rtc, 1, cached_events);
+                               cached_events = 0;
+                       }
+
+                       suspended = false;
+                       spin_unlock_irqrestore(&suspended_lock, flags);
+
                        disable_irq_wake(irq);
-               else
-                       at91_rtc_write_ier(at91_rtc_imr);
+               }
+               at91_rtc_write_ier(at91_rtc_imr);
        }
        return 0;
 }
index 2183fd2750abd9d4b388ca4a5f48916600356122..5ccaee32df7223ad1aeb2d8cb74233caa7fae351 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/suspend.h>
 #include <linux/clk.h>
 
 /*
@@ -77,6 +78,9 @@ struct sam9_rtc {
        unsigned int            gpbr_offset;
        int                     irq;
        struct clk              *sclk;
+       bool                    suspended;
+       unsigned long           events;
+       spinlock_t              lock;
 };
 
 #define rtt_readl(rtc, field) \
@@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
        return 0;
 }
 
-/*
- * IRQ handler for the RTC
- */
-static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
 {
-       struct sam9_rtc *rtc = _rtc;
        u32 sr, mr;
-       unsigned long events = 0;
 
        /* Shared interrupt may be for another device.  Note: reading
         * SR clears it, so we must only read it in this irq handler!
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
 
        /* alarm status */
        if (sr & AT91_RTT_ALMS)
-               events |= (RTC_AF | RTC_IRQF);
+               rtc->events |= (RTC_AF | RTC_IRQF);
 
        /* timer update/increment */
        if (sr & AT91_RTT_RTTINC)
-               events |= (RTC_UF | RTC_IRQF);
+               rtc->events |= (RTC_UF | RTC_IRQF);
+
+       return IRQ_HANDLED;
+}
+
+static void at91_rtc_flush_events(struct sam9_rtc *rtc)
+{
+       if (!rtc->events)
+               return;
 
-       rtc_update_irq(rtc->rtcdev, 1, events);
+       rtc_update_irq(rtc->rtcdev, 1, rtc->events);
+       rtc->events = 0;
 
        pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
-               events >> 8, events & 0x000000FF);
+               rtc->events >> 8, rtc->events & 0x000000FF);
+}
 
-       return IRQ_HANDLED;
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+{
+       struct sam9_rtc *rtc = _rtc;
+       int ret;
+
+       spin_lock(&rtc->lock);
+
+       ret = at91_rtc_cache_events(rtc);
+
+       /* We're called in suspended state */
+       if (rtc->suspended) {
+               /* Mask irqs coming from this peripheral */
+               rtt_writel(rtc, MR,
+                          rtt_readl(rtc, MR) &
+                          ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+               /* Trigger a system wakeup */
+               pm_system_wakeup();
+       } else {
+               at91_rtc_flush_events(rtc);
+       }
+
+       spin_unlock(&rtc->lock);
+
+       return ret;
 }
 
 static const struct rtc_class_ops at91_rtc_ops = {
@@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev)
 
        /* register irq handler after we know what name we'll use */
        ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
-                               IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc);
+                              IRQF_SHARED | IRQF_COND_SUSPEND,
+                              dev_name(&rtc->rtcdev->dev), rtc);
        if (ret) {
                dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
                return ret;
@@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev)
        rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
        if (rtc->imr) {
                if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
+                       unsigned long flags;
+
                        enable_irq_wake(rtc->irq);
+                       spin_lock_irqsave(&rtc->lock, flags);
+                       rtc->suspended = true;
+                       spin_unlock_irqrestore(&rtc->lock, flags);
                        /* don't let RTTINC cause wakeups */
                        if (mr & AT91_RTT_RTTINCIEN)
                                rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev)
        u32             mr;
 
        if (rtc->imr) {
+               unsigned long flags;
+
                if (device_may_wakeup(dev))
                        disable_irq_wake(rtc->irq);
                mr = rtt_readl(rtc, MR);
                rtt_writel(rtc, MR, mr | rtc->imr);
+
+               spin_lock_irqsave(&rtc->lock, flags);
+               rtc->suspended = false;
+               at91_rtc_cache_events(rtc);
+               at91_rtc_flush_events(rtc);
+               spin_unlock_irqrestore(&rtc->lock, flags);
        }
 
        return 0;
index 8c3bfcb115b787318d0ce942c07c655f79fc1cb7..803869c7d7c206f6dbff5cc7427ea3faf8e1bf2b 100644 (file)
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
         * of this RTC chip.  We check for it anyways in case support is
         * added in the future.
         */
-       if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+       if (unlikely(seconds >= 0xc0))
                alrm->time.tm_sec = -1;
        else
                alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
                                                       RTC_SECS_BCD_MASK,
                                                       RTC_SECS_BIN_MASK);
 
-       if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+       if (unlikely(minutes >= 0xc0))
                alrm->time.tm_min = -1;
        else
                alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
                                                       RTC_MINS_BCD_MASK,
                                                       RTC_MINS_BIN_MASK);
 
-       if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+       if (unlikely(hours >= 0xc0))
                alrm->time.tm_hour = -1;
        else
                alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
         * field, and we only support four fields.  We put the support
         * here anyways for the future.
         */
-       if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+       if (unlikely(seconds >= 0xc0))
                seconds = 0xff;
 
-       if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+       if (unlikely(minutes >= 0xc0))
                minutes = 0xff;
 
-       if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+       if (unlikely(hours >= 0xc0))
                hours = 0xff;
 
        alrm->time.tm_mon       = -1;
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 /* ----------------------------------------------------------------------- */
 /* /dev/rtcX Interface functions */
 
-#ifdef CONFIG_RTC_INTF_DEV
 /**
  * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
  * @dev: pointer to device structure.
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
        return 0;
 }
-#endif
 /* ----------------------------------------------------------------------- */
 
 
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev,
                ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
 
        /* Make sure we actually matched something. */
-       if (!bcd_reg_info && !bin_reg_info)
+       if (!bcd_reg_info || !bin_reg_info)
                return -EINVAL;
 
        /* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev,
                return -EINVAL;
 
        /* Make sure we actually matched something. */
-       if (!bcd_reg_info && !bin_reg_info)
+       if (!bcd_reg_info || !bin_reg_info)
                return -EINVAL;
 
        /* Check for a valid range. */
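The two kinds of change in the ds1685 hunks above are easy to conflate. The alarm fields are 8-bit values, so the removed upper-bound comparison could never be false; and the sysfs lookups are only usable when both descriptors were found, which needs ||, not &&. A minimal standalone sketch of both checks, with illustrative names and plain C types rather than the driver's own:

    #include <stdbool.h>
    #include <stdint.h>

    /* For an 8-bit field, (v <= 0xff) is always true, so the range test
     * collapses to the lower bound alone. */
    static bool alarm_field_is_wildcard(uint8_t v)
    {
            return v >= 0xc0;       /* same as (v >= 0xc0 && v <= 0xff) */
    }

    /* With the old &&, a single missing descriptor slipped through and was
     * dereferenced further down; || rejects the lookup if either is NULL. */
    static bool lookup_failed(const void *bcd_reg_info, const void *bin_reg_info)
    {
            return !bcd_reg_info || !bin_reg_info;
    }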
index 4241eeab3386ab3803d4238471fd2bd92ac66226..f4cf6851fae971e34d4b43e945ac91160bcaef21 100644 (file)
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = {
 
 static struct s3c_rtc_data const s3c6410_rtc_data = {
        .max_user_freq          = 32768,
+       .needs_src_clk          = true,
        .irq_handler            = s3c6410_rtc_irq,
        .set_freq               = s3c6410_rtc_setfreq,
        .enable_tick            = s3c6410_rtc_enable_tick,
index 96128cb009f352dba8497a47427dda713f608a4b..da212813f2d5dcd301a923df946daa9c481cb06f 100644 (file)
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
         * parse input
         */
        num_of_segments = 0;
-       for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
+       for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
                for (j = i; (buf[j] != ':') &&
                        (buf[j] != '\0') &&
                        (buf[j] != '\n') &&
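The dcssblk loop above is reordered so that the bound on i is evaluated before buf[i] is read; && short-circuits left to right, so the old order could read one byte past the end of the buffer on the final pass. A self-contained sketch of the safe pattern, with buf and count standing in for the sysfs buffer and its length:

    #include <stddef.h>

    /* Returns the length of the leading token, never reading past count bytes. */
    static size_t scan_segment_name(const char *buf, size_t count)
    {
            size_t i;

            /* Bound first, contents second: buf[i] is only read while i < count. */
            for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
                    ;
            return i;
    }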
index 09db45296eed2c87330399abbfecdbb2f9b44014..7497ddde2dd67a28ce2e042970bebda9b3e9da5b 100644 (file)
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq)
                        add = 0;
                        continue;
                }
-               for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+               for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
                        if (clusters_intersect(req, iter->request[pos]) &&
                            (rq_data_dir(req) == WRITE ||
                             rq_data_dir(iter->request[pos]) == WRITE)) {
index aa3e2c7cd83c8a5275b900f37b23912884a789c7..a6f5ee80fadc5f3e8d462fc43b615a1134d509f6 100644 (file)
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp)
                        break;
                cpu_relax();
        }
-       if (resid > 1) {
-               /* FIFO not cleared */
-               shost_printk(KERN_INFO, esp->host,
-                            "FIFO not cleared, %d bytes left\n",
-                            resid);
-       }
 
        /*
         * When there is a residual BCMPLT will never be set
index 96241b20fd2c8b690e57139e8e404a522dcb2392..a7cc618378187fb7d38e504d51befb93e78b223c 100644 (file)
@@ -585,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
                        "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
                return NULL;
        }
-       shost->dma_boundary = pcidev->dma_mask;
        shost->max_id = BE2_MAX_SESSIONS;
        shost->max_channel = 0;
        shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
index 95d581c45413fb38dd778e2b4695e7f7159c9c0b..a1cfbd3dda4713d05f254b0a9ef33131f6c12c0b 100644 (file)
@@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
                                                char *name)
 {
        struct workqueue_struct *wq = NULL;
-       char wq_name[20];
 
-       snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
-       wq = alloc_ordered_workqueue(wq_name, 0);
+       wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
        if (!wq)
                dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
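alloc_ordered_workqueue() accepts a printf-style format plus arguments, so the hunk above can drop the 20-byte scratch buffer (which could silently truncate longer controller names) and let the workqueue core build the name itself. A sketch of the call shape, with a hypothetical name and controller index:

    struct workqueue_struct *wq;

    /* The workqueue core formats and stores the full name; no local buffer
     * and no truncation to worry about. */
    wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, "rescan", 3);
    if (!wq)
            return -ENOMEM;         /* hypothetical caller's error path */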
 
index 62b58d38ce2e63beda6d1ba4d9876c9af2e51008..60de66252fa2b9ea168648c5097da7f8ae7f49ae 100644 (file)
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
        struct sas_discovery_event *ev = to_sas_discovery_event(work);
        struct asd_sas_port *port = ev->port;
        struct sas_ha_struct *ha = port->ha;
+       struct domain_device *ddev = port->port_dev;
 
        /* prevent revalidation from finding sata links in recovery */
        mutex_lock(&ha->disco_mutex);
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
        SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
                    task_pid_nr(current));
 
-       if (port->port_dev)
-               res = sas_ex_revalidate_domain(port->port_dev);
+       if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+                    ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+               res = sas_ex_revalidate_domain(ddev);
 
        SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
                    port->id, task_pid_nr(current), res);
index 73f9feecda72b71552b63eb28b7bec9106f6fd4e..99f43b7fc9ab74256d6f22f17e3c6d75c6e5fcdb 100644 (file)
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
         * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
         */
        memset(&port_name, 0, 36);
-       snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-               fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-               fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+       snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
        /*
         * Locate our struct se_node_acl either from an explicit NodeACL created
         * via ConfigFS, or via running in TPG demo mode.
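The %8phC specifier used above is the kernel's printf extension for hex byte arrays: %*ph dumps the given number of bytes, and the trailing C selects ':' as the separator, replacing the hand-rolled eight-%02x format. A short illustration (the WWPN bytes are made up):

    u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x31, 0x4c, 0x48 };
    char port_name[36];

    snprintf(port_name, sizeof(port_name), "%8phC", wwpn);
    /* port_name now holds "21:00:00:24:ff:31:4c:48" */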
index 0cbc1fb45f10eb90ac74b1a20a2c755c4dc6a3cb..2270bd51f9c2c240c669e562eb77052f89425a83 100644 (file)
@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
        sg_io_hdr_t *hp = &srp->header;
-       int err = 0;
+       int err = 0, err2;
        int len;
 
        if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
                goto err_out;
        }
 err_out:
-       err = sg_finish_rem_req(srp);
-       return (0 == err) ? count : err;
+       err2 = sg_finish_rem_req(srp);
+       return err ? : err2 ? : count;
 }
 
 static ssize_t
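The new return statement relies on the GNU "a ? : b" extension, common in the kernel: it yields a when a is non-zero and b otherwise, evaluating a only once. Spelled out, the chain above means: report the read error if there was one, otherwise the teardown error, otherwise the byte count:

    if (err)
            return err;     /* error while building or copying the reply */
    if (err2)
            return err2;    /* error reported by sg_finish_rem_req() */
    return count;           /* success: the whole header was consumed */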
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
        }
        /* Rely on write phase to clean out srp status values, so no "else" */
 
+       /*
+        * Free the request as soon as it is complete so that its resources
+        * can be reused without waiting for userspace to read() the
+        * result.  But keep the associated bio (if any) around until
+        * blk_rq_unmap_user() can be called from user context.
+        */
+       srp->rq = NULL;
+       if (rq->cmd != rq->__cmd)
+               kfree(rq->cmd);
+       __blk_put_request(rq->q, rq);
+
        write_lock_irqsave(&sfp->rq_list_lock, iflags);
        if (unlikely(srp->orphan)) {
                if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
                        return -ENOMEM;
        }
 
-       rq = blk_get_request(q, rw, GFP_ATOMIC);
+       /*
+        * NOTE
+        *
+        * With scsi-mq enabled, there are a fixed number of preallocated
+        * requests equal in number to shost->can_queue.  If all of the
+        * preallocated requests are already in use, then blk_get_request() with
+        * GFP_ATOMIC will return -EWOULDBLOCK, whereas using GFP_KERNEL
+        * will cause blk_get_request() to sleep until an active command
+        * completes, freeing up a request.  Neither option is ideal, but
+        * GFP_KERNEL is the better choice to prevent userspace from getting an
+        * unexpected EWOULDBLOCK.
+        *
+        * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+        * does not sleep except under memory pressure.
+        */
+       rq = blk_get_request(q, rw, GFP_KERNEL);
        if (IS_ERR(rq)) {
                kfree(long_cmdp);
                return PTR_ERR(rq);
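As a counterpoint to the NOTE above: a caller that is not allowed to sleep would have to keep GFP_ATOMIC and handle the allocation failure itself. A hedged sketch of that alternative (the error-code mapping is illustrative, not what sg actually does):

    rq = blk_get_request(q, rw, GFP_ATOMIC);
    if (IS_ERR(rq)) {
            kfree(long_cmdp);
            if (PTR_ERR(rq) == -EWOULDBLOCK)
                    return -EAGAIN;         /* all preallocated requests busy */
            return PTR_ERR(rq);
    }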
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
        SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
                                      "sg_finish_rem_req: res_used=%d\n",
                                      (int) srp->res_used));
-       if (srp->rq) {
-               if (srp->bio)
-                       ret = blk_rq_unmap_user(srp->bio);
+       if (srp->bio)
+               ret = blk_rq_unmap_user(srp->bio);
 
+       if (srp->rq) {
                if (srp->rq->cmd != srp->rq->__cmd)
                        kfree(srp->rq->cmd);
                blk_put_request(srp->rq);
index 7702664d7ed3257e1670ee7d97ee19cadb1dc7cc..289ad016d92504e9d4ebbc47cf8d244ab6cb68b5 100644 (file)
@@ -870,6 +870,7 @@ fail_free_params:
 }
 
 static struct scsi_host_template wd719x_template = {
+       .module                         = THIS_MODULE,
        .name                           = "Western Digital 719x",
        .queuecommand                   = wd719x_queuecommand,
        .eh_abort_handler               = wd719x_abort,
index f3ee439d6f0e23b41bd54b0ad1bbb7941ec4e505..cd4c293f0dd0d375be6d6d815108aeb588304fec 100644 (file)
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void)
                if (!of_machine_is_compatible("renesas,emev2") &&
                    !of_machine_is_compatible("renesas,r7s72100") &&
                    !of_machine_is_compatible("renesas,r8a73a4") &&
+#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
                    !of_machine_is_compatible("renesas,r8a7740") &&
+#endif
                    !of_machine_is_compatible("renesas,r8a7778") &&
                    !of_machine_is_compatible("renesas,r8a7779") &&
                    !of_machine_is_compatible("renesas,r8a7790") &&
index 9af7841f2e8c6010060c17a038bc8c86c792a729..06de34001c6695a5a741925e862d7baa05853c98 100644 (file)
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
                        (unsigned long long)xfer->rx_dma);
        }
 
-       /* REVISIT: We're waiting for ENDRX before we start the next
+       /* REVISIT: We're waiting for RXBUFF before we start the next
         * transfer because we need to handle some difficult timing
-        * issues otherwise. If we wait for ENDTX in one transfer and
-        * then starts waiting for ENDRX in the next, it's difficult
-        * to tell the difference between the ENDRX interrupt we're
-        * actually waiting for and the ENDRX interrupt of the
+        * issues otherwise. If we wait for TXBUFE in one transfer and
+        * then start waiting for RXBUFF in the next, it's difficult
+        * to tell the difference between the RXBUFF interrupt we're
+        * actually waiting for and the RXBUFF interrupt of the
         * previous transfer.
         *
         * It should be doable, though. Just not now...
         */
-       spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
+       spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
        spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
 }
 
index a0197fd4e95c40b26817fe942d2069a0381a41e5..3ce39d10fafbc270d47a17107b7cc5ce257dca94 100644 (file)
@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
                                1,
                                DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!txdesc)
+               return NULL;
+
        txdesc->callback = dw_spi_dma_tx_done;
        txdesc->callback_param = dws;
 
@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
                                1,
                                DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!rxdesc)
+               return NULL;
+
        rxdesc->callback = dw_spi_dma_rx_done;
        rxdesc->callback_param = dws;
 
index 5ba331047cbefdea6d948ca20c8d6d3898cafbb0..6d331e0db33122b23cfa17f6602618a510efcde1 100644 (file)
@@ -36,13 +36,13 @@ struct spi_pci_desc {
 
 static struct spi_pci_desc spi_pci_mid_desc_1 = {
        .setup = dw_spi_mid_init,
-       .num_cs = 32,
+       .num_cs = 5,
        .bus_num = 0,
 };
 
 static struct spi_pci_desc spi_pci_mid_desc_2 = {
        .setup = dw_spi_mid_init,
-       .num_cs = 4,
+       .num_cs = 2,
        .bus_num = 1,
 };
 
index 5a97a62b298ac1a526d4c1bde932f3eb2d5ad574..4847afba89f4e933e5a5d778c5f94e1a14ffe156 100644 (file)
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
        if (!dws->fifo_len) {
                u32 fifo;
 
-               for (fifo = 2; fifo <= 256; fifo++) {
+               for (fifo = 1; fifo < 256; fifo++) {
                        dw_writew(dws, DW_SPI_TXFLTR, fifo);
                        if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                                break;
                }
                dw_writew(dws, DW_SPI_TXFLTR, 0);
 
-               dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+               dws->fifo_len = (fifo == 1) ? 0 : fifo;
                dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
        }
 }
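For readers of the FIFO-detection change above: the loop probes the controller by writing increasing TX FIFO threshold values and stops at the first value that does not read back. Under the new loop's assumption that thresholds 0 through depth-1 are writable, that first rejected value is the FIFO depth itself. The same logic again, as a commented sketch rather than a further change:

    u32 fifo;

    for (fifo = 1; fifo < 256; fifo++) {
            dw_writew(dws, DW_SPI_TXFLTR, fifo);
            if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                    break;                          /* write did not stick */
    }
    dw_writew(dws, DW_SPI_TXFLTR, 0);               /* restore the default threshold */

    /* fifo == 1 means even the first probe failed: no FIFO detected. */
    dws->fifo_len = (fifo == 1) ? 0 : fifo;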
index c01567d53581c0dcdfc6fcb61e7f6e821d0e0b39..e649bc7d4c086bb789d2dc1803c621bebdc29a19 100644 (file)
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master,
        unsigned long flags;
        int ret;
 
+       if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+               dev_err(spfi->dev,
+                       "Transfer length (%d) is greater than the max supported (%d)",
+                       xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+               return -EINVAL;
+       }
+
        /*
         * Stop all DMA and reset the controller if the previous transaction
         * timed out and never completed its DMA.
index 89ca162801da10b8ec7a9c4311a33344f6a438a7..ee513a85296b19a3c57aad4daef4ec42ee3d90b1 100644 (file)
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022)
        pl022->cur_msg = NULL;
        pl022->cur_transfer = NULL;
        pl022->cur_chip = NULL;
-       spi_finalize_current_message(pl022->master);
 
        /* disable the SPI/SSP operation */
        writew((readw(SSP_CR1(pl022->virtbase)) &
                (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
 
+       spi_finalize_current_message(pl022->master);
 }
 
 /**
index 884a716e50cb822ce49dcb0e925a2c1856130bcb..5c061687035886e5768bf6031616068906540b04 100644 (file)
@@ -101,6 +101,7 @@ struct ti_qspi {
 #define QSPI_FLEN(n)                   ((n - 1) << 0)
 
 /* STATUS REGISTER */
+#define BUSY                           0x01
 #define WC                             0x02
 
 /* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
        ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
 }
 
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+       u32 stat;
+       unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+       stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+       while ((stat & BUSY) && time_after(timeout, jiffies)) {
+               cpu_relax();
+               stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+       }
+
+       WARN(stat & BUSY, "qspi busy\n");
+       return stat & BUSY;
+}
+
 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
        int wlen, count;
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        wlen = t->bits_per_word >> 3;   /* in bytes */
 
        while (count) {
+               if (qspi_is_busy(qspi))
+                       return -EBUSY;
+
                switch (wlen) {
                case 1:
                        dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 
        while (count) {
                dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+               if (qspi_is_busy(qspi))
+                       return -EBUSY;
+
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
                if (!wait_for_completion_timeout(&qspi->transfer_complete,
                                                 QSPI_COMPLETION_TIMEOUT)) {
index 9800c01e6fb97409966e1b35157656ff139f31ea..3f72451d2de01aa44d1b16f633ddd8999894db25 100644 (file)
@@ -426,7 +426,6 @@ static int pci171x_ai_insn_read(struct comedi_device *dev,
                                unsigned int *data)
 {
        struct pci1710_private *devpriv = dev->private;
-       unsigned int chan = CR_CHAN(insn->chanspec);
        int ret = 0;
        int i;
 
@@ -447,7 +446,7 @@ static int pci171x_ai_insn_read(struct comedi_device *dev,
                if (ret)
                        break;
 
-               ret = pci171x_ai_read_sample(dev, s, chan, &val);
+               ret = pci171x_ai_read_sample(dev, s, 0, &val);
                if (ret)
                        break;
 
index dbdea71d6b95b5e3c25a0a2fe2deb9eb57422a94..e856f01ca07716c8acb3881bbe8e4373a64beab5 100644 (file)
@@ -91,9 +91,10 @@ unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
                        stalled++;
                        if (stalled > 10)
                                break;
+               } else {
+                       residue = new_residue;
+                       stalled = 0;
                }
-               residue = new_residue;
-               stalled = 0;
        }
        return residue;
 }
index e37118321a278466ab75f1e51294b2183551bc58..a0906685e27fcb59d9712e9b2619c9f343e5db5e 100644 (file)
@@ -103,11 +103,6 @@ enum vmk80xx_model {
        VMK8061_MODEL
 };
 
-struct firmware_version {
-       unsigned char ic3_vers[32];     /* USB-Controller */
-       unsigned char ic6_vers[32];     /* CPU */
-};
-
 static const struct comedi_lrange vmk8061_range = {
        2, {
                UNI_RANGE(5),
@@ -156,68 +151,12 @@ static const struct vmk80xx_board vmk80xx_boardinfo[] = {
 struct vmk80xx_private {
        struct usb_endpoint_descriptor *ep_rx;
        struct usb_endpoint_descriptor *ep_tx;
-       struct firmware_version fw;
        struct semaphore limit_sem;
        unsigned char *usb_rx_buf;
        unsigned char *usb_tx_buf;
        enum vmk80xx_model model;
 };
 
-static int vmk80xx_check_data_link(struct comedi_device *dev)
-{
-       struct vmk80xx_private *devpriv = dev->private;
-       struct usb_device *usb = comedi_to_usb_dev(dev);
-       unsigned int tx_pipe;
-       unsigned int rx_pipe;
-       unsigned char tx[1];
-       unsigned char rx[2];
-
-       tx_pipe = usb_sndbulkpipe(usb, 0x01);
-       rx_pipe = usb_rcvbulkpipe(usb, 0x81);
-
-       tx[0] = VMK8061_CMD_RD_PWR_STAT;
-
-       /*
-        * Check that IC6 (PIC16F871) is powered and
-        * running and the data link between IC3 and
-        * IC6 is working properly
-        */
-       usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
-       usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10);
-
-       return (int)rx[1];
-}
-
-static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag)
-{
-       struct vmk80xx_private *devpriv = dev->private;
-       struct usb_device *usb = comedi_to_usb_dev(dev);
-       unsigned int tx_pipe;
-       unsigned int rx_pipe;
-       unsigned char tx[1];
-       unsigned char rx[64];
-       int cnt;
-
-       tx_pipe = usb_sndbulkpipe(usb, 0x01);
-       rx_pipe = usb_rcvbulkpipe(usb, 0x81);
-
-       tx[0] = VMK8061_CMD_RD_VERSION;
-
-       /*
-        * Read the firmware version info of IC3 and
-        * IC6 from the internal EEPROM of the IC
-        */
-       usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
-       usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10);
-
-       rx[cnt] = '\0';
-
-       if (flag & IC3_VERSION)
-               strncpy(devpriv->fw.ic3_vers, rx + 1, 24);
-       else                    /* IC6_VERSION */
-               strncpy(devpriv->fw.ic6_vers, rx + 25, 24);
-}
-
 static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
 {
        struct vmk80xx_private *devpriv = dev->private;
@@ -878,16 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 
        usb_set_intfdata(intf, devpriv);
 
-       if (devpriv->model == VMK8061_MODEL) {
-               vmk80xx_read_eeprom(dev, IC3_VERSION);
-               dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers);
-
-               if (vmk80xx_check_data_link(dev)) {
-                       vmk80xx_read_eeprom(dev, IC6_VERSION);
-                       dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers);
-               }
-       }
-
        if (devpriv->model == VMK8055_MODEL)
                vmk80xx_reset_device(dev);
 
index d9d6fad7cb00725f43142967bad4ca6b39a32b06..816174388f13347447c495700e3dad85e7c269db 100644 (file)
@@ -214,11 +214,17 @@ struct mxs_lradc {
        unsigned long           is_divided;
 
        /*
-        * Touchscreen LRADC channels receives a private slot in the CTRL4
-        * register, the slot #7. Therefore only 7 slots instead of 8 in the
-        * CTRL4 register can be mapped to LRADC channels when using the
-        * touchscreen.
-        *
+        * When the touchscreen is enabled, we give it two private virtual
+        * channels: #6 and #7. This means that only 6 virtual channels (instead
+        * of 8) will be available for buffered capture.
+        */
+#define TOUCHSCREEN_VCHANNEL1          7
+#define TOUCHSCREEN_VCHANNEL2          6
+#define BUFFER_VCHANS_LIMITED          0x3f
+#define BUFFER_VCHANS_ALL              0xff
+       u8                      buffer_vchans;
+
+       /*
         * Furthermore, certain LRADC channels are shared between touchscreen
         * and/or touch-buttons and generic LRADC block. Therefore when using
         * either of these, these channels are not available for the regular
@@ -342,6 +348,9 @@ struct mxs_lradc {
 #define        LRADC_CTRL4                             0x140
 #define        LRADC_CTRL4_LRADCSELECT_MASK(n)         (0xf << ((n) * 4))
 #define        LRADC_CTRL4_LRADCSELECT_OFFSET(n)       ((n) * 4)
+#define        LRADC_CTRL4_LRADCSELECT(n, x) \
+                               (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
+                               LRADC_CTRL4_LRADCSELECT_MASK(n))
 
 #define LRADC_RESOLUTION                       12
 #define LRADC_SINGLE_SAMPLE_MASK               ((1 << LRADC_RESOLUTION) - 1)
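To make the new constants concrete: the LRADC has eight virtual slots, each mapped to a physical channel through a 4-bit selector in CTRL4, and LRADC_CTRL4_LRADCSELECT(vch, ch) builds the field that maps virtual slot vch onto physical channel ch. Reserving slots 6 and 7 for the touchscreen leaves the low six for buffered capture, which is exactly what the two masks encode. Illustrative breakdown, values copied from the hunks above:

    #define TOUCHSCREEN_VCHANNEL1   7       /* reserved for the touchscreen  */
    #define TOUCHSCREEN_VCHANNEL2   6       /* reserved for the touchscreen  */

    #define BUFFER_VCHANS_LIMITED   0x3f    /* 0b00111111: slots 0-5 only    */
    #define BUFFER_VCHANS_ALL       0xff    /* 0b11111111: slots 0-7         */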
@@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
                                        LRADC_STATUS_TOUCH_DETECT_RAW);
 }
 
+static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
+                                 unsigned ch)
+{
+       mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
+                               LRADC_CTRL4);
+       mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
+}
+
 static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
 {
        /*
@@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
                LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
                        LRADC_DELAY(3));
 
-       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
-                       LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
-                       LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
 
-       /* wake us again, when the complete conversion is done */
-       mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
        /*
         * after changing the touchscreen plates setting
         * the signals need some initial time to settle. Start the
@@ -509,12 +522,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
                LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
                                        LRADC_DELAY(3));
 
-       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
-                       LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
-                       LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
 
-       /* wake us again, when the conversions are done */
-       mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
        /*
         * after changing the touchscreen plates setting
         * the signals need some initial time to settle. Start the
@@ -580,36 +589,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
 #define TS_CH_XM 4
 #define TS_CH_YM 5
 
-static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
-{
-       u32 reg;
-       int val;
-
-       reg = readl(lradc->base + LRADC_CTRL1);
-
-       /* only channels 3 to 5 are of interest here */
-       if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
-               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
-                       LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
-               val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
-       } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
-               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
-                       LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
-               val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
-       } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
-               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
-                       LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
-               val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
-       } else {
-               return -EIO;
-       }
-
-       mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
-       mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
-
-       return val;
-}
-
 /*
  * YP(open)--+-------------+
  *           |             |--+
@@ -653,7 +632,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
        mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
 
        lradc->cur_plate = LRADC_SAMPLE_X;
-       mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
+       mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
+       mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
 }
 
 /*
@@ -674,7 +654,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
        mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
 
        lradc->cur_plate = LRADC_SAMPLE_Y;
-       mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
+       mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
+       mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
 }
 
 /*
@@ -695,7 +676,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
        mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
 
        lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
-       mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
+       mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
+       mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
+       mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
+                                               TOUCHSCREEN_VCHANNEL1);
 }
 
 static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
@@ -708,6 +692,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
        mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
 }
 
+static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
+{
+       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+                               LRADC_CTRL1);
+       mxs_lradc_reg_set(lradc,
+               LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
+       /*
+        * start with the Y-pos, because it uses nearly the same plate
+        * settings as the touch detection
+        */
+       mxs_lradc_prepare_y_pos(lradc);
+}
+
 static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
 {
        input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
@@ -725,10 +722,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
         * start a dummy conversion to burn time to settle the signals
         * note: we are not interested in the conversion's value
         */
-       mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
-       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
-       mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
-       mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
+       mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
+       mxs_lradc_reg_clear(lradc,
+               LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
+               LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
+       mxs_lradc_reg_wrt(lradc,
+               LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
                LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
                        LRADC_DELAY(2));
 }
@@ -760,59 +759,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
 
        /* if it is released, wait for the next touch via IRQ */
        lradc->cur_plate = LRADC_TOUCH;
-       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
+       mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+       mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
+       mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
+               LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
+               LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
        mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
 }
 
 /* touchscreen's state machine */
 static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
 {
-       int val;
-
        switch (lradc->cur_plate) {
        case LRADC_TOUCH:
-               /*
-                * start with the Y-pos, because it uses nearly the same plate
-                * settings like the touch detection
-                */
-               if (mxs_lradc_check_touch_event(lradc)) {
-                       mxs_lradc_reg_clear(lradc,
-                                       LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
-                                       LRADC_CTRL1);
-                       mxs_lradc_prepare_y_pos(lradc);
-               }
+               if (mxs_lradc_check_touch_event(lradc))
+                       mxs_lradc_start_touch_event(lradc);
                mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
                                        LRADC_CTRL1);
                return;
 
        case LRADC_SAMPLE_Y:
-               val = mxs_lradc_read_ts_channel(lradc);
-               if (val < 0) {
-                       mxs_lradc_enable_touch_detection(lradc); /* re-start */
-                       return;
-               }
-               lradc->ts_y_pos = val;
+               lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
+                                                       TOUCHSCREEN_VCHANNEL1);
                mxs_lradc_prepare_x_pos(lradc);
                return;
 
        case LRADC_SAMPLE_X:
-               val = mxs_lradc_read_ts_channel(lradc);
-               if (val < 0) {
-                       mxs_lradc_enable_touch_detection(lradc); /* re-start */
-                       return;
-               }
-               lradc->ts_x_pos = val;
+               lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
+                                                       TOUCHSCREEN_VCHANNEL1);
                mxs_lradc_prepare_pressure(lradc);
                return;
 
        case LRADC_SAMPLE_PRESSURE:
-               lradc->ts_pressure =
-                       mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
+               lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
+                                                       TOUCHSCREEN_VCHANNEL2,
+                                                       TOUCHSCREEN_VCHANNEL1);
                mxs_lradc_complete_touch_event(lradc);
                return;
 
        case LRADC_SAMPLE_VALID:
-               val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
                mxs_lradc_finish_touch_event(lradc, 1);
                break;
        }
@@ -844,9 +829,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
         * used if doing raw sampling.
         */
        if (lradc->soc == IMX28_LRADC)
-               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
                        LRADC_CTRL1);
-       mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+       mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
 
        /* Enable / disable the divider per requirement */
        if (test_bit(chan, &lradc->is_divided))
@@ -1090,9 +1075,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
 {
        /* stop all interrupts from firing */
        mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
-               LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
-               LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
-               LRADC_CTRL1);
+               LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
+               LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
 
        /* Power-down touchscreen touch-detect circuitry. */
        mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
@@ -1158,26 +1142,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
        struct iio_dev *iio = data;
        struct mxs_lradc *lradc = iio_priv(iio);
        unsigned long reg = readl(lradc->base + LRADC_CTRL1);
+       uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
        const uint32_t ts_irq_mask =
                LRADC_CTRL1_TOUCH_DETECT_IRQ |
-               LRADC_CTRL1_LRADC_IRQ(2) |
-               LRADC_CTRL1_LRADC_IRQ(3) |
-               LRADC_CTRL1_LRADC_IRQ(4) |
-               LRADC_CTRL1_LRADC_IRQ(5);
+               LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
+               LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
 
        if (!(reg & mxs_lradc_irq_mask(lradc)))
                return IRQ_NONE;
 
-       if (lradc->use_touchscreen && (reg & ts_irq_mask))
+       if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
                mxs_lradc_handle_touch(lradc);
 
-       if (iio_buffer_enabled(iio))
-               iio_trigger_poll(iio->trig);
-       else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
+               /* Make sure we don't clear the next conversion's interrupt. */
+               clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
+                               LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
+       }
+
+       if (iio_buffer_enabled(iio)) {
+               if (reg & lradc->buffer_vchans)
+                       iio_trigger_poll(iio->trig);
+       } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
                complete(&lradc->completion);
+       }
 
-       mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
-                       LRADC_CTRL1);
+       mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
 
        return IRQ_HANDLED;
 }
@@ -1289,9 +1278,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
        }
 
        if (lradc->soc == IMX28_LRADC)
-               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
-                                                       LRADC_CTRL1);
-       mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+               mxs_lradc_reg_clear(lradc,
+                       lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+                       LRADC_CTRL1);
+       mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
 
        for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
                ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
@@ -1324,10 +1314,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
        mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
                                        LRADC_DELAY_KICK, LRADC_DELAY(0));
 
-       mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+       mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
        if (lradc->soc == IMX28_LRADC)
-               mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
-                                       LRADC_CTRL1);
+               mxs_lradc_reg_clear(lradc,
+                       lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+                       LRADC_CTRL1);
 
        kfree(lradc->buffer);
        mutex_unlock(&lradc->lock);
@@ -1353,7 +1344,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
        if (lradc->use_touchbutton)
                rsvd_chans++;
        if (lradc->use_touchscreen)
-               rsvd_chans++;
+               rsvd_chans += 2;
 
        /* Test for attempts to map channels with special mode of operation. */
        if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
@@ -1413,6 +1404,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
                .channel = 8,
                .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
        },
+       /* Hidden channel to keep indexes */
+       {
+               .type = IIO_TEMP,
+               .indexed = 1,
+               .scan_index = -1,
+               .channel = 9,
+       },
        MXS_ADC_CHAN(10, IIO_VOLTAGE),  /* VDDIO */
        MXS_ADC_CHAN(11, IIO_VOLTAGE),  /* VTH */
        MXS_ADC_CHAN(12, IIO_VOLTAGE),  /* VDDA */
@@ -1583,6 +1581,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
 
        touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
 
+       if (touch_ret == 0)
+               lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
+       else
+               lradc->buffer_vchans = BUFFER_VCHANS_ALL;
+
        /* Grab all IRQ sources */
        for (i = 0; i < of_cfg->irq_count; i++) {
                lradc->irq[i] = platform_get_irq(pdev, i);
index 017d2f8379b78ca86f7e30ed0e85855684c7b6ec..c17893b4918c82bf176e9ecfac235cd6439e7763 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -68,7 +69,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
                break;
        case IIO_ANGL_VEL:
                vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
-               vel = (vel << 4) >> 4;
+               vel = sign_extend32(vel, 11);
                *val = vel;
                break;
        default:
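sign_extend32(), pulled in via linux/bitops.h above, sign-extends a two's-complement value whose sign bit sits at an arbitrary position. For the 12-bit angular-velocity sample it is equivalent to the old shift-left/shift-right pair, but it states the intent directly and does not lean on implementation-defined right shifts of negative values. A small worked example with made-up samples:

    #include <linux/bitops.h>

    s16 raw = 0xFFF;                        /* 12-bit sample, all bits set  */
    s32 vel = sign_extend32(raw, 11);       /* bit 11 is the sign bit -> -1 */
    s32 max = sign_extend32(0x7FF, 11);     /* sign bit clear -> +2047      */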
index 88614b71cf6d2886950a890ca64e25778ecae602..ddf1fa9f67f8f80fbb8237260a0b4bef3a74b0d4 100644 (file)
@@ -270,7 +270,7 @@ void ll_invalidate_aliases(struct inode *inode)
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
                            struct lookup_intent *it,
-                           struct dentry *de)
+                           struct inode *inode)
 {
        int rc = 0;
 
@@ -280,19 +280,17 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
        if (it_disposition(it, DISP_LOOKUP_NEG))
                return -ENOENT;
 
-       rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+       rc = ll_prep_inode(&inode, request, NULL, it);
 
        return rc;
 }
 
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
        LASSERT(it != NULL);
-       LASSERT(dentry != NULL);
 
-       if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
-               struct inode *inode = dentry->d_inode;
-               struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+       if (it->d.lustre.it_lock_mode && inode != NULL) {
+               struct ll_sb_info *sbi = ll_i2sbi(inode);
 
                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
                       inode, inode->i_ino, inode->i_generation);
index 7c7ef7ec908e48467207144b682291a607d89514..5ebee6ca0a108330711cd02aefea5f87eaccd771 100644 (file)
@@ -2912,8 +2912,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                        oit.it_op = IT_LOOKUP;
 
                /* Call getattr by fid, so do not provide name at all. */
-               op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
-                                            dentry->d_inode, NULL, 0, 0,
+               op_data = ll_prep_md_op_data(NULL, inode,
+                                            inode, NULL, 0, 0,
                                             LUSTRE_OPC_ANY, NULL);
                if (IS_ERR(op_data))
                        return PTR_ERR(op_data);
@@ -2931,7 +2931,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                        goto out;
                }
 
-               rc = ll_revalidate_it_finish(req, &oit, dentry);
+               rc = ll_revalidate_it_finish(req, &oit, inode);
                if (rc != 0) {
                        ll_intent_release(&oit);
                        goto out;
@@ -2944,7 +2944,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                if (!dentry->d_inode->i_nlink)
                        d_lustre_invalidate(dentry, 0);
 
-               ll_lookup_finish_locks(&oit, dentry);
+               ll_lookup_finish_locks(&oit, inode);
        } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
                struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
                u64 valid = OBD_MD_FLGETATTR;
index d032c2b086ccc535ccd488f1410a251b8811601d..2af1d7286250a32097ac89d1735e08ae3c97ebad 100644 (file)
@@ -786,9 +786,9 @@ extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-                           struct lookup_intent *it, struct dentry *de);
+                           struct lookup_intent *it, struct inode *inode);
 
 /* llite/llite_lib.c */
 extern struct super_operations lustre_super_operations;
index 4f361b77c749a718621c8767ec97663e43a7c87f..890ac190f5faf3300ab8461f08cfe63182078c27 100644 (file)
@@ -481,6 +481,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
        struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
        struct dentry *save = dentry, *retval;
        struct ptlrpc_request *req = NULL;
+       struct inode *inode;
        struct md_op_data *op_data;
        __u32 opc;
        int rc;
@@ -539,12 +540,13 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
                goto out;
        }
 
-       if ((it->it_op & IT_OPEN) && dentry->d_inode &&
-           !S_ISREG(dentry->d_inode->i_mode) &&
-           !S_ISDIR(dentry->d_inode->i_mode)) {
-               ll_release_openhandle(dentry->d_inode, it);
+       inode = dentry->d_inode;
+       if ((it->it_op & IT_OPEN) && inode &&
+           !S_ISREG(inode->i_mode) &&
+           !S_ISDIR(inode->i_mode)) {
+               ll_release_openhandle(inode, it);
        }
-       ll_lookup_finish_locks(it, dentry);
+       ll_lookup_finish_locks(it, inode);
 
        if (dentry == save)
                retval = NULL;
index aebde3289c50de6722062dfdea21fa1c549090cd..50bad55a0c42e3bd9eef6925520cb1e9bddf2209 100644 (file)
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #include <target/iscsi/iscsi_transport.h>
 
@@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-               spin_lock_bh(&conn->sess->ttt_lock);
-               cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-               if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-                       cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-               spin_unlock_bh(&conn->sess->ttt_lock);
+               cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
        } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
@@ -1998,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
        cmd->data_direction     = DMA_NONE;
+       cmd->text_in_ptr        = NULL;
 
        return 0;
 }
@@ -2011,9 +2008,13 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        int cmdsn_ret;
 
        if (!text_in) {
-               pr_err("Unable to locate text_in buffer for sendtargets"
-                      " discovery\n");
-               goto reject;
+               cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+               if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+                       pr_err("Unable to locate text_in buffer for sendtargets"
+                              " discovery\n");
+                       goto reject;
+               }
+               goto empty_sendtargets;
        }
        if (strncmp("SendTargets", text_in, 11) != 0) {
                pr_err("Received Text Data that is not"
@@ -2040,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
 
+empty_sendtargets:
        iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
        if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@ static int iscsit_send_r2t(
        int_to_scsilun(cmd->se_cmd.orig_fe_lun,
                        (struct scsi_lun *)&hdr->lun);
        hdr->itt                = cmd->init_task_tag;
-       spin_lock_bh(&conn->sess->ttt_lock);
-       r2t->targ_xfer_tag      = conn->sess->targ_xfer_tag++;
-       if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-               r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-       spin_unlock_bh(&conn->sess->ttt_lock);
+       r2t->targ_xfer_tag      = session_get_next_ttt(conn->sess);
        hdr->ttt                = cpu_to_be32(r2t->targ_xfer_tag);
        hdr->statsn             = cpu_to_be32(conn->stat_sn);
        hdr->exp_cmdsn          = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3393,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-                                 enum iscsit_transport_type network_transport)
+                                 enum iscsit_transport_type network_transport,
+                                 int skip_bytes, bool *completed)
 {
        char *payload = NULL;
        struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
        unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
        unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
-       buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+       buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
                         SENDTARGETS_BUF_LIMIT);
 
        payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                                end_of_buf = 1;
                                                goto eob;
                                        }
-                                       memcpy(payload + payload_len, buf, len);
-                                       payload_len += len;
-                                       target_name_printed = 1;
+
+                                       if (skip_bytes && len <= skip_bytes) {
+                                               skip_bytes -= len;
+                                       } else {
+                                               memcpy(payload + payload_len, buf, len);
+                                               payload_len += len;
+                                               target_name_printed = 1;
+                                               if (len > skip_bytes)
+                                                       skip_bytes = 0;
+                                       }
                                }
 
                                len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                        end_of_buf = 1;
                                        goto eob;
                                }
-                               memcpy(payload + payload_len, buf, len);
-                               payload_len += len;
+
+                               if (skip_bytes && len <= skip_bytes) {
+                                       skip_bytes -= len;
+                               } else {
+                                       memcpy(payload + payload_len, buf, len);
+                                       payload_len += len;
+                                       if (len > skip_bytes)
+                                               skip_bytes = 0;
+                               }
                        }
                        spin_unlock(&tpg->tpg_np_lock);
                }
                spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-               if (end_of_buf)
+               if (end_of_buf) {
+                       *completed = false;
                        break;
+               }
 
                if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
@@ -3528,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                      enum iscsit_transport_type network_transport)
 {
        int text_length, padding;
+       bool completed = true;
 
-       text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+       text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+                                                       cmd->read_data_done,
+                                                       &completed);
        if (text_length < 0)
                return text_length;
 
+       if (completed) {
+               hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+       } else {
+               hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+               cmd->read_data_done += text_length;
+               if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+                       cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+       }
        hdr->opcode = ISCSI_OP_TEXT_RSP;
-       hdr->flags |= ISCSI_FLAG_CMD_FINAL;
        padding = ((-text_length) & 3);
        hton24(hdr->dlength, text_length);
        hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
        hdr->statsn = cpu_to_be32(cmd->stat_sn);
 
        iscsit_increment_maxcmdsn(cmd, conn->sess);
+       /*
+        * Reset maxcmdsn_inc in multi-part text payload exchanges to
+        * correctly increment MaxCmdSN for each response answering a
+        * non immediate text request with a valid CmdSN.
+        * non-immediate text request with a valid CmdSN.
+       cmd->maxcmdsn_inc = 0;
        hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
        hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
 
-       pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
-               " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
-               text_length, conn->cid);
+       pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+               " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+               cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+               !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+               !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
 
        return text_length + padding;
 }
 EXPORT_SYMBOL(iscsit_build_text_rsp);
 
-/*
- *     FIXME: Add support for F_BIT and C_BIT when the length is longer than
- *     MaxRecvDataSegmentLength.
- */
 static int iscsit_send_text_rsp(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
@@ -4021,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
                ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
                break;
        case ISCSI_OP_TEXT:
-               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
-               if (!cmd)
-                       goto reject;
+               if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+                       cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+                       if (!cmd)
+                               goto reject;
+               } else {
+                       cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+                       if (!cmd)
+                               goto reject;
+               }
 
                ret = iscsit_handle_text_cmd(conn, cmd, buf);
                break;
index ab4915c0d933a07021b076cf15232b85dc9dcf08..47e249dccb5fe7d9652bea77bddc00b35dd98429 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
index 9059c1e0b26e559dc0c1f69b3b97625d7cb806e4..48384b675e624b9b04011fe03750c11170961ef1 100644 (file)
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info(
                rb += sprintf(page+rb, "InitiatorAlias: %s\n",
                        sess->sess_ops->InitiatorAlias);
 
-               rb += sprintf(page+rb, "LIO Session ID: %u   "
-                       "ISID: 0x%02x %02x %02x %02x %02x %02x  "
-                       "TSIH: %hu  ", sess->sid,
-                       sess->isid[0], sess->isid[1], sess->isid[2],
-                       sess->isid[3], sess->isid[4], sess->isid[5],
-                       sess->tsih);
+               rb += sprintf(page+rb,
+                             "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+                             sess->sid, sess->isid, sess->tsih);
                rb += sprintf(page+rb, "SessionType: %s\n",
                                (sess->sess_ops->SessionType) ?
                                "Discovery" : "Normal");
@@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid(
        /*
         * iSCSI Initiator Session Identifier from RFC-3720.
         */
-       return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-               sess->isid[0], sess->isid[1], sess->isid[2],
-               sess->isid[3], sess->isid[4], sess->isid[5]);
+       return snprintf(buf, size, "%6phN", sess->isid);
 }
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
deleted file mode 100644 (file)
index cbcff38..0000000
+++ /dev/null
@@ -1,883 +0,0 @@
-#ifndef ISCSI_TARGET_CORE_H
-#define ISCSI_TARGET_CORE_H
-
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
-
-#define ISCSIT_VERSION                 "v4.1.0"
-#define ISCSI_MAX_DATASN_MISSING_COUNT 16
-#define ISCSI_TX_THREAD_TCP_TIMEOUT    2
-#define ISCSI_RX_THREAD_TCP_TIMEOUT    2
-#define SECONDS_FOR_ASYNC_LOGOUT       10
-#define SECONDS_FOR_ASYNC_TEXT         10
-#define SECONDS_FOR_LOGOUT_COMP                15
-#define WHITE_SPACE                    " \t\v\f\n\r"
-#define ISCSIT_MIN_TAGS                        16
-#define ISCSIT_EXTRA_TAGS              8
-#define ISCSIT_TCP_BACKLOG             256
-
-/* struct iscsi_node_attrib sanity values */
-#define NA_DATAOUT_TIMEOUT             3
-#define NA_DATAOUT_TIMEOUT_MAX         60
-#define NA_DATAOUT_TIMEOUT_MIX         2
-#define NA_DATAOUT_TIMEOUT_RETRIES     5
-#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
-#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
-#define NA_NOPIN_TIMEOUT               15
-#define NA_NOPIN_TIMEOUT_MAX           60
-#define NA_NOPIN_TIMEOUT_MIN           3
-#define NA_NOPIN_RESPONSE_TIMEOUT      30
-#define NA_NOPIN_RESPONSE_TIMEOUT_MAX  60
-#define NA_NOPIN_RESPONSE_TIMEOUT_MIN  3
-#define NA_RANDOM_DATAIN_PDU_OFFSETS   0
-#define NA_RANDOM_DATAIN_SEQ_OFFSETS   0
-#define NA_RANDOM_R2T_OFFSETS          0
-
-/* struct iscsi_tpg_attrib sanity values */
-#define TA_AUTHENTICATION              1
-#define TA_LOGIN_TIMEOUT               15
-#define TA_LOGIN_TIMEOUT_MAX           30
-#define TA_LOGIN_TIMEOUT_MIN           5
-#define TA_NETIF_TIMEOUT               2
-#define TA_NETIF_TIMEOUT_MAX           15
-#define TA_NETIF_TIMEOUT_MIN           2
-#define TA_GENERATE_NODE_ACLS          0
-#define TA_DEFAULT_CMDSN_DEPTH         64
-#define TA_DEFAULT_CMDSN_DEPTH_MAX     512
-#define TA_DEFAULT_CMDSN_DEPTH_MIN     1
-#define TA_CACHE_DYNAMIC_ACLS          0
-/* Enabled by default in demo mode (generic_node_acls=1) */
-#define TA_DEMO_MODE_WRITE_PROTECT     1
-/* Disabled by default in production mode w/ explict ACLs */
-#define TA_PROD_MODE_WRITE_PROTECT     0
-#define TA_DEMO_MODE_DISCOVERY         1
-#define TA_DEFAULT_ERL                 0
-#define TA_CACHE_CORE_NPS              0
-/* T10 protection information disabled by default */
-#define TA_DEFAULT_T10_PI              0
-
-#define ISCSI_IOV_DATA_BUFFER          5
-
-enum iscsit_transport_type {
-       ISCSI_TCP                               = 0,
-       ISCSI_SCTP_TCP                          = 1,
-       ISCSI_SCTP_UDP                          = 2,
-       ISCSI_IWARP_TCP                         = 3,
-       ISCSI_IWARP_SCTP                        = 4,
-       ISCSI_INFINIBAND                        = 5,
-};
-
-/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
-enum target_conn_state_table {
-       TARG_CONN_STATE_FREE                    = 0x1,
-       TARG_CONN_STATE_XPT_UP                  = 0x3,
-       TARG_CONN_STATE_IN_LOGIN                = 0x4,
-       TARG_CONN_STATE_LOGGED_IN               = 0x5,
-       TARG_CONN_STATE_IN_LOGOUT               = 0x6,
-       TARG_CONN_STATE_LOGOUT_REQUESTED        = 0x7,
-       TARG_CONN_STATE_CLEANUP_WAIT            = 0x8,
-};
-
-/* RFC-3720 7.3.2  Session State Diagram for a Target */
-enum target_sess_state_table {
-       TARG_SESS_STATE_FREE                    = 0x1,
-       TARG_SESS_STATE_ACTIVE                  = 0x2,
-       TARG_SESS_STATE_LOGGED_IN               = 0x3,
-       TARG_SESS_STATE_FAILED                  = 0x4,
-       TARG_SESS_STATE_IN_CONTINUE             = 0x5,
-};
-
-/* struct iscsi_data_count->type */
-enum data_count_type {
-       ISCSI_RX_DATA   = 1,
-       ISCSI_TX_DATA   = 2,
-};
-
-/* struct iscsi_datain_req->dr_complete */
-enum datain_req_comp_table {
-       DATAIN_COMPLETE_NORMAL                  = 1,
-       DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
-       DATAIN_COMPLETE_CONNECTION_RECOVERY     = 3,
-};
-
-/* struct iscsi_datain_req->recovery */
-enum datain_req_rec_table {
-       DATAIN_WITHIN_COMMAND_RECOVERY          = 1,
-       DATAIN_CONNECTION_RECOVERY              = 2,
-};
-
-/* struct iscsi_portal_group->state */
-enum tpg_state_table {
-       TPG_STATE_FREE                          = 0,
-       TPG_STATE_ACTIVE                        = 1,
-       TPG_STATE_INACTIVE                      = 2,
-       TPG_STATE_COLD_RESET                    = 3,
-};
-
-/* struct iscsi_tiqn->tiqn_state */
-enum tiqn_state_table {
-       TIQN_STATE_ACTIVE                       = 1,
-       TIQN_STATE_SHUTDOWN                     = 2,
-};
-
-/* struct iscsi_cmd->cmd_flags */
-enum cmd_flags_table {
-       ICF_GOT_LAST_DATAOUT                    = 0x00000001,
-       ICF_GOT_DATACK_SNACK                    = 0x00000002,
-       ICF_NON_IMMEDIATE_UNSOLICITED_DATA      = 0x00000004,
-       ICF_SENT_LAST_R2T                       = 0x00000008,
-       ICF_WITHIN_COMMAND_RECOVERY             = 0x00000010,
-       ICF_CONTIG_MEMORY                       = 0x00000020,
-       ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
-       ICF_OOO_CMDSN                           = 0x00000080,
-       ICF_SENDTARGETS_ALL                     = 0x00000100,
-       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
-};
-
-/* struct iscsi_cmd->i_state */
-enum cmd_i_state_table {
-       ISTATE_NO_STATE                 = 0,
-       ISTATE_NEW_CMD                  = 1,
-       ISTATE_DEFERRED_CMD             = 2,
-       ISTATE_UNSOLICITED_DATA         = 3,
-       ISTATE_RECEIVE_DATAOUT          = 4,
-       ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
-       ISTATE_RECEIVED_LAST_DATAOUT    = 6,
-       ISTATE_WITHIN_DATAOUT_RECOVERY  = 7,
-       ISTATE_IN_CONNECTION_RECOVERY   = 8,
-       ISTATE_RECEIVED_TASKMGT         = 9,
-       ISTATE_SEND_ASYNCMSG            = 10,
-       ISTATE_SENT_ASYNCMSG            = 11,
-       ISTATE_SEND_DATAIN              = 12,
-       ISTATE_SEND_LAST_DATAIN         = 13,
-       ISTATE_SENT_LAST_DATAIN         = 14,
-       ISTATE_SEND_LOGOUTRSP           = 15,
-       ISTATE_SENT_LOGOUTRSP           = 16,
-       ISTATE_SEND_NOPIN               = 17,
-       ISTATE_SENT_NOPIN               = 18,
-       ISTATE_SEND_REJECT              = 19,
-       ISTATE_SENT_REJECT              = 20,
-       ISTATE_SEND_R2T                 = 21,
-       ISTATE_SENT_R2T                 = 22,
-       ISTATE_SEND_R2T_RECOVERY        = 23,
-       ISTATE_SENT_R2T_RECOVERY        = 24,
-       ISTATE_SEND_LAST_R2T            = 25,
-       ISTATE_SENT_LAST_R2T            = 26,
-       ISTATE_SEND_LAST_R2T_RECOVERY   = 27,
-       ISTATE_SENT_LAST_R2T_RECOVERY   = 28,
-       ISTATE_SEND_STATUS              = 29,
-       ISTATE_SEND_STATUS_BROKEN_PC    = 30,
-       ISTATE_SENT_STATUS              = 31,
-       ISTATE_SEND_STATUS_RECOVERY     = 32,
-       ISTATE_SENT_STATUS_RECOVERY     = 33,
-       ISTATE_SEND_TASKMGTRSP          = 34,
-       ISTATE_SENT_TASKMGTRSP          = 35,
-       ISTATE_SEND_TEXTRSP             = 36,
-       ISTATE_SENT_TEXTRSP             = 37,
-       ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
-       ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
-       ISTATE_SEND_NOPIN_NO_RESPONSE   = 40,
-       ISTATE_REMOVE                   = 41,
-       ISTATE_FREE                     = 42,
-};
-
-/* Used for iscsi_recover_cmdsn() return values */
-enum recover_cmdsn_ret_table {
-       CMDSN_ERROR_CANNOT_RECOVER      = -1,
-       CMDSN_NORMAL_OPERATION          = 0,
-       CMDSN_LOWER_THAN_EXP            = 1,
-       CMDSN_HIGHER_THAN_EXP           = 2,
-       CMDSN_MAXCMDSN_OVERRUN          = 3,
-};
-
-/* Used for iscsi_handle_immediate_data() return values */
-enum immedate_data_ret_table {
-       IMMEDIATE_DATA_CANNOT_RECOVER   = -1,
-       IMMEDIATE_DATA_NORMAL_OPERATION = 0,
-       IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
-};
-
-/* Used for iscsi_decide_dataout_action() return values */
-enum dataout_action_ret_table {
-       DATAOUT_CANNOT_RECOVER          = -1,
-       DATAOUT_NORMAL                  = 0,
-       DATAOUT_SEND_R2T                = 1,
-       DATAOUT_SEND_TO_TRANSPORT       = 2,
-       DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
-};
-
-/* Used for struct iscsi_node_auth->naf_flags */
-enum naf_flags_table {
-       NAF_USERID_SET                  = 0x01,
-       NAF_PASSWORD_SET                = 0x02,
-       NAF_USERID_IN_SET               = 0x04,
-       NAF_PASSWORD_IN_SET             = 0x08,
-};
-
-/* Used by various struct timer_list to manage iSCSI specific state */
-enum iscsi_timer_flags_table {
-       ISCSI_TF_RUNNING                = 0x01,
-       ISCSI_TF_STOP                   = 0x02,
-       ISCSI_TF_EXPIRED                = 0x04,
-};
-
-/* Used for struct iscsi_np->np_flags */
-enum np_flags_table {
-       NPF_IP_NETWORK          = 0x00,
-};
-
-/* Used for struct iscsi_np->np_thread_state */
-enum np_thread_state_table {
-       ISCSI_NP_THREAD_ACTIVE          = 1,
-       ISCSI_NP_THREAD_INACTIVE        = 2,
-       ISCSI_NP_THREAD_RESET           = 3,
-       ISCSI_NP_THREAD_SHUTDOWN        = 4,
-       ISCSI_NP_THREAD_EXIT            = 5,
-};
-
-struct iscsi_conn_ops {
-       u8      HeaderDigest;                   /* [0,1] == [None,CRC32C] */
-       u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
-       u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
-       u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
-       u8      OFMarker;                       /* [0,1] == [No,Yes] */
-       u8      IFMarker;                       /* [0,1] == [No,Yes] */
-       u32     OFMarkInt;                      /* [1..65535] */
-       u32     IFMarkInt;                      /* [1..65535] */
-       /*
-        * iSER specific connection parameters
-        */
-       u32     InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
-       u32     TargetRecvDataSegmentLength;    /* [512..2**24-1] */
-};
-
-struct iscsi_sess_ops {
-       char    InitiatorName[224];
-       char    InitiatorAlias[256];
-       char    TargetName[224];
-       char    TargetAlias[256];
-       char    TargetAddress[256];
-       u16     TargetPortalGroupTag;           /* [0..65535] */
-       u16     MaxConnections;                 /* [1..65535] */
-       u8      InitialR2T;                     /* [0,1] == [No,Yes] */
-       u8      ImmediateData;                  /* [0,1] == [No,Yes] */
-       u32     MaxBurstLength;                 /* [512..2**24-1] */
-       u32     FirstBurstLength;               /* [512..2**24-1] */
-       u16     DefaultTime2Wait;               /* [0..3600] */
-       u16     DefaultTime2Retain;             /* [0..3600] */
-       u16     MaxOutstandingR2T;              /* [1..65535] */
-       u8      DataPDUInOrder;                 /* [0,1] == [No,Yes] */
-       u8      DataSequenceInOrder;            /* [0,1] == [No,Yes] */
-       u8      ErrorRecoveryLevel;             /* [0..2] */
-       u8      SessionType;                    /* [0,1] == [Normal,Discovery]*/
-       /*
-        * iSER specific session parameters
-        */
-       u8      RDMAExtensions;                 /* [0,1] == [No,Yes] */
-};
-
-struct iscsi_queue_req {
-       int                     state;
-       struct iscsi_cmd        *cmd;
-       struct list_head        qr_list;
-};
-
-struct iscsi_data_count {
-       int                     data_length;
-       int                     sync_and_steering;
-       enum data_count_type    type;
-       u32                     iov_count;
-       u32                     ss_iov_count;
-       u32                     ss_marker_count;
-       struct kvec             *iov;
-};
-
-struct iscsi_param_list {
-       bool                    iser;
-       struct list_head        param_list;
-       struct list_head        extra_response_list;
-};
-
-struct iscsi_datain_req {
-       enum datain_req_comp_table dr_complete;
-       int                     generate_recovery_values;
-       enum datain_req_rec_table recovery;
-       u32                     begrun;
-       u32                     runlength;
-       u32                     data_length;
-       u32                     data_offset;
-       u32                     data_sn;
-       u32                     next_burst_len;
-       u32                     read_data_done;
-       u32                     seq_send_order;
-       struct list_head        cmd_datain_node;
-} ____cacheline_aligned;
-
-struct iscsi_ooo_cmdsn {
-       u16                     cid;
-       u32                     batch_count;
-       u32                     cmdsn;
-       u32                     exp_cmdsn;
-       struct iscsi_cmd        *cmd;
-       struct list_head        ooo_list;
-} ____cacheline_aligned;
-
-struct iscsi_datain {
-       u8                      flags;
-       u32                     data_sn;
-       u32                     length;
-       u32                     offset;
-} ____cacheline_aligned;
-
-struct iscsi_r2t {
-       int                     seq_complete;
-       int                     recovery_r2t;
-       int                     sent_r2t;
-       u32                     r2t_sn;
-       u32                     offset;
-       u32                     targ_xfer_tag;
-       u32                     xfer_len;
-       struct list_head        r2t_list;
-} ____cacheline_aligned;
-
-struct iscsi_cmd {
-       enum iscsi_timer_flags_table dataout_timer_flags;
-       /* DataOUT timeout retries */
-       u8                      dataout_timeout_retries;
-       /* Within command recovery count */
-       u8                      error_recovery_count;
-       /* iSCSI dependent state for out or order CmdSNs */
-       enum cmd_i_state_table  deferred_i_state;
-       /* iSCSI dependent state */
-       enum cmd_i_state_table  i_state;
-       /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
-       u8                      immediate_cmd;
-       /* Immediate data present */
-       u8                      immediate_data;
-       /* iSCSI Opcode */
-       u8                      iscsi_opcode;
-       /* iSCSI Response Code */
-       u8                      iscsi_response;
-       /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-       u8                      logout_reason;
-       /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-       u8                      logout_response;
-       /* MaxCmdSN has been incremented */
-       u8                      maxcmdsn_inc;
-       /* Immediate Unsolicited Dataout */
-       u8                      unsolicited_data;
-       /* Reject reason code */
-       u8                      reject_reason;
-       /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
-       u16                     logout_cid;
-       /* Command flags */
-       enum cmd_flags_table    cmd_flags;
-       /* Initiator Task Tag assigned from Initiator */
-       itt_t                   init_task_tag;
-       /* Target Transfer Tag assigned from Target */
-       u32                     targ_xfer_tag;
-       /* CmdSN assigned from Initiator */
-       u32                     cmd_sn;
-       /* ExpStatSN assigned from Initiator */
-       u32                     exp_stat_sn;
-       /* StatSN assigned to this ITT */
-       u32                     stat_sn;
-       /* DataSN Counter */
-       u32                     data_sn;
-       /* R2TSN Counter */
-       u32                     r2t_sn;
-       /* Last DataSN acknowledged via DataAck SNACK */
-       u32                     acked_data_sn;
-       /* Used for echoing NOPOUT ping data */
-       u32                     buf_ptr_size;
-       /* Used to store DataDigest */
-       u32                     data_crc;
-       /* Counter for MaxOutstandingR2T */
-       u32                     outstanding_r2ts;
-       /* Next R2T Offset when DataSequenceInOrder=Yes */
-       u32                     r2t_offset;
-       /* Iovec current and orig count for iscsi_cmd->iov_data */
-       u32                     iov_data_count;
-       u32                     orig_iov_data_count;
-       /* Number of miscellaneous iovecs used for IP stack calls */
-       u32                     iov_misc_count;
-       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-       u32                     pdu_count;
-       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
-       u32                     pdu_send_order;
-       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-       u32                     pdu_start;
-       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
-       u32                     seq_send_order;
-       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
-       u32                     seq_count;
-       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
-       u32                     seq_no;
-       /* Lowest offset in current DataOUT sequence */
-       u32                     seq_start_offset;
-       /* Highest offset in current DataOUT sequence */
-       u32                     seq_end_offset;
-       /* Total size in bytes received so far of READ data */
-       u32                     read_data_done;
-       /* Total size in bytes received so far of WRITE data */
-       u32                     write_data_done;
-       /* Counter for FirstBurstLength key */
-       u32                     first_burst_len;
-       /* Counter for MaxBurstLength key */
-       u32                     next_burst_len;
-       /* Transfer size used for IP stack calls */
-       u32                     tx_size;
-       /* Buffer used for various purposes */
-       void                    *buf_ptr;
-       /* Used by SendTargets=[iqn.,eui.] discovery */
-       void                    *text_in_ptr;
-       /* See include/linux/dma-mapping.h */
-       enum dma_data_direction data_direction;
-       /* iSCSI PDU Header + CRC */
-       unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
-       /* Number of times struct iscsi_cmd is present in immediate queue */
-       atomic_t                immed_queue_count;
-       atomic_t                response_queue_count;
-       spinlock_t              datain_lock;
-       spinlock_t              dataout_timeout_lock;
-       /* spinlock for protecting struct iscsi_cmd->i_state */
-       spinlock_t              istate_lock;
-       /* spinlock for adding within command recovery entries */
-       spinlock_t              error_lock;
-       /* spinlock for adding R2Ts */
-       spinlock_t              r2t_lock;
-       /* DataIN List */
-       struct list_head        datain_list;
-       /* R2T List */
-       struct list_head        cmd_r2t_list;
-       /* Timer for DataOUT */
-       struct timer_list       dataout_timer;
-       /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
-       struct kvec             *iov_data;
-       /* Iovecs for miscellaneous purposes */
-#define ISCSI_MISC_IOVECS                      5
-       struct kvec             iov_misc[ISCSI_MISC_IOVECS];
-       /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
-       struct iscsi_pdu        *pdu_list;
-       /* Current struct iscsi_pdu used for DataPDUInOrder=No */
-       struct iscsi_pdu        *pdu_ptr;
-       /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
-       struct iscsi_seq        *seq_list;
-       /* Current struct iscsi_seq used for DataSequenceInOrder=No */
-       struct iscsi_seq        *seq_ptr;
-       /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
-       struct iscsi_tmr_req    *tmr_req;
-       /* Connection this command is alligient to */
-       struct iscsi_conn       *conn;
-       /* Pointer to connection recovery entry */
-       struct iscsi_conn_recovery *cr;
-       /* Session the command is part of,  used for connection recovery */
-       struct iscsi_session    *sess;
-       /* list_head for connection list */
-       struct list_head        i_conn_node;
-       /* The TCM I/O descriptor that is accessed via container_of() */
-       struct se_cmd           se_cmd;
-       /* Sense buffer that will be mapped into outgoing status */
-#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
-       unsigned char           sense_buffer[ISCSI_SENSE_BUFFER_LEN];
-
-       u32                     padding;
-       u8                      pad_bytes[4];
-
-       struct scatterlist      *first_data_sg;
-       u32                     first_data_sg_off;
-       u32                     kmapped_nents;
-       sense_reason_t          sense_reason;
-}  ____cacheline_aligned;
-
-struct iscsi_tmr_req {
-       bool                    task_reassign:1;
-       u32                     exp_data_sn;
-       struct iscsi_cmd        *ref_cmd;
-       struct iscsi_conn_recovery *conn_recovery;
-       struct se_tmr_req       *se_tmr_req;
-};
-
-struct iscsi_conn {
-       wait_queue_head_t       queues_wq;
-       /* Authentication Successful for this connection */
-       u8                      auth_complete;
-       /* State connection is currently in */
-       u8                      conn_state;
-       u8                      conn_logout_reason;
-       u8                      network_transport;
-       enum iscsi_timer_flags_table nopin_timer_flags;
-       enum iscsi_timer_flags_table nopin_response_timer_flags;
-       /* Used to know what thread encountered a transport failure */
-       u8                      which_thread;
-       /* connection id assigned by the Initiator */
-       u16                     cid;
-       /* Remote TCP Port */
-       u16                     login_port;
-       u16                     local_port;
-       int                     net_size;
-       int                     login_family;
-       u32                     auth_id;
-       u32                     conn_flags;
-       /* Used for iscsi_tx_login_rsp() */
-       itt_t                   login_itt;
-       u32                     exp_statsn;
-       /* Per connection status sequence number */
-       u32                     stat_sn;
-       /* IFMarkInt's Current Value */
-       u32                     if_marker;
-       /* OFMarkInt's Current Value */
-       u32                     of_marker;
-       /* Used for calculating OFMarker offset to next PDU */
-       u32                     of_marker_offset;
-#define IPV6_ADDRESS_SPACE                             48
-       unsigned char           login_ip[IPV6_ADDRESS_SPACE];
-       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
-       int                     conn_usage_count;
-       int                     conn_waiting_on_uc;
-       atomic_t                check_immediate_queue;
-       atomic_t                conn_logout_remove;
-       atomic_t                connection_exit;
-       atomic_t                connection_recovery;
-       atomic_t                connection_reinstatement;
-       atomic_t                connection_wait_rcfr;
-       atomic_t                sleep_on_conn_wait_comp;
-       atomic_t                transport_failed;
-       struct completion       conn_post_wait_comp;
-       struct completion       conn_wait_comp;
-       struct completion       conn_wait_rcfr_comp;
-       struct completion       conn_waiting_on_uc_comp;
-       struct completion       conn_logout_comp;
-       struct completion       tx_half_close_comp;
-       struct completion       rx_half_close_comp;
-       /* socket used by this connection */
-       struct socket           *sock;
-       void                    (*orig_data_ready)(struct sock *);
-       void                    (*orig_state_change)(struct sock *);
-#define LOGIN_FLAGS_READ_ACTIVE                1
-#define LOGIN_FLAGS_CLOSED             2
-#define LOGIN_FLAGS_READY              4
-       unsigned long           login_flags;
-       struct delayed_work     login_work;
-       struct delayed_work     login_cleanup_work;
-       struct iscsi_login      *login;
-       struct timer_list       nopin_timer;
-       struct timer_list       nopin_response_timer;
-       struct timer_list       transport_timer;
-       struct task_struct      *login_kworker;
-       /* Spinlock used for add/deleting cmd's from conn_cmd_list */
-       spinlock_t              cmd_lock;
-       spinlock_t              conn_usage_lock;
-       spinlock_t              immed_queue_lock;
-       spinlock_t              nopin_timer_lock;
-       spinlock_t              response_queue_lock;
-       spinlock_t              state_lock;
-       /* libcrypto RX and TX contexts for crc32c */
-       struct hash_desc        conn_rx_hash;
-       struct hash_desc        conn_tx_hash;
-       /* Used for scheduling TX and RX connection kthreads */
-       cpumask_var_t           conn_cpumask;
-       unsigned int            conn_rx_reset_cpumask:1;
-       unsigned int            conn_tx_reset_cpumask:1;
-       /* list_head of struct iscsi_cmd for this connection */
-       struct list_head        conn_cmd_list;
-       struct list_head        immed_queue_list;
-       struct list_head        response_queue_list;
-       struct iscsi_conn_ops   *conn_ops;
-       struct iscsi_login      *conn_login;
-       struct iscsit_transport *conn_transport;
-       struct iscsi_param_list *param_list;
-       /* Used for per connection auth state machine */
-       void                    *auth_protocol;
-       void                    *context;
-       struct iscsi_login_thread_s *login_thread;
-       struct iscsi_portal_group *tpg;
-       struct iscsi_tpg_np     *tpg_np;
-       /* Pointer to parent session */
-       struct iscsi_session    *sess;
-       /* Pointer to thread_set in use for this conn's threads */
-       struct iscsi_thread_set *thread_set;
-       /* list_head for session connection list */
-       struct list_head        conn_list;
-} ____cacheline_aligned;
-
-struct iscsi_conn_recovery {
-       u16                     cid;
-       u32                     cmd_count;
-       u32                     maxrecvdatasegmentlength;
-       u32                     maxxmitdatasegmentlength;
-       int                     ready_for_reallegiance;
-       struct list_head        conn_recovery_cmd_list;
-       spinlock_t              conn_recovery_cmd_lock;
-       struct timer_list       time2retain_timer;
-       struct iscsi_session    *sess;
-       struct list_head        cr_list;
-}  ____cacheline_aligned;
-
-struct iscsi_session {
-       u8                      initiator_vendor;
-       u8                      isid[6];
-       enum iscsi_timer_flags_table time2retain_timer_flags;
-       u8                      version_active;
-       u16                     cid_called;
-       u16                     conn_recovery_count;
-       u16                     tsih;
-       /* state session is currently in */
-       u32                     session_state;
-       /* session wide counter: initiator assigned task tag */
-       itt_t                   init_task_tag;
-       /* session wide counter: target assigned task tag */
-       u32                     targ_xfer_tag;
-       u32                     cmdsn_window;
-
-       /* protects cmdsn values */
-       struct mutex            cmdsn_mutex;
-       /* session wide counter: expected command sequence number */
-       u32                     exp_cmd_sn;
-       /* session wide counter: maximum allowed command sequence number */
-       u32                     max_cmd_sn;
-       struct list_head        sess_ooo_cmdsn_list;
-
-       /* LIO specific session ID */
-       u32                     sid;
-       char                    auth_type[8];
-       /* unique within the target */
-       int                     session_index;
-       /* Used for session reference counting */
-       int                     session_usage_count;
-       int                     session_waiting_on_uc;
-       atomic_long_t           cmd_pdus;
-       atomic_long_t           rsp_pdus;
-       atomic_long_t           tx_data_octets;
-       atomic_long_t           rx_data_octets;
-       atomic_long_t           conn_digest_errors;
-       atomic_long_t           conn_timeout_errors;
-       u64                     creation_time;
-       /* Number of active connections */
-       atomic_t                nconn;
-       atomic_t                session_continuation;
-       atomic_t                session_fall_back_to_erl0;
-       atomic_t                session_logout;
-       atomic_t                session_reinstatement;
-       atomic_t                session_stop_active;
-       atomic_t                sleep_on_sess_wait_comp;
-       /* connection list */
-       struct list_head        sess_conn_list;
-       struct list_head        cr_active_list;
-       struct list_head        cr_inactive_list;
-       spinlock_t              conn_lock;
-       spinlock_t              cr_a_lock;
-       spinlock_t              cr_i_lock;
-       spinlock_t              session_usage_lock;
-       spinlock_t              ttt_lock;
-       struct completion       async_msg_comp;
-       struct completion       reinstatement_comp;
-       struct completion       session_wait_comp;
-       struct completion       session_waiting_on_uc_comp;
-       struct timer_list       time2retain_timer;
-       struct iscsi_sess_ops   *sess_ops;
-       struct se_session       *se_sess;
-       struct iscsi_portal_group *tpg;
-} ____cacheline_aligned;
-
-struct iscsi_login {
-       u8 auth_complete;
-       u8 checked_for_existing;
-       u8 current_stage;
-       u8 leading_connection;
-       u8 first_request;
-       u8 version_min;
-       u8 version_max;
-       u8 login_complete;
-       u8 login_failed;
-       bool zero_tsih;
-       char isid[6];
-       u32 cmd_sn;
-       itt_t init_task_tag;
-       u32 initial_exp_statsn;
-       u32 rsp_length;
-       u16 cid;
-       u16 tsih;
-       char req[ISCSI_HDR_LEN];
-       char rsp[ISCSI_HDR_LEN];
-       char *req_buf;
-       char *rsp_buf;
-       struct iscsi_conn *conn;
-       struct iscsi_np *np;
-} ____cacheline_aligned;
-
-struct iscsi_node_attrib {
-       u32                     dataout_timeout;
-       u32                     dataout_timeout_retries;
-       u32                     default_erl;
-       u32                     nopin_timeout;
-       u32                     nopin_response_timeout;
-       u32                     random_datain_pdu_offsets;
-       u32                     random_datain_seq_offsets;
-       u32                     random_r2t_offsets;
-       u32                     tmr_cold_reset;
-       u32                     tmr_warm_reset;
-       struct iscsi_node_acl *nacl;
-};
-
-struct se_dev_entry_s;
-
-struct iscsi_node_auth {
-       enum naf_flags_table    naf_flags;
-       int                     authenticate_target;
-       /* Used for iscsit_global->discovery_auth,
-        * set to zero (auth disabled) by default */
-       int                     enforce_discovery_auth;
-#define MAX_USER_LEN                           256
-#define MAX_PASS_LEN                           256
-       char                    userid[MAX_USER_LEN];
-       char                    password[MAX_PASS_LEN];
-       char                    userid_mutual[MAX_USER_LEN];
-       char                    password_mutual[MAX_PASS_LEN];
-};
-
-#include "iscsi_target_stat.h"
-
-struct iscsi_node_stat_grps {
-       struct config_group     iscsi_sess_stats_group;
-       struct config_group     iscsi_conn_stats_group;
-};
-
-struct iscsi_node_acl {
-       struct iscsi_node_attrib node_attrib;
-       struct iscsi_node_auth  node_auth;
-       struct iscsi_node_stat_grps node_stat_grps;
-       struct se_node_acl      se_node_acl;
-};
-
-struct iscsi_tpg_attrib {
-       u32                     authentication;
-       u32                     login_timeout;
-       u32                     netif_timeout;
-       u32                     generate_node_acls;
-       u32                     cache_dynamic_acls;
-       u32                     default_cmdsn_depth;
-       u32                     demo_mode_write_protect;
-       u32                     prod_mode_write_protect;
-       u32                     demo_mode_discovery;
-       u32                     default_erl;
-       u8                      t10_pi;
-       struct iscsi_portal_group *tpg;
-};
-
-struct iscsi_np {
-       int                     np_network_transport;
-       int                     np_ip_proto;
-       int                     np_sock_type;
-       enum np_thread_state_table np_thread_state;
-       bool                    enabled;
-       enum iscsi_timer_flags_table np_login_timer_flags;
-       u32                     np_exports;
-       enum np_flags_table     np_flags;
-       unsigned char           np_ip[IPV6_ADDRESS_SPACE];
-       u16                     np_port;
-       spinlock_t              np_thread_lock;
-       struct completion       np_restart_comp;
-       struct socket           *np_socket;
-       struct __kernel_sockaddr_storage np_sockaddr;
-       struct task_struct      *np_thread;
-       struct timer_list       np_login_timer;
-       void                    *np_context;
-       struct iscsit_transport *np_transport;
-       struct list_head        np_list;
-} ____cacheline_aligned;
-
-struct iscsi_tpg_np {
-       struct iscsi_np         *tpg_np;
-       struct iscsi_portal_group *tpg;
-       struct iscsi_tpg_np     *tpg_np_parent;
-       struct list_head        tpg_np_list;
-       struct list_head        tpg_np_child_list;
-       struct list_head        tpg_np_parent_list;
-       struct se_tpg_np        se_tpg_np;
-       spinlock_t              tpg_np_parent_lock;
-       struct completion       tpg_np_comp;
-       struct kref             tpg_np_kref;
-};
-
-struct iscsi_portal_group {
-       unsigned char           tpg_chap_id;
-       /* TPG State */
-       enum tpg_state_table    tpg_state;
-       /* Target Portal Group Tag */
-       u16                     tpgt;
-       /* Id assigned to target sessions */
-       u16                     ntsih;
-       /* Number of active sessions */
-       u32                     nsessions;
-       /* Number of Network Portals available for this TPG */
-       u32                     num_tpg_nps;
-       /* Per TPG LIO specific session ID. */
-       u32                     sid;
-       /* Spinlock for adding/removing Network Portals */
-       spinlock_t              tpg_np_lock;
-       spinlock_t              tpg_state_lock;
-       struct se_portal_group tpg_se_tpg;
-       struct mutex            tpg_access_lock;
-       struct semaphore        np_login_sem;
-       struct iscsi_tpg_attrib tpg_attrib;
-       struct iscsi_node_auth  tpg_demo_auth;
-       /* Pointer to default list of iSCSI parameters for TPG */
-       struct iscsi_param_list *param_list;
-       struct iscsi_tiqn       *tpg_tiqn;
-       struct list_head        tpg_gnp_list;
-       struct list_head        tpg_list;
-} ____cacheline_aligned;
-
-struct iscsi_wwn_stat_grps {
-       struct config_group     iscsi_stat_group;
-       struct config_group     iscsi_instance_group;
-       struct config_group     iscsi_sess_err_group;
-       struct config_group     iscsi_tgt_attr_group;
-       struct config_group     iscsi_login_stats_group;
-       struct config_group     iscsi_logout_stats_group;
-};
-
-struct iscsi_tiqn {
-#define ISCSI_IQN_LEN                          224
-       unsigned char           tiqn[ISCSI_IQN_LEN];
-       enum tiqn_state_table   tiqn_state;
-       int                     tiqn_access_count;
-       u32                     tiqn_active_tpgs;
-       u32                     tiqn_ntpgs;
-       u32                     tiqn_num_tpg_nps;
-       u32                     tiqn_nsessions;
-       struct list_head        tiqn_list;
-       struct list_head        tiqn_tpg_list;
-       spinlock_t              tiqn_state_lock;
-       spinlock_t              tiqn_tpg_lock;
-       struct se_wwn           tiqn_wwn;
-       struct iscsi_wwn_stat_grps tiqn_stat_grps;
-       int                     tiqn_index;
-       struct iscsi_sess_err_stats  sess_err_stats;
-       struct iscsi_login_stats     login_stats;
-       struct iscsi_logout_stats    logout_stats;
-} ____cacheline_aligned;
-
-struct iscsit_global {
-       /* In core shutdown */
-       u32                     in_shutdown;
-       u32                     active_ts;
-       /* Unique identifier used for the authentication daemon */
-       u32                     auth_id;
-       u32                     inactive_ts;
-       /* Thread Set bitmap count */
-       int                     ts_bitmap_count;
-       /* Thread Set bitmap pointer */
-       unsigned long           *ts_bitmap;
-       /* Used for iSCSI discovery session authentication */
-       struct iscsi_node_acl   discovery_acl;
-       struct iscsi_portal_group       *discovery_tpg;
-};
-
-#endif /* ISCSI_TARGET_CORE_H */
index e93d5a7a3f8168b9330cbd2ff6238f7a588c88f4..fb3b52b124ac3772597a90ccc46cacd1acea825b 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <scsi/iscsi_proto.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
index 7087c736daa520fd22306e6cc39000b7c9aa08c0..34c3cd1b05ce8a40d9911da3815bc4a44b8a479c 100644 (file)
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
index a0ae5fc0ad75b5079fc7932f6dacbb19b2c68a64..1c197bad6132be98cd267b6ea666fc24e41e00a9 100644 (file)
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,7 +940,8 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
-               iscsit_close_connection(conn);
+               if (conn->conn_transport->transport_type == ISCSI_TCP)
+                       iscsit_close_connection(conn);
                return;
        }
 
index cda4d80cfaef999e45e4ac8a6b894513a3a44ad6..2e561deb30a2b6cd53f186b0e388311da35ad439 100644 (file)
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
index 4ca8fd2a70db4c05597f6b4bfbac171ce58ea8c7..e24f1c7c5862d4af2f0ae53efb3e981f153080db 100644 (file)
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
index 713c0c1877ab8d16bb999ab6738b058e11c82ba9..153fb66ac1b83693b2a7ea18580ddd4c94f9b5a9 100644 (file)
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
index 62a095f36bf2f78b77d2a9b75cea99b9c2f14ecb..8c02fa34716fae5a40dbf8cb09357bba5df2e7bd 100644 (file)
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
index 16454a922e2ba9ff2bc532cb0b896997b991a02e..208cca8a363c86de490aed4dcd1103fcd74b4ecc 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <target/target_core_base.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
index 18c29260b4a213d959463b41083abe6555166985..d4f9e96456978eab75e280e123308db09f6e95d2 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <linux/slab.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
 
index ca41b583f2f6d048927b318d69fd56ae9907c015..e446a09c886b1a2ca1344d87f755806b237fe406 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
index 10339551030767cd64428686bcb66e0a186c530d..5e1349a3b1438ece26d986f31608bd6912393371 100644 (file)
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
deleted file mode 100644 (file)
index 3ff76b4..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef ISCSI_TARGET_STAT_H
-#define ISCSI_TARGET_STAT_H
-
-/*
- * For struct iscsi_tiqn->tiqn_wwn default groups
- */
-extern struct config_item_type iscsi_stat_instance_cit;
-extern struct config_item_type iscsi_stat_sess_err_cit;
-extern struct config_item_type iscsi_stat_tgt_attr_cit;
-extern struct config_item_type iscsi_stat_login_cit;
-extern struct config_item_type iscsi_stat_logout_cit;
-
-/*
- * For struct iscsi_session->se_sess default groups
- */
-extern struct config_item_type iscsi_stat_sess_cit;
-
-/* iSCSI session error types */
-#define ISCSI_SESS_ERR_UNKNOWN         0
-#define ISCSI_SESS_ERR_DIGEST          1
-#define ISCSI_SESS_ERR_CXN_TIMEOUT     2
-#define ISCSI_SESS_ERR_PDU_FORMAT      3
-
-/* iSCSI session error stats */
-struct iscsi_sess_err_stats {
-       spinlock_t      lock;
-       u32             digest_errors;
-       u32             cxn_timeout_errors;
-       u32             pdu_format_errors;
-       u32             last_sess_failure_type;
-       char            last_sess_fail_rem_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI login failure types (sub oids) */
-#define ISCSI_LOGIN_FAIL_OTHER         2
-#define ISCSI_LOGIN_FAIL_REDIRECT      3
-#define ISCSI_LOGIN_FAIL_AUTHORIZE     4
-#define ISCSI_LOGIN_FAIL_AUTHENTICATE  5
-#define ISCSI_LOGIN_FAIL_NEGOTIATE     6
-
-/* iSCSI login stats */
-struct iscsi_login_stats {
-       spinlock_t      lock;
-       u32             accepts;
-       u32             other_fails;
-       u32             redirects;
-       u32             authorize_fails;
-       u32             authenticate_fails;
-       u32             negotiate_fails;        /* used for notifications */
-       u64             last_fail_time;         /* time stamp (jiffies) */
-       u32             last_fail_type;
-       int             last_intr_fail_ip_family;
-       unsigned char   last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
-       char            last_intr_fail_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI logout stats */
-struct iscsi_logout_stats {
-       spinlock_t      lock;
-       u32             normal_logouts;
-       u32             abnormal_logouts;
-} ____cacheline_aligned;
-
-#endif   /*** ISCSI_TARGET_STAT_H ***/
index 78404b1cc0bf311eb80b738d68eacca99ea446d0..b0224a77e26d3aff2549b71dd4b8c6a816213557 100644 (file)
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
index 9053a3c0c6e51675faf45bfbbfd990d48752f67d..bdd127c0e3aed1c718f0bd8b7d656a2c3aefcb71 100644 (file)
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
index 601e9cc61e98e754f9a4927f8be3f0ae6d2fc3c3..26aa509964737cbe570f0b87492bc30b8ec4111b 100644 (file)
 #include <linux/list.h>
 #include <linux/bitmap.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
 
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
 
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-       spin_lock(&active_ts_lock);
-       list_add_tail(&ts->ts_list, &active_ts_list);
-       iscsit_global->active_ts++;
-       spin_unlock(&active_ts_lock);
-}
-
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+       if (!list_empty(&ts->ts_list)) {
+               WARN_ON(1);
+               return;
+       }
        spin_lock(&inactive_ts_lock);
        list_add_tail(&ts->ts_list, &inactive_ts_list);
        iscsit_global->inactive_ts++;
        spin_unlock(&inactive_ts_lock);
 }
 
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-       spin_lock(&active_ts_lock);
-       list_del(&ts->ts_list);
-       iscsit_global->active_ts--;
-       spin_unlock(&active_ts_lock);
-}
-
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
        struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 
        ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
-       list_del(&ts->ts_list);
+       list_del_init(&ts->ts_list);
        iscsit_global->inactive_ts--;
        spin_unlock(&inactive_ts_lock);
 
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
 
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-       iscsi_add_ts_to_active_list(ts);
-
        spin_lock_bh(&ts->ts_state_lock);
        conn->thread_set = ts;
        ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
 
        if (ts->delay_inactive && (--ts->thread_count == 0)) {
                spin_unlock_bh(&ts->ts_state_lock);
-               iscsi_del_ts_from_active_list(ts);
 
                if (!iscsit_global->in_shutdown)
                        iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
 
        if (ts->delay_inactive && (--ts->thread_count == 0)) {
                spin_unlock_bh(&ts->ts_state_lock);
-               iscsi_del_ts_from_active_list(ts);
 
                if (!iscsit_global->in_shutdown)
                        iscsi_deallocate_extra_thread_sets();
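
The thread-set hunks above drop the separate active_ts_list entirely, leaving the inactive list as the only bookkeeping and switching removal to list_del_init() so the list_empty() guard added to iscsi_add_ts_to_inactive_list() can catch a double add. A short sketch of why list_del_init() matters for that guard:

        list_del_init(&ts->ts_list);            /* node now points at itself    */
        WARN_ON(!list_empty(&ts->ts_list));     /* passes; safe to re-add later */

        /* Plain list_del() would leave the node poisoned, so the same
         * list_empty() test would stay false forever and the guard in
         * iscsi_add_ts_to_inactive_list() would fire spuriously. */
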
index bcd88ec99793ba554496369d8853ee391cef8c11..390df8ed72b26738c53073180063c42847850585 100644 (file)
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
                        init_task_tag, conn->cid);
        return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
        struct iscsi_conn *conn,
@@ -939,13 +940,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
        state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
                                ISTATE_SEND_NOPIN_NO_RESPONSE;
        cmd->init_task_tag = RESERVED_ITT;
-       spin_lock_bh(&conn->sess->ttt_lock);
-       cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-                       0xFFFFFFFF;
-       if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-               cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-       spin_unlock_bh(&conn->sess->ttt_lock);
-
+       cmd->targ_xfer_tag = (want_response) ?
+                            session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
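
The nopin hunk above replaces the open-coded Target Transfer Tag assignment with session_get_next_ttt(). Based on the removed lines, the helper presumably hands out the next tag under ttt_lock and skips the reserved 0xFFFFFFFF value; a sketch under that assumption (the real definition lives in the relocated iscsi_target_core.h and may differ in detail):

static inline u32 session_get_next_ttt(struct iscsi_session *session)
{
        u32 ttt;

        spin_lock_bh(&session->ttt_lock);
        ttt = session->targ_xfer_tag++;
        if (ttt == 0xFFFFFFFF)                  /* reserved value, skip it */
                ttt = session->targ_xfer_tag++;
        spin_unlock_bh(&session->ttt_lock);

        return ttt;
}
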
index a68508c4fec862b325c6a5015b9493871f166e08..1ab754a671ff301977a90c90246ae5a223352051 100644 (file)
@@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                               unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
                        itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
index d836de200a03bcf24be54004df89c7d6d5039030..44620fb6bd45e96eae5b13dac4b9582a0d86c85e 100644 (file)
@@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with FILEIO"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index 78346b850968ed8da28d88f35cf6a3ac15512a1b..d4a4b0fb444a12907fac835f6a3db03600b722fa 100644 (file)
@@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd)
        sector_t block_lba = cmd->t_task_lba;
        sector_t sectors = sbc_get_write_same_sectors(cmd);
 
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with IBLOCK"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index 283cf786ef98be3d0594e847cc9749a072986b80..2de6fb8cee8d83b8338472ee2a51dd02c078e833 100644 (file)
@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
                }
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-                       pr_err("Unable to update renaming"
-                               " APTPL metadata\n");
+                       pr_err("Unable to update renaming APTPL metadata,"
+                              " reallocating larger buffer\n");
                        ret = -EMSGSIZE;
                        goto out;
                }
@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
                        lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-                       pr_err("Unable to update renaming"
-                               " APTPL metadata\n");
+                       pr_err("Unable to update renaming APTPL metadata,"
+                              " reallocating larger buffer\n");
                        ret = -EMSGSIZE;
                        goto out;
                }
@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
        unsigned char *buf;
-       int rc;
+       int rc, len = PR_APTPL_BUF_LEN;
 
        if (!aptpl) {
                char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
 
                return 0;
        }
-
-       buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+       buf = vzalloc(len);
        if (!buf)
                return TCM_OUT_OF_RESOURCES;
 
-       rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+       rc = core_scsi3_update_aptpl_buf(dev, buf, len);
        if (rc < 0) {
-               kfree(buf);
-               return TCM_OUT_OF_RESOURCES;
+               vfree(buf);
+               len *= 2;
+               goto retry;
        }
 
        rc = __core_scsi3_write_aptpl_to_file(dev, buf);
        if (rc != 0) {
                pr_err("SPC-3 PR: Could not update APTPL\n");
-               kfree(buf);
+               vfree(buf);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        dev->t10_pr.pr_aptpl_active = 1;
-       kfree(buf);
+       vfree(buf);
        pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
        return 0;
 }
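
The APTPL hunk above turns a fixed-size kzalloc() into a grow-and-retry loop: vzalloc() copes better with large buffers, and a too-small buffer is no longer a hard failure, only a reason to double the size. The same shape stripped to its essentials (sketch only, error paths trimmed):

        int len = PR_APTPL_BUF_LEN;
        unsigned char *buf;

        for (;;) {
                buf = vzalloc(len);
                if (!buf)
                        return TCM_OUT_OF_RESOURCES;
                if (core_scsi3_update_aptpl_buf(dev, buf, len) >= 0)
                        break;                  /* fits */
                vfree(buf);
                len *= 2;                       /* too small: grow and retry */
        }
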
index cd4bed7b27579b14a0f6e517eed23dacb6c1fe02..9a2f9d3a6e70514ad9351ab6e144709e8b95f91d 100644 (file)
@@ -36,6 +36,9 @@
 #include "target_core_ua.h"
 #include "target_core_alua.h"
 
+static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
@@ -251,7 +254,10 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+       struct se_device *dev = cmd->se_dev;
+       sector_t end_lba = dev->transport->get_blocks(dev) + 1;
        unsigned int sectors = sbc_get_write_same_sectors(cmd);
+       sense_reason_t ret;
 
        if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                        sectors, cmd->se_dev->dev_attrib.max_write_same_len);
                return TCM_INVALID_CDB_FIELD;
        }
+       /*
+        * Sanity check for LBA wrap and request past end of device.
+        */
+       if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+           ((cmd->t_task_lba + sectors) > end_lba)) {
+               pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+                      (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+               return TCM_ADDRESS_OUT_OF_RANGE;
+       }
+
        /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
        if (flags[0] & 0x10) {
                pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                if (!ops->execute_write_same_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+               if (!dev->dev_attrib.emulate_tpws) {
+                       pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+                              " has emulate_tpws disabled\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
                cmd->execute_cmd = ops->execute_write_same_unmap;
                return 0;
        }
        if (!ops->execute_write_same)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+       ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+       if (ret)
+               return ret;
+
        cmd->execute_cmd = ops->execute_write_same;
        return 0;
 }
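
Worked example of the WRITE_SAME wrap/overrun check added above, assuming a hypothetical device where end_lba works out to 1000:

        /*
         *   t_task_lba = 990,  sectors = 10  ->  1000 <= 1000         accepted
         *   t_task_lba = 995,  sectors = 10  ->  1005  > 1000         rejected
         *   t_task_lba = ~0ULL, sectors = 16 ->  sum wraps below lba  rejected
         */
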
@@ -614,14 +639,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
        return 0;
 }
 
-static bool
+static sense_reason_t
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
               u32 sectors, bool is_write)
 {
        u8 protect = cdb[1] >> 5;
 
-       if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
-               return true;
+       if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+               if (protect && !dev->dev_attrib.pi_prot_type) {
+                       pr_err("CDB contains protect bit, but device does not"
+                              " advertise PROTECT=1 feature bit\n");
+                       return TCM_INVALID_CDB_FIELD;
+               }
+               if (cmd->prot_pto)
+                       return TCM_NO_SENSE;
+       }
 
        switch (dev->dev_attrib.pi_prot_type) {
        case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                break;
        case TARGET_DIF_TYPE2_PROT:
                if (protect)
-                       return false;
+                       return TCM_INVALID_CDB_FIELD;
 
                cmd->reftag_seed = cmd->t_task_lba;
                break;
@@ -638,12 +670,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                break;
        case TARGET_DIF_TYPE0_PROT:
        default:
-               return true;
+               return TCM_NO_SENSE;
        }
 
        if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
                                   is_write, cmd))
-               return false;
+               return TCM_INVALID_CDB_FIELD;
 
        cmd->prot_type = dev->dev_attrib.pi_prot_type;
        cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
                 cmd->prot_op, cmd->prot_checks);
 
-       return true;
+       return TCM_NO_SENSE;
+}
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+       if (cdb[1] & 0x10) {
+               if (!dev->dev_attrib.emulate_dpo) {
+                       pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+                              " does not advertise support for DPO\n", cdb[0]);
+                       return -EINVAL;
+               }
+       }
+       if (cdb[1] & 0x8) {
+               if (!dev->dev_attrib.emulate_fua_write ||
+                   !dev->dev_attrib.emulate_write_cache) {
+                       pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+                              " does not advertise support for FUA write\n",
+                              cdb[0]);
+                       return -EINVAL;
+               }
+               cmd->se_cmd_flags |= SCF_FUA;
+       }
+       return 0;
 }
 
 sense_reason_t
@@ -686,8 +741,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return TCM_INVALID_CDB_FIELD;
                sectors = transport_get_sectors_10(cdb);
 
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 
@@ -777,8 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                cmd->transport_complete_callback = &xdreadwrite_callback;
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
        {
@@ -787,6 +861,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                case XDWRITEREAD_32:
                        sectors = transport_get_sectors_32(cdb);
 
+                       if (sbc_check_dpofua(dev, cmd, cdb))
+                               return TCM_INVALID_CDB_FIELD;
                        /*
                         * Use WRITE_32 and READ_32 opcodes for the emulated
                         * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        cmd->execute_rw = ops->execute_rw;
                        cmd->execute_cmd = sbc_execute_rw;
                        cmd->transport_complete_callback = &xdreadwrite_callback;
-                       if (cdb[1] & 0x8)
-                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+               if (!dev->dev_attrib.emulate_tpu) {
+                       pr_err("Got UNMAP, but backend device has"
+                              " emulate_tpu disabled\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
                size = get_unaligned_be16(&cdb[7]);
                cmd->execute_cmd = ops->execute_unmap;
                break;
@@ -955,7 +1034,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                unsigned long long end_lba;
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
-               if (cmd->t_task_lba + sectors > end_lba) {
+               if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+                   ((cmd->t_task_lba + sectors) > end_lba)) {
                        pr_err("cmd exceeds last lba %llu "
                                "(lba %llu, sectors %u)\n",
                                end_lba, cmd->t_task_lba, sectors);
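
Both range checks added in this file, the WRITE_SAME one above and the generic check_lba path here, reject the same two conditions: a 64-bit lba + sectors sum that wraps past zero, and a transfer that runs beyond the last addressable block. The overflow-safe test in isolation (plain C types, hypothetical helper name):

#include <stdbool.h>
#include <stdint.h>

/*
 * True when [lba, lba + sectors) fits inside a device of nr_blocks
 * blocks; the first test catches unsigned wrap-around.
 */
static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t nr_blocks)
{
	uint64_t end = lba + sectors;

	if (end < lba)                /* 64-bit wrap */
		return false;
	return end <= nr_blocks;      /* must not run past the last block */
}
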
index 4c71657da56ab3cdc96b5c1f8d784722f10e2c1d..460e9310947399661ce1fbd6ab4ed3e86b4a47e1 100644 (file)
@@ -647,7 +647,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
         * support the use of the WRITE SAME (16) command to unmap LBAs.
         */
        if (dev->dev_attrib.emulate_tpws != 0)
-               buf[5] |= 0x40;
+               buf[5] |= 0x40 | 0x20;
 
        return 0;
 }
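
The one-liner above additionally advertises WRITE SAME (10) with UNMAP by setting the LBPWS10 bit next to the existing LBPWS bit in byte 5 of the Logical Block Provisioning VPD page. The same byte built with named masks (bit positions per SBC-3; illustrative sketch, not the target code):

#include <stdint.h>

#define LBP_LBPU	0x80	/* UNMAP command supported */
#define LBP_LBPWS	0x40	/* WRITE SAME (16) with UNMAP supported */
#define LBP_LBPWS10	0x20	/* WRITE SAME (10) with UNMAP supported */

static void fill_lbp_byte5(uint8_t *buf, int emulate_tpu, int emulate_tpws)
{
	if (emulate_tpu)
		buf[5] |= LBP_LBPU;
	if (emulate_tpws)
		buf[5] |= LBP_LBPWS | LBP_LBPWS10;
}
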
index 25d244cbbe8fda719658e35be8f2081ee0f5989f..031018e7a65bd72a4ec8ba3a452e16f455e5f3a1 100644 (file)
@@ -262,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
        result = acpi_parse_art(priv->adev->handle, &priv->art_count,
                                &priv->arts, true);
        if (result)
-               goto free_priv;
-
+               dev_dbg(&pdev->dev, "_ART table parsing error\n");
 
        result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
                                &priv->trts, true);
        if (result)
-               goto free_art;
+               dev_dbg(&pdev->dev, "_TRT table parsing error\n");
 
        platform_set_drvdata(pdev, priv);
 
@@ -281,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
                                                &int3400_thermal_params, 0, 0);
        if (IS_ERR(priv->thermal)) {
                result = PTR_ERR(priv->thermal);
-               goto free_trt;
+               goto free_art_trt;
        }
 
        priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -295,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 
 free_zone:
        thermal_zone_device_unregister(priv->thermal);
-free_trt:
+free_art_trt:
        kfree(priv->trts);
-free_art:
        kfree(priv->arts);
 free_priv:
        kfree(priv);
index f88b0887702568578a5d288f925ab1c5b08e7a97..1e25133d35e2cbe8e7476a382bd27433a5801b0c 100644 (file)
@@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
                                trip_cnt, GFP_KERNEL);
                if (!int34x_thermal_zone->aux_trips) {
                        ret = -ENOMEM;
-                       goto free_mem;
+                       goto err_trip_alloc;
                }
                trip_mask = BIT(trip_cnt) - 1;
                int34x_thermal_zone->aux_trip_nr = trip_cnt;
@@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
                                                0, 0);
        if (IS_ERR(int34x_thermal_zone->zone)) {
                ret = PTR_ERR(int34x_thermal_zone->zone);
-               goto free_lpat;
+               goto err_thermal_zone;
        }
 
        return int34x_thermal_zone;
 
-free_lpat:
+err_thermal_zone:
        acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
-free_mem:
+       kfree(int34x_thermal_zone->aux_trips);
+err_trip_alloc:
        kfree(int34x_thermal_zone);
        return ERR_PTR(ret);
 }
@@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
 {
        thermal_zone_device_unregister(int34x_thermal_zone->zone);
        acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+       kfree(int34x_thermal_zone->aux_trips);
        kfree(int34x_thermal_zone);
 }
 EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
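
The relabelled error path above restores the usual unwind idiom: each setup step gets its own label, a failure jumps to the label that undoes only what was already set up, and the labels run in reverse order (the aux_trips allocation is now also freed on both the error and remove paths). A generic sketch of that pattern with illustrative names:

#include <stdlib.h>

struct zone {
	void *trips;
	void *lpat;
};

static struct zone *zone_create(void)
{
	struct zone *z = calloc(1, sizeof(*z));

	if (!z)
		return NULL;

	z->trips = malloc(64);
	if (!z->trips)
		goto err_trips;

	z->lpat = malloc(64);
	if (!z->lpat)
		goto err_lpat;

	return z;

err_lpat:
	free(z->trips);		/* undo in reverse order of setup */
err_trips:
	free(z);
	return NULL;
}
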
index 6ceebd659dd400423c0640b1d0911da36b441b74..12623bc02f46679674d9bd1c3f1574fc21b57c37 100644 (file)
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x45},
        { X86_VENDOR_INTEL, 6, 0x46},
        { X86_VENDOR_INTEL, 6, 0x4c},
+       { X86_VENDOR_INTEL, 6, 0x4d},
        { X86_VENDOR_INTEL, 6, 0x56},
        {}
 };
index 2580a4872f90febeb5af00136e16054bb59e4903..fe4e767018c4cf73afa3c53852b6d48191e2a81e 100644 (file)
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (irq) {
-               int ret;
-
                /*
                 * platform has IRQ support.
                 * Then, driver uses common registers
-                */
-
-               ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
-                                      dev_name(dev), common);
-               if (ret) {
-                       dev_err(dev, "irq request failed\n ");
-                       return ret;
-               }
-
-               /*
                 * rcar_has_irq_support() will be enabled
                 */
                res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        }
 
        /* enable temperature comparison */
-       if (irq)
+       if (irq) {
+               ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+                                      dev_name(dev), common);
+               if (ret) {
+                       dev_err(dev, "irq request failed\n ");
+                       goto error_unregister;
+               }
+
                rcar_thermal_common_write(common, ENR, enr_bits);
+       }
 
        platform_set_drvdata(pdev, common);
 
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
 error_unregister:
        rcar_thermal_for_each_priv(priv, common) {
-               thermal_zone_device_unregister(priv->zone);
                if (rcar_has_irq_support(priv))
                        rcar_thermal_irq_disable(priv);
+               thermal_zone_device_unregister(priv->zone);
        }
 
        pm_runtime_put(dev);
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev)
        struct rcar_thermal_priv *priv;
 
        rcar_thermal_for_each_priv(priv, common) {
-               thermal_zone_device_unregister(priv->zone);
                if (rcar_has_irq_support(priv))
                        rcar_thermal_irq_disable(priv);
+               thermal_zone_device_unregister(priv->zone);
        }
 
        pm_runtime_put(dev);
index fbeedc072cc2eb1ec8c84003d90997fdceabe989..1d30b0975651547bdc1464befd517eb450957fc0 100644 (file)
@@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
 
        if (on) {
                con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+               con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
                interrupt_en =
                        (of_thermal_is_trip_valid(tz, 7)
                        << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
@@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
                        interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
        } else {
                con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+               con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
                interrupt_en = 0; /* Disable all interrupts */
        }
-       con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
 
        writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
        writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
@@ -716,7 +717,7 @@ static int exynos_get_temp(void *p, long *temp)
 {
        struct exynos_tmu_data *data = p;
 
-       if (!data)
+       if (!data || !data->tmu_read)
                return -EINVAL;
 
        mutex_lock(&data->lock);
@@ -918,34 +919,16 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
 }
 
 static const struct of_device_id exynos_tmu_match[] = {
-       {
-               .compatible = "samsung,exynos3250-tmu",
-       },
-       {
-               .compatible = "samsung,exynos4210-tmu",
-       },
-       {
-               .compatible = "samsung,exynos4412-tmu",
-       },
-       {
-               .compatible = "samsung,exynos5250-tmu",
-       },
-       {
-               .compatible = "samsung,exynos5260-tmu",
-       },
-       {
-               .compatible = "samsung,exynos5420-tmu",
-       },
-       {
-               .compatible = "samsung,exynos5420-tmu-ext-triminfo",
-       },
-       {
-               .compatible = "samsung,exynos5440-tmu",
-       },
-       {
-               .compatible = "samsung,exynos7-tmu",
-       },
-       {},
+       { .compatible = "samsung,exynos3250-tmu", },
+       { .compatible = "samsung,exynos4210-tmu", },
+       { .compatible = "samsung,exynos4412-tmu", },
+       { .compatible = "samsung,exynos5250-tmu", },
+       { .compatible = "samsung,exynos5260-tmu", },
+       { .compatible = "samsung,exynos5420-tmu", },
+       { .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
+       { .compatible = "samsung,exynos5440-tmu", },
+       { .compatible = "samsung,exynos7-tmu", },
+       { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
 
index 48491d1a81d650f1d10812c1fb8d5e05ff99cbe1..174d3bcf8bd7a16a161cdf94c53e8d14efaa55a6 100644 (file)
@@ -899,6 +899,22 @@ thermal_cooling_device_trip_point_show(struct device *dev,
                return sprintf(buf, "%d\n", instance->trip);
 }
 
+static struct attribute *cooling_device_attrs[] = {
+       &dev_attr_cdev_type.attr,
+       &dev_attr_max_state.attr,
+       &dev_attr_cur_state.attr,
+       NULL,
+};
+
+static const struct attribute_group cooling_device_attr_group = {
+       .attrs = cooling_device_attrs,
+};
+
+static const struct attribute_group *cooling_device_attr_groups[] = {
+       &cooling_device_attr_group,
+       NULL,
+};
+
 /* Device management */
 
 /**
@@ -1130,6 +1146,7 @@ __thermal_cooling_device_register(struct device_node *np,
        cdev->ops = ops;
        cdev->updated = false;
        cdev->device.class = &thermal_class;
+       cdev->device.groups = cooling_device_attr_groups;
        cdev->devdata = devdata;
        dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
        result = device_register(&cdev->device);
@@ -1139,21 +1156,6 @@ __thermal_cooling_device_register(struct device_node *np,
                return ERR_PTR(result);
        }
 
-       /* sys I/F */
-       if (type) {
-               result = device_create_file(&cdev->device, &dev_attr_cdev_type);
-               if (result)
-                       goto unregister;
-       }
-
-       result = device_create_file(&cdev->device, &dev_attr_max_state);
-       if (result)
-               goto unregister;
-
-       result = device_create_file(&cdev->device, &dev_attr_cur_state);
-       if (result)
-               goto unregister;
-
        /* Add 'this' new cdev to the global cdev list */
        mutex_lock(&thermal_list_lock);
        list_add(&cdev->node, &thermal_cdev_list);
@@ -1163,11 +1165,6 @@ __thermal_cooling_device_register(struct device_node *np,
        bind_cdev(cdev);
 
        return cdev;
-
-unregister:
-       release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
-       device_unregister(&cdev->device);
-       return ERR_PTR(result);
 }
 
 /**
index 634b6ce0e63ace5757c06513b45ac563dcfab593..62a5d449c38805019db7d554e6c0f7f43d215341 100644 (file)
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
 {
        int i;
index 3fb054a10f6a0fde450e29a98ee4cdf90e18f6c7..a38c1756442aa2611e0e207466aabba4e72b94cc 100644 (file)
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
 
        data = ti_bandgap_get_sensor_data(bgp, id);
 
-       if (data && data->cool_dev)
+       if (data)
                cpufreq_cooling_unregister(data->cool_dev);
 
        return 0;
index d7b198c400c755487309cf6e6d9b615c9cfe4a69..ce24182f851479ab61372d1091840696cfe6857f 100644 (file)
@@ -210,18 +210,6 @@ bfin_jc_chars_in_buffer(struct tty_struct *tty)
        return circ_cnt(&bfin_jc_write_buf);
 }
 
-static void
-bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
-{
-       unsigned long expire = jiffies + timeout;
-       while (!circ_empty(&bfin_jc_write_buf)) {
-               if (signal_pending(current))
-                       break;
-               if (time_after(jiffies, expire))
-                       break;
-       }
-}
-
 static const struct tty_operations bfin_jc_ops = {
        .open            = bfin_jc_open,
        .close           = bfin_jc_close,
@@ -230,7 +218,6 @@ static const struct tty_operations bfin_jc_ops = {
        .flush_chars     = bfin_jc_flush_chars,
        .write_room      = bfin_jc_write_room,
        .chars_in_buffer = bfin_jc_chars_in_buffer,
-       .wait_until_sent = bfin_jc_wait_until_sent,
 };
 
 static int __init bfin_jc_init(void)
index e3b9570a1eff8aa467ec7dabbe93369c47940e87..deae122c9c4bac6a2bb28d6bbc191bfc18d16747 100644 (file)
@@ -2138,8 +2138,8 @@ int serial8250_do_startup(struct uart_port *port)
        /*
         * Clear the interrupt registers.
         */
-       if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
-               serial_port_in(port, UART_RX);
+       serial_port_in(port, UART_LSR);
+       serial_port_in(port, UART_RX);
        serial_port_in(port, UART_IIR);
        serial_port_in(port, UART_MSR);
 
@@ -2300,8 +2300,8 @@ dont_test_tx_en:
         * saved flags to avoid getting false values from polling
         * routines or the previous session.
         */
-       if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
-               serial_port_in(port, UART_RX);
+       serial_port_in(port, UART_LSR);
+       serial_port_in(port, UART_RX);
        serial_port_in(port, UART_IIR);
        serial_port_in(port, UART_MSR);
        up->lsr_saved_flags = 0;
@@ -2394,8 +2394,7 @@ void serial8250_do_shutdown(struct uart_port *port)
         * Read data port to reset things, and then unlink from
         * the IRQ chain.
         */
-       if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
-               serial_port_in(port, UART_RX);
+       serial_port_in(port, UART_RX);
        serial8250_rpm_put(up);
 
        del_timer_sync(&up->timer);
index e60116235836498e5b20ce56219b7d8c949acd25..2ab229ddee389ee20fa7923f93e43ddda944bbf0 100644 (file)
@@ -59,6 +59,8 @@ struct dw8250_data {
        u8                      usr_reg;
        int                     last_mcr;
        int                     line;
+       int                     msr_mask_on;
+       int                     msr_mask_off;
        struct clk              *clk;
        struct clk              *pclk;
        struct reset_control    *rst;
@@ -81,6 +83,12 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
                value &= ~UART_MSR_DCTS;
        }
 
+       /* Override any modem control signals if needed */
+       if (offset == UART_MSR) {
+               value |= d->msr_mask_on;
+               value &= ~d->msr_mask_off;
+       }
+
        return value;
 }
 
@@ -334,6 +342,30 @@ static int dw8250_probe_of(struct uart_port *p,
        if (id >= 0)
                p->line = id;
 
+       if (of_property_read_bool(np, "dcd-override")) {
+               /* Always report DCD as active */
+               data->msr_mask_on |= UART_MSR_DCD;
+               data->msr_mask_off |= UART_MSR_DDCD;
+       }
+
+       if (of_property_read_bool(np, "dsr-override")) {
+               /* Always report DSR as active */
+               data->msr_mask_on |= UART_MSR_DSR;
+               data->msr_mask_off |= UART_MSR_DDSR;
+       }
+
+       if (of_property_read_bool(np, "cts-override")) {
+               /* Always report CTS as active */
+               data->msr_mask_on |= UART_MSR_CTS;
+               data->msr_mask_off |= UART_MSR_DCTS;
+       }
+
+       if (of_property_read_bool(np, "ri-override")) {
+               /* Always report Ring indicator as inactive */
+               data->msr_mask_off |= UART_MSR_RI;
+               data->msr_mask_off |= UART_MSR_TERI;
+       }
+
        /* clock got configured through clk api, all done */
        if (p->uartclk)
                return 0;
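
The new msr_mask_on/msr_mask_off pair lets the devicetree force individual modem-status bits to a fixed level on every MSR read, which is what the dcd-/dsr-/cts-/ri-override properties request. The core of the override is a two-mask update; a tiny standalone version (hypothetical names):

#include <stdint.h>

/*
 * Force the bits in mask_on to 1 and the bits in mask_off to 0;
 * every other bit passes through from the hardware value unchanged.
 */
static uint32_t apply_override(uint32_t value, uint32_t mask_on,
			       uint32_t mask_off)
{
	return (value | mask_on) & ~mask_off;
}
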
index daf2c82984e95d94d806b175bc59944ac1cd9713..892eb32cdef4bd98586d02e2e432c38aea512797 100644 (file)
@@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
               "Please send the output of lspci -vv, this\n"
               "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
               "manufacturer and name of serial board or\n"
-              "modem board to rmk+serial@arm.linux.org.uk.\n",
+              "modem board to <linux-serial@vger.kernel.org>.\n",
               pci_name(dev), str, dev->vendor, dev->device,
               dev->subsystem_vendor, dev->subsystem_device);
 }
@@ -1987,13 +1987,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = byt_serial_setup,
        },
-       {
-               .vendor         = PCI_VENDOR_ID_INTEL,
-               .device         = PCI_DEVICE_ID_INTEL_QRK_UART,
-               .subvendor      = PCI_ANY_ID,
-               .subdevice      = PCI_ANY_ID,
-               .setup          = pci_default_setup,
-       },
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = PCI_DEVICE_ID_INTEL_BSW_UART1,
@@ -2199,13 +2192,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
        /*
         * PLX
         */
-       {
-               .vendor         = PCI_VENDOR_ID_PLX,
-               .device         = PCI_DEVICE_ID_PLX_9030,
-               .subvendor      = PCI_SUBVENDOR_ID_PERLE,
-               .subdevice      = PCI_ANY_ID,
-               .setup          = pci_default_setup,
-       },
        {
                .vendor         = PCI_VENDOR_ID_PLX,
                .device         = PCI_DEVICE_ID_PLX_9050,
@@ -5415,10 +5401,6 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
-       {       PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
-               PCI_ANY_ID, PCI_ANY_ID,
-               0, 0, pbn_b0_bt_2_115200 },
-
        {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_wch384_4 },
index 846552bff67d6f005c3966e80368134dcd9ab27f..4e959c43f6804d12f8677d916e1f050bdc740457 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/irq.h>
+#include <linux/suspend.h>
 
 #include <asm/io.h>
 #include <asm/ioctls.h>
@@ -173,6 +174,12 @@ struct atmel_uart_port {
        bool                    ms_irq_enabled;
        bool                    is_usart;       /* usart or uart */
        struct timer_list       uart_timer;     /* uart timer */
+
+       bool                    suspended;
+       unsigned int            pending;
+       unsigned int            pending_status;
+       spinlock_t              lock_suspended;
+
        int (*prepare_rx)(struct uart_port *port);
        int (*prepare_tx)(struct uart_port *port);
        void (*schedule_rx)(struct uart_port *port);
@@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
 {
        struct uart_port *port = dev_id;
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-       unsigned int status, pending, pass_counter = 0;
+       unsigned int status, pending, mask, pass_counter = 0;
        bool gpio_handled = false;
 
+       spin_lock(&atmel_port->lock_suspended);
+
        do {
                status = atmel_get_lines_status(port);
-               pending = status & UART_GET_IMR(port);
+               mask = UART_GET_IMR(port);
+               pending = status & mask;
                if (!gpio_handled) {
                        /*
                         * Dealing with GPIO interrupt
@@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
                if (!pending)
                        break;
 
+               if (atmel_port->suspended) {
+                       atmel_port->pending |= pending;
+                       atmel_port->pending_status = status;
+                       UART_PUT_IDR(port, mask);
+                       pm_system_wakeup();
+                       break;
+               }
+
                atmel_handle_receive(port, pending);
                atmel_handle_status(port, pending, status);
                atmel_handle_transmit(port, pending);
        } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
 
+       spin_unlock(&atmel_port->lock_suspended);
+
        return pass_counter ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port)
        /*
         * Allocate the IRQ
         */
-       retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
+       retval = request_irq(port->irq, atmel_interrupt,
+                       IRQF_SHARED | IRQF_COND_SUSPEND,
                        tty ? tty->name : "atmel_serial", port);
        if (retval) {
                dev_err(port->dev, "atmel_startup - Can't get irq\n");
@@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
 
        /* we can not wake up if we're running on slow clock */
        atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
-       if (atmel_serial_clk_will_stop())
+       if (atmel_serial_clk_will_stop()) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+               atmel_port->suspended = true;
+               spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
                device_set_wakeup_enable(&pdev->dev, 0);
+       }
 
        uart_suspend_port(&atmel_uart, port);
 
@@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+       if (atmel_port->pending) {
+               atmel_handle_receive(port, atmel_port->pending);
+               atmel_handle_status(port, atmel_port->pending,
+                                   atmel_port->pending_status);
+               atmel_handle_transmit(port, atmel_port->pending);
+               atmel_port->pending = 0;
+       }
+       atmel_port->suspended = false;
+       spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
 
        uart_resume_port(&atmel_uart, port);
        device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
@@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
        port->backup_imr = 0;
        port->uart.line = ret;
 
+       spin_lock_init(&port->lock_suspended);
+
        ret = atmel_init_gpios(port, &pdev->dev);
        if (ret < 0)
                dev_err(&pdev->dev, "%s",
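
Once atmel_port->suspended is set, the interrupt handler above only accumulates the pending causes, masks them and signals a system wakeup; atmel_serial_resume() then replays the saved status before clearing the flag. The latch-and-replay scheme in miniature (userspace stand-in: a pthread mutex instead of the spinlock, hypothetical names):

#include <pthread.h>
#include <stdint.h>

/* Latch events while suspended, replay them on resume. */
static struct {
	pthread_mutex_t lock;
	int suspended;
	uint32_t pending;	/* causes seen while suspended */
} port = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void handle_cause(uint32_t cause)
{
	(void)cause;		/* real handling would go here */
}

static void irq(uint32_t cause)
{
	pthread_mutex_lock(&port.lock);
	if (port.suspended)
		port.pending |= cause;	/* defer until resume */
	else
		handle_cause(cause);
	pthread_mutex_unlock(&port.lock);
}

static void resume(void)
{
	pthread_mutex_lock(&port.lock);
	if (port.pending) {
		handle_cause(port.pending);
		port.pending = 0;
	}
	port.suspended = 0;
	pthread_mutex_unlock(&port.lock);
}
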
index 7ff61e24a195cbca54d60aaefd65bbdf3d0af187..33fb94f7896773c53141ec283f15a3a18be96f1a 100644 (file)
@@ -133,10 +133,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
        if (of_find_property(np, "no-loopback-test", NULL))
                port->flags |= UPF_SKIP_TEST;
 
-       ret = of_alias_get_id(np, "serial");
-       if (ret >= 0)
-               port->line = ret;
-
        port->dev = &ofdev->dev;
 
        switch (type) {
index 594b63331ef40e42fa75a87cde1c139a46014398..bca975f5093b73fd5ebf3e0f4ec8c42e97dfea35 100644 (file)
@@ -293,8 +293,10 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
 
        ims = serial_in(port, SPRD_IMSR);
 
-       if (!ims)
+       if (!ims) {
+               spin_unlock(&port->lock);
                return IRQ_NONE;
+       }
 
        serial_out(port, SPRD_ICLR, ~0);
 
index 51f066aa375e64789e03b9a527679e3a0a7e1c8d..2bb4dfc028734079f29a89bfb779bfdc7e0079eb 100644 (file)
@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty);
 /* We limit tty time update visibility to every 8 seconds or so. */
 static void tty_update_time(struct timespec *time)
 {
-       unsigned long sec = get_seconds() & ~7;
-       if ((long)(sec - time->tv_sec) > 0)
+       unsigned long sec = get_seconds();
+       if (abs(sec - time->tv_sec) & ~7)
                time->tv_sec = sec;
 }
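
The rewritten tty_update_time() publishes a new timestamp only when it differs from the stored one in more than the low three bits, so tty atime/mtime keeps a granularity of roughly eight seconds instead of exposing exact I/O timing. The comparison on its own (userspace sketch):

#include <stdbool.h>
#include <stdlib.h>

/*
 * True when the two second counters differ by eight seconds or more,
 * matching the "& ~7" test above; only then is an update published.
 */
static bool time_update_due(long stored_sec, long now_sec)
{
	return (labs(now_sec - stored_sec) & ~7L) != 0;
}
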
 
index a5cf253b2544f001a775f93a7905e63475f7eb7e..632fc815206169281af9aa13f6ca345eb846eabe 100644 (file)
@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
 #endif
        if (!timeout)
                timeout = MAX_SCHEDULE_TIMEOUT;
-       if (wait_event_interruptible_timeout(tty->write_wait,
-                       !tty_chars_in_buffer(tty), timeout) >= 0) {
-               if (tty->ops->wait_until_sent)
-                       tty->ops->wait_until_sent(tty, timeout);
-       }
+
+       timeout = wait_event_interruptible_timeout(tty->write_wait,
+                       !tty_chars_in_buffer(tty), timeout);
+       if (timeout <= 0)
+               return;
+
+       if (timeout == MAX_SCHEDULE_TIMEOUT)
+               timeout = 0;
+
+       if (tty->ops->wait_until_sent)
+               tty->ops->wait_until_sent(tty, timeout);
 }
 EXPORT_SYMBOL(tty_wait_until_sent);
 
index e78720b59d67e66adebff9594475b124341b0608..683617714e7cf852fdf3fc2d4ec8590ab6d1a114 100644 (file)
@@ -1650,6 +1650,8 @@ static int acm_reset_resume(struct usb_interface *intf)
 
 static const struct usb_device_id acm_ids[] = {
        /* quirky and broken devices */
+       { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
+       .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
        { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
        .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
        { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
index 66abdbcfbfa55c08c4989d5ce4fe16b087486a79..11635537c052cdc5d4e4c78d615a09aafc4c5e83 100644 (file)
@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
        as->status = urb->status;
        signr = as->signr;
        if (signr) {
+               memset(&sinfo, 0, sizeof(sinfo));
                sinfo.si_signo = as->signr;
                sinfo.si_errno = as->status;
                sinfo.si_code = SI_ASYNCIO;
@@ -2382,6 +2383,7 @@ static void usbdev_remove(struct usb_device *udev)
                wake_up_all(&ps->wait);
                list_del_init(&ps->list);
                if (ps->discsignr) {
+                       memset(&sinfo, 0, sizeof(sinfo));
                        sinfo.si_signo = ps->discsignr;
                        sinfo.si_errno = EPIPE;
                        sinfo.si_code = SI_ASYNCIO;
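
Both memset() additions make sure every byte of the siginfo handed to the signal code is initialized, not only the named fields, so padding cannot carry stale kernel stack contents out to userspace. The general rule, shown on an arbitrary struct (sketch):

#include <string.h>

struct info {
	int signo;
	int errno_val;
	int code;
	void *addr;
	/* the compiler may add padding between and after these fields */
};

static void fill_info(struct info *out, int signo, int err)
{
	/*
	 * Zero the whole object first: assigning the named fields alone
	 * would leave the padding bytes holding whatever was there before.
	 */
	memset(out, 0, sizeof(*out));
	out->signo = signo;
	out->errno_val = err;
	out->code = 1;
}
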
index 172d64e585b61b014f3333ef200d2cb2ba48958d..52e0c4e5e48efa8cc1ccecf0ee30364ff1419634 100644 (file)
@@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
                                                omap->irq0_offset, value);
 }
 
+static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
+                                               omap->irqmisc_offset, value);
+}
+
+static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
+{
+       dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
+                                               omap->irq0_offset, value);
+}
+
 static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
        enum omap_dwc3_vbus_id_status status)
 {
@@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
 
 static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
 {
+       u32                     reg;
+
        /* disable all IRQs */
-       dwc3_omap_write_irqmisc_set(omap, 0x00);
-       dwc3_omap_write_irq0_set(omap, 0x00);
+       reg = USBOTGSS_IRQO_COREIRQ_ST;
+       dwc3_omap_write_irq0_clr(omap, reg);
+
+       reg = (USBOTGSS_IRQMISC_OEVT |
+                       USBOTGSS_IRQMISC_DRVVBUS_RISE |
+                       USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_IDPULLUP_RISE |
+                       USBOTGSS_IRQMISC_DRVVBUS_FALL |
+                       USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+       dwc3_omap_write_irqmisc_clr(omap, reg);
 }
 
 static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
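
The fix above stops writing 0 to the IRQENABLE_SET registers, which is a no-op on write-1-to-set hardware, and instead writes the bits to be disabled into the matching IRQENABLE_CLR registers through the two new helpers. The set/clear register convention in miniature (hypothetical register layout):

#include <stdint.h>

/*
 * Many SoCs expose interrupt enables as a write-1-to-set plus a
 * write-1-to-clear register pair; writing 0 to either changes nothing.
 */
struct irq_regs {
	volatile uint32_t enable_set;	/* write 1s here to enable */
	volatile uint32_t enable_clr;	/* write 1s here to disable */
};

static void irq_disable_bits(struct irq_regs *r, uint32_t bits)
{
	r->enable_clr = bits;	/* name the bits you want cleared */
	/* r->enable_set = 0; would silently do nothing */
}
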
index 75648145dc1b686a89cae9f8d4de451b71ef376a..c42765b3a060bc6b28b0f7a55f0231489afe651c 100644 (file)
@@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
        if (desc->opts_mutex)
                mutex_lock(desc->opts_mutex);
        memcpy(desc->ext_compat_id, page, l);
-       desc->ext_compat_id[l] = '\0';
 
        if (desc->opts_mutex)
                mutex_unlock(desc->opts_mutex);
@@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
        if (desc->opts_mutex)
                mutex_lock(desc->opts_mutex);
        memcpy(desc->ext_compat_id + 8, page, l);
-       desc->ext_compat_id[l + 8] = '\0';
 
        if (desc->opts_mutex)
                mutex_unlock(desc->opts_mutex);
index af98b096af2fde060aea8978005e8ea620762566..175c9956cbe3a36949526029103d38b4c97225c3 100644 (file)
@@ -144,10 +144,9 @@ struct ffs_io_data {
        bool read;
 
        struct kiocb *kiocb;
-       const struct iovec *iovec;
-       unsigned long nr_segs;
-       char __user *buf;
-       size_t len;
+       struct iov_iter data;
+       const void *to_free;
+       char *buf;
 
        struct mm_struct *mm;
        struct work_struct work;
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
                                         io_data->req->actual;
 
        if (io_data->read && ret > 0) {
-               int i;
-               size_t pos = 0;
-
-               /*
-                * Since req->length may be bigger than io_data->len (after
-                * being rounded up to maxpacketsize), we may end up with more
-                * data then user space has space for.
-                */
-               ret = min_t(int, ret, io_data->len);
-
                use_mm(io_data->mm);
-               for (i = 0; i < io_data->nr_segs; i++) {
-                       size_t len = min_t(size_t, ret - pos,
-                                       io_data->iovec[i].iov_len);
-                       if (!len)
-                               break;
-                       if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
-                                                &io_data->buf[pos], len))) {
-                               ret = -EFAULT;
-                               break;
-                       }
-                       pos += len;
-               }
+               ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+               if (iov_iter_count(&io_data->data))
+                       ret = -EFAULT;
                unuse_mm(io_data->mm);
        }
 
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 
        io_data->kiocb->private = NULL;
        if (io_data->read)
-               kfree(io_data->iovec);
+               kfree(io_data->to_free);
        kfree(io_data->buf);
        kfree(io_data);
 }
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                 * before the waiting completes, so do not assign to 'gadget' earlier
                 */
                struct usb_gadget *gadget = epfile->ffs->gadget;
+               size_t copied;
 
                spin_lock_irq(&epfile->ffs->eps_lock);
                /* In the meantime, endpoint got disabled or changed. */
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                        spin_unlock_irq(&epfile->ffs->eps_lock);
                        return -ESHUTDOWN;
                }
+               data_len = iov_iter_count(&io_data->data);
                /*
                 * Controller may require buffer size to be aligned to
                 * maxpacketsize of an out endpoint.
                 */
-               data_len = io_data->read ?
-                          usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
-                          io_data->len;
+               if (io_data->read)
+                       data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
                spin_unlock_irq(&epfile->ffs->eps_lock);
 
                data = kmalloc(data_len, GFP_KERNEL);
                if (unlikely(!data))
                        return -ENOMEM;
-               if (io_data->aio && !io_data->read) {
-                       int i;
-                       size_t pos = 0;
-                       for (i = 0; i < io_data->nr_segs; i++) {
-                               if (unlikely(copy_from_user(&data[pos],
-                                            io_data->iovec[i].iov_base,
-                                            io_data->iovec[i].iov_len))) {
-                                       ret = -EFAULT;
-                                       goto error;
-                               }
-                               pos += io_data->iovec[i].iov_len;
-                       }
-               } else {
-                       if (!io_data->read &&
-                           unlikely(__copy_from_user(data, io_data->buf,
-                                                     io_data->len))) {
+               if (!io_data->read) {
+                       copied = copy_from_iter(data, data_len, &io_data->data);
+                       if (copied != data_len) {
                                ret = -EFAULT;
                                goto error;
                        }
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                                 */
                                ret = ep->status;
                                if (io_data->read && ret > 0) {
-                                       ret = min_t(size_t, ret, io_data->len);
-
-                                       if (unlikely(copy_to_user(io_data->buf,
-                                               data, ret)))
+                                       ret = copy_to_iter(data, ret, &io_data->data);
+                                       if (unlikely(iov_iter_count(&io_data->data)))
                                                ret = -EFAULT;
                                }
                        }
@@ -898,37 +864,6 @@ error:
        return ret;
 }
 
-static ssize_t
-ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
-                loff_t *ptr)
-{
-       struct ffs_io_data io_data;
-
-       ENTER();
-
-       io_data.aio = false;
-       io_data.read = false;
-       io_data.buf = (char * __user)buf;
-       io_data.len = len;
-
-       return ffs_epfile_io(file, &io_data);
-}
-
-static ssize_t
-ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
-{
-       struct ffs_io_data io_data;
-
-       ENTER();
-
-       io_data.aio = false;
-       io_data.read = true;
-       io_data.buf = buf;
-       io_data.len = len;
-
-       return ffs_epfile_io(file, &io_data);
-}
-
 static int
 ffs_epfile_open(struct inode *inode, struct file *file)
 {
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
        return value;
 }
 
-static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
-                                   const struct iovec *iovec,
-                                   unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 {
-       struct ffs_io_data *io_data;
+       struct ffs_io_data io_data, *p = &io_data;
+       ssize_t res;
 
        ENTER();
 
-       io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
-       if (unlikely(!io_data))
-               return -ENOMEM;
+       if (!is_sync_kiocb(kiocb)) {
+               p = kmalloc(sizeof(io_data), GFP_KERNEL);
+               if (unlikely(!p))
+                       return -ENOMEM;
+               p->aio = true;
+       } else {
+               p->aio = false;
+       }
 
-       io_data->aio = true;
-       io_data->read = false;
-       io_data->kiocb = kiocb;
-       io_data->iovec = iovec;
-       io_data->nr_segs = nr_segs;
-       io_data->len = kiocb->ki_nbytes;
-       io_data->mm = current->mm;
+       p->read = false;
+       p->kiocb = kiocb;
+       p->data = *from;
+       p->mm = current->mm;
 
-       kiocb->private = io_data;
+       kiocb->private = p;
 
        kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
-       return ffs_epfile_io(kiocb->ki_filp, io_data);
+       res = ffs_epfile_io(kiocb->ki_filp, p);
+       if (res == -EIOCBQUEUED)
+               return res;
+       if (p->aio)
+               kfree(p);
+       else
+               *from = p->data;
+       return res;
 }
 
-static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
-                                  const struct iovec *iovec,
-                                  unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
-       struct ffs_io_data *io_data;
-       struct iovec *iovec_copy;
+       struct ffs_io_data io_data, *p = &io_data;
+       ssize_t res;
 
        ENTER();
 
-       iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
-       if (unlikely(!iovec_copy))
-               return -ENOMEM;
-
-       memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
-
-       io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
-       if (unlikely(!io_data)) {
-               kfree(iovec_copy);
-               return -ENOMEM;
+       if (!is_sync_kiocb(kiocb)) {
+               p = kmalloc(sizeof(io_data), GFP_KERNEL);
+               if (unlikely(!p))
+                       return -ENOMEM;
+               p->aio = true;
+       } else {
+               p->aio = false;
        }
 
-       io_data->aio = true;
-       io_data->read = true;
-       io_data->kiocb = kiocb;
-       io_data->iovec = iovec_copy;
-       io_data->nr_segs = nr_segs;
-       io_data->len = kiocb->ki_nbytes;
-       io_data->mm = current->mm;
+       p->read = true;
+       p->kiocb = kiocb;
+       if (p->aio) {
+               p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
+               if (!p->to_free) {
+                       kfree(p);
+                       return -ENOMEM;
+               }
+       } else {
+               p->data = *to;
+               p->to_free = NULL;
+       }
+       p->mm = current->mm;
 
-       kiocb->private = io_data;
+       kiocb->private = p;
 
        kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
-       return ffs_epfile_io(kiocb->ki_filp, io_data);
+       res = ffs_epfile_io(kiocb->ki_filp, p);
+       if (res == -EIOCBQUEUED)
+               return res;
+
+       if (p->aio) {
+               kfree(p->to_free);
+               kfree(p);
+       } else {
+               *to = p->data;
+       }
+       return res;
 }
 
 static int
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
        .llseek =       no_llseek,
 
        .open =         ffs_epfile_open,
-       .write =        ffs_epfile_write,
-       .read =         ffs_epfile_read,
-       .aio_write =    ffs_epfile_aio_write,
-       .aio_read =     ffs_epfile_aio_read,
+       .write =        new_sync_write,
+       .read =         new_sync_read,
+       .write_iter =   ffs_epfile_write_iter,
+       .read_iter =    ffs_epfile_read_iter,
        .release =      ffs_epfile_release,
        .unlocked_ioctl =       ffs_epfile_ioctl,
 };
index 426d69a9c018a0a651beac78ce63709c4b283613..a2612fb79eff261ee336b73940f88e7514f9c5e0 100644 (file)
@@ -569,7 +569,7 @@ fail:
        return status;
 }
 
-const struct file_operations f_hidg_fops = {
+static const struct file_operations f_hidg_fops = {
        .owner          = THIS_MODULE,
        .open           = f_hidg_open,
        .release        = f_hidg_release,
index c89e96cfa3e47b95d179d06910d528710e59d5de..c0c3ef272714b58ea4980ba25cf24076b502a105 100644 (file)
@@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
                        return -EINVAL;
 
                spin_lock(&port->lock);
-               __pn_reset(f);
+
+               if (fp->in_ep->driver_data)
+                       __pn_reset(f);
+
                if (alt == 1) {
                        int i;
 
index e07c50ced64ddc09256b271f72a5baacfc3b8f18..e3dae47baef3d3bfcf8d7dc5124cc22bbcc8a272 100644 (file)
@@ -344,7 +344,7 @@ static struct usb_endpoint_descriptor ss_int_source_desc = {
        .bInterval =            USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
 };
 
-struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
        .bLength =              USB_DT_SS_EP_COMP_SIZE,
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 
@@ -362,7 +362,7 @@ static struct usb_endpoint_descriptor ss_int_sink_desc = {
        .bInterval =            USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
 };
 
-struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
        .bLength =              USB_DT_SS_EP_COMP_SIZE,
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 
index 33e16658e5cfeb50d01c44c7a63a2379634e82b7..6d3eb8b00a488446db954334e80ac12eccf0d5cf 100644 (file)
@@ -54,7 +54,7 @@
 #define UNFLW_CTRL     8
 #define OVFLW_CTRL     10
 
-const char *uac2_name = "snd_uac2";
+static const char *uac2_name = "snd_uac2";
 
 struct uac2_req {
        struct uac2_rtd_params *pp; /* parent param */
@@ -634,7 +634,7 @@ static struct usb_interface_descriptor std_ac_if_desc = {
 };
 
 /* Clock source for IN traffic */
-struct uac_clock_source_descriptor in_clk_src_desc = {
+static struct uac_clock_source_descriptor in_clk_src_desc = {
        .bLength = sizeof in_clk_src_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -646,7 +646,7 @@ struct uac_clock_source_descriptor in_clk_src_desc = {
 };
 
 /* Clock source for OUT traffic */
-struct uac_clock_source_descriptor out_clk_src_desc = {
+static struct uac_clock_source_descriptor out_clk_src_desc = {
        .bLength = sizeof out_clk_src_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -658,7 +658,7 @@ struct uac_clock_source_descriptor out_clk_src_desc = {
 };
 
 /* Input Terminal for USB_OUT */
-struct uac2_input_terminal_descriptor usb_out_it_desc = {
+static struct uac2_input_terminal_descriptor usb_out_it_desc = {
        .bLength = sizeof usb_out_it_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -672,7 +672,7 @@ struct uac2_input_terminal_descriptor usb_out_it_desc = {
 };
 
 /* Input Terminal for I/O-In */
-struct uac2_input_terminal_descriptor io_in_it_desc = {
+static struct uac2_input_terminal_descriptor io_in_it_desc = {
        .bLength = sizeof io_in_it_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -686,7 +686,7 @@ struct uac2_input_terminal_descriptor io_in_it_desc = {
 };
 
 /* Output Terminal for USB_IN */
-struct uac2_output_terminal_descriptor usb_in_ot_desc = {
+static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
        .bLength = sizeof usb_in_ot_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -700,7 +700,7 @@ struct uac2_output_terminal_descriptor usb_in_ot_desc = {
 };
 
 /* Output Terminal for I/O-Out */
-struct uac2_output_terminal_descriptor io_out_ot_desc = {
+static struct uac2_output_terminal_descriptor io_out_ot_desc = {
        .bLength = sizeof io_out_ot_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -713,7 +713,7 @@ struct uac2_output_terminal_descriptor io_out_ot_desc = {
        .bmControls = (CONTROL_RDWR << COPY_CTRL),
 };
 
-struct uac2_ac_header_descriptor ac_hdr_desc = {
+static struct uac2_ac_header_descriptor ac_hdr_desc = {
        .bLength = sizeof ac_hdr_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -751,7 +751,7 @@ static struct usb_interface_descriptor std_as_out_if1_desc = {
 };
 
 /* Audio Stream OUT Intface Desc */
-struct uac2_as_header_descriptor as_out_hdr_desc = {
+static struct uac2_as_header_descriptor as_out_hdr_desc = {
        .bLength = sizeof as_out_hdr_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -764,7 +764,7 @@ struct uac2_as_header_descriptor as_out_hdr_desc = {
 };
 
 /* Audio USB_OUT Format */
-struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
+static struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
        .bLength = sizeof as_out_fmt1_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
        .bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -772,7 +772,7 @@ struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
 };
 
 /* STD AS ISO OUT Endpoint */
-struct usb_endpoint_descriptor fs_epout_desc = {
+static struct usb_endpoint_descriptor fs_epout_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
 
@@ -782,7 +782,7 @@ struct usb_endpoint_descriptor fs_epout_desc = {
        .bInterval = 1,
 };
 
-struct usb_endpoint_descriptor hs_epout_desc = {
+static struct usb_endpoint_descriptor hs_epout_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
 
@@ -828,7 +828,7 @@ static struct usb_interface_descriptor std_as_in_if1_desc = {
 };
 
 /* Audio Stream IN Intface Desc */
-struct uac2_as_header_descriptor as_in_hdr_desc = {
+static struct uac2_as_header_descriptor as_in_hdr_desc = {
        .bLength = sizeof as_in_hdr_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -841,7 +841,7 @@ struct uac2_as_header_descriptor as_in_hdr_desc = {
 };
 
 /* Audio USB_IN Format */
-struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
+static struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
        .bLength = sizeof as_in_fmt1_desc,
        .bDescriptorType = USB_DT_CS_INTERFACE,
        .bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -849,7 +849,7 @@ struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
 };
 
 /* STD AS ISO IN Endpoint */
-struct usb_endpoint_descriptor fs_epin_desc = {
+static struct usb_endpoint_descriptor fs_epin_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
 
@@ -859,7 +859,7 @@ struct usb_endpoint_descriptor fs_epin_desc = {
        .bInterval = 1,
 };
 
-struct usb_endpoint_descriptor hs_epin_desc = {
+static struct usb_endpoint_descriptor hs_epin_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
 
@@ -1563,7 +1563,7 @@ static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
                agdev->out_ep->driver_data = NULL;
 }
 
-struct usb_function *afunc_alloc(struct usb_function_instance *fi)
+static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
 {
        struct audio_dev *agdev;
        struct f_uac2_opts *opts;
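
The f_uac2 hunks above only narrow linkage: descriptors and the allocation helper that are referenced solely inside this file gain the static keyword. A minimal sketch of the pattern (the descriptor name below is invented, not taken from the patch):

#include <linux/usb/ch9.h>

/* File-local descriptor: static gives it internal linkage, so it cannot
 * collide with a same-named symbol in another driver, and tools such as
 * sparse stop warning about a missing extern declaration. */
static struct usb_endpoint_descriptor example_ep_desc = {
	.bLength          = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes     = USB_ENDPOINT_XFER_ISOC,
};
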
index 5aad7fededa589bdb7b143a9f6dfbfb65c81cedd..8b818fd027b3380772030c2a650c67df830a4e56 100644 (file)
@@ -27,6 +27,7 @@
 #include "uvc.h"
 #include "uvc_queue.h"
 #include "uvc_video.h"
+#include "uvc_v4l2.h"
 
 /* --------------------------------------------------------------------------
  * Requests handling
index 9cb86bc1a9a5444b4ed7895489c2b0c5860bac37..50a5e637ca35ad804925112675e54b2ee115b80f 100644 (file)
@@ -21,6 +21,7 @@
 
 #include "uvc.h"
 #include "uvc_queue.h"
+#include "uvc_video.h"
 
 /* --------------------------------------------------------------------------
  * Video codecs
index 06acfa55864a639a6015689770b4ccb06f01ad40..b01b88e1b716a5902d5276196ef459663545a216 100644 (file)
@@ -133,7 +133,9 @@ struct gfs_configuration {
        struct usb_configuration c;
        int (*eth)(struct usb_configuration *c);
        int num;
-} gfs_configurations[] = {
+};
+
+static struct gfs_configuration gfs_configurations[] = {
 #ifdef CONFIG_USB_FUNCTIONFS_RNDIS
        {
                .eth            = bind_rndis_config,
@@ -278,7 +280,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev)
        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(-ENOENT);
        
-       return 0;
+       return NULL;
 }
 
 static void functionfs_release_dev(struct ffs_dev *dev)
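
Two small correctness fixes in g_ffs.c: the combined struct-definition-plus-array is split so the type is declared first and the array can be marked static, and a pointer-returning callback now returns NULL rather than the integer 0. A hedged sketch of the same shape, with invented names:

#include <stddef.h>

struct example_cfg {
	int (*bind)(void *ctx);
	int num;
};

/* Named type first, static array second - mirrors the gfs_configurations change. */
static struct example_cfg example_cfgs[] = {
	{ .bind = NULL, .num = 0 },
};

/* Pointer-returning helpers should say NULL, not 0. */
static void *example_acquire(void)
{
	return NULL;
}
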
index db49ec4c748e9469bd694645c8cfc22df7c829fa..200f9a584064fd9199ba99ff75a2e26a33c788f7 100644 (file)
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
 MODULE_AUTHOR ("David Brownell");
 MODULE_LICENSE ("GPL");
 
+static int ep_open(struct inode *, struct file *);
+
 
 /*----------------------------------------------------------------------*/
 
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
  * still need dev->lock to use epdata->ep.
  */
 static int
-get_ready_ep (unsigned f_flags, struct ep_data *epdata)
+get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
 {
        int     val;
 
        if (f_flags & O_NONBLOCK) {
                if (!mutex_trylock(&epdata->lock))
                        goto nonblock;
-               if (epdata->state != STATE_EP_ENABLED) {
+               if (epdata->state != STATE_EP_ENABLED &&
+                   (!is_write || epdata->state != STATE_EP_READY)) {
                        mutex_unlock(&epdata->lock);
 nonblock:
                        val = -EAGAIN;
@@ -305,18 +308,20 @@ nonblock:
 
        switch (epdata->state) {
        case STATE_EP_ENABLED:
+               return 0;
+       case STATE_EP_READY:                    /* not configured yet */
+               if (is_write)
+                       return 0;
+               // FALLTHRU
+       case STATE_EP_UNBOUND:                  /* clean disconnect */
                break;
        // case STATE_EP_DISABLED:              /* "can't happen" */
-       // case STATE_EP_READY:                 /* "can't happen" */
        default:                                /* error! */
                pr_debug ("%s: ep %p not available, state %d\n",
                                shortname, epdata, epdata->state);
-               // FALLTHROUGH
-       case STATE_EP_UNBOUND:                  /* clean disconnect */
-               val = -ENODEV;
-               mutex_unlock(&epdata->lock);
        }
-       return val;
+       mutex_unlock(&epdata->lock);
+       return -ENODEV;
 }
 
 static ssize_t
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
        return value;
 }
 
-
-/* handle a synchronous OUT bulk/intr/iso transfer */
-static ssize_t
-ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
-{
-       struct ep_data          *data = fd->private_data;
-       void                    *kbuf;
-       ssize_t                 value;
-
-       if ((value = get_ready_ep (fd->f_flags, data)) < 0)
-               return value;
-
-       /* halt any endpoint by doing a "wrong direction" i/o call */
-       if (usb_endpoint_dir_in(&data->desc)) {
-               if (usb_endpoint_xfer_isoc(&data->desc)) {
-                       mutex_unlock(&data->lock);
-                       return -EINVAL;
-               }
-               DBG (data->dev, "%s halt\n", data->name);
-               spin_lock_irq (&data->dev->lock);
-               if (likely (data->ep != NULL))
-                       usb_ep_set_halt (data->ep);
-               spin_unlock_irq (&data->dev->lock);
-               mutex_unlock(&data->lock);
-               return -EBADMSG;
-       }
-
-       /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
-
-       value = -ENOMEM;
-       kbuf = kmalloc (len, GFP_KERNEL);
-       if (unlikely (!kbuf))
-               goto free1;
-
-       value = ep_io (data, kbuf, len);
-       VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
-               data->name, len, (int) value);
-       if (value >= 0 && copy_to_user (buf, kbuf, value))
-               value = -EFAULT;
-
-free1:
-       mutex_unlock(&data->lock);
-       kfree (kbuf);
-       return value;
-}
-
-/* handle a synchronous IN bulk/intr/iso transfer */
-static ssize_t
-ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
-{
-       struct ep_data          *data = fd->private_data;
-       void                    *kbuf;
-       ssize_t                 value;
-
-       if ((value = get_ready_ep (fd->f_flags, data)) < 0)
-               return value;
-
-       /* halt any endpoint by doing a "wrong direction" i/o call */
-       if (!usb_endpoint_dir_in(&data->desc)) {
-               if (usb_endpoint_xfer_isoc(&data->desc)) {
-                       mutex_unlock(&data->lock);
-                       return -EINVAL;
-               }
-               DBG (data->dev, "%s halt\n", data->name);
-               spin_lock_irq (&data->dev->lock);
-               if (likely (data->ep != NULL))
-                       usb_ep_set_halt (data->ep);
-               spin_unlock_irq (&data->dev->lock);
-               mutex_unlock(&data->lock);
-               return -EBADMSG;
-       }
-
-       /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
-
-       value = -ENOMEM;
-       kbuf = memdup_user(buf, len);
-       if (IS_ERR(kbuf)) {
-               value = PTR_ERR(kbuf);
-               kbuf = NULL;
-               goto free1;
-       }
-
-       value = ep_io (data, kbuf, len);
-       VDEBUG (data->dev, "%s write %zu IN, status %d\n",
-               data->name, len, (int) value);
-free1:
-       mutex_unlock(&data->lock);
-       kfree (kbuf);
-       return value;
-}
-
 static int
 ep_release (struct inode *inode, struct file *fd)
 {
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
        struct ep_data          *data = fd->private_data;
        int                     status;
 
-       if ((status = get_ready_ep (fd->f_flags, data)) < 0)
+       if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
                return status;
 
        spin_lock_irq (&data->dev->lock);
@@ -517,8 +431,8 @@ struct kiocb_priv {
        struct mm_struct        *mm;
        struct work_struct      work;
        void                    *buf;
-       const struct iovec      *iv;
-       unsigned long           nr_segs;
+       struct iov_iter         to;
+       const void              *to_free;
        unsigned                actual;
 };
 
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
        return value;
 }
 
-static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
-{
-       ssize_t                 len, total;
-       void                    *to_copy;
-       int                     i;
-
-       /* copy stuff into user buffers */
-       total = priv->actual;
-       len = 0;
-       to_copy = priv->buf;
-       for (i=0; i < priv->nr_segs; i++) {
-               ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
-
-               if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
-                       if (len == 0)
-                               len = -EFAULT;
-                       break;
-               }
-
-               total -= this;
-               len += this;
-               to_copy += this;
-               if (total == 0)
-                       break;
-       }
-
-       return len;
-}
-
 static void ep_user_copy_worker(struct work_struct *work)
 {
        struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
        size_t ret;
 
        use_mm(mm);
-       ret = ep_copy_to_user(priv);
+       ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
        unuse_mm(mm);
+       if (!ret)
+               ret = -EFAULT;
 
        /* completing the iocb can drop the ctx and mm, don't touch mm after */
        aio_complete(iocb, ret, ret);
 
        kfree(priv->buf);
+       kfree(priv->to_free);
        kfree(priv);
 }
 
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
         * don't need to copy anything to userspace, so we can
         * complete the aio request immediately.
         */
-       if (priv->iv == NULL || unlikely(req->actual == 0)) {
+       if (priv->to_free == NULL || unlikely(req->actual == 0)) {
                kfree(req->buf);
+               kfree(priv->to_free);
                kfree(priv);
                iocb->private = NULL;
                /* aio_complete() reports bytes-transferred _and_ faults */
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 
                priv->buf = req->buf;
                priv->actual = req->actual;
+               INIT_WORK(&priv->work, ep_user_copy_worker);
                schedule_work(&priv->work);
        }
        spin_unlock(&epdata->dev->lock);
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
        put_ep(epdata);
 }
 
-static ssize_t
-ep_aio_rwtail(
-       struct kiocb    *iocb,
-       char            *buf,
-       size_t          len,
-       struct ep_data  *epdata,
-       const struct iovec *iv,
-       unsigned long   nr_segs
-)
+static ssize_t ep_aio(struct kiocb *iocb,
+                     struct kiocb_priv *priv,
+                     struct ep_data *epdata,
+                     char *buf,
+                     size_t len)
 {
-       struct kiocb_priv       *priv;
-       struct usb_request      *req;
-       ssize_t                 value;
+       struct usb_request *req;
+       ssize_t value;
 
-       priv = kmalloc(sizeof *priv, GFP_KERNEL);
-       if (!priv) {
-               value = -ENOMEM;
-fail:
-               kfree(buf);
-               return value;
-       }
        iocb->private = priv;
        priv->iocb = iocb;
-       priv->iv = iv;
-       priv->nr_segs = nr_segs;
-       INIT_WORK(&priv->work, ep_user_copy_worker);
-
-       value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
-       if (unlikely(value < 0)) {
-               kfree(priv);
-               goto fail;
-       }
 
        kiocb_set_cancel_fn(iocb, ep_aio_cancel);
        get_ep(epdata);
@@ -669,75 +538,154 @@ fail:
         * allocate or submit those if the host disconnected.
         */
        spin_lock_irq(&epdata->dev->lock);
-       if (likely(epdata->ep)) {
-               req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
-               if (likely(req)) {
-                       priv->req = req;
-                       req->buf = buf;
-                       req->length = len;
-                       req->complete = ep_aio_complete;
-                       req->context = iocb;
-                       value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
-                       if (unlikely(0 != value))
-                               usb_ep_free_request(epdata->ep, req);
-               } else
-                       value = -EAGAIN;
-       } else
-               value = -ENODEV;
-       spin_unlock_irq(&epdata->dev->lock);
+       value = -ENODEV;
+       if (unlikely(epdata->ep))
+               goto fail;
 
-       mutex_unlock(&epdata->lock);
+       req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+       value = -ENOMEM;
+       if (unlikely(!req))
+               goto fail;
 
-       if (unlikely(value)) {
-               kfree(priv);
-               put_ep(epdata);
-       } else
-               value = -EIOCBQUEUED;
+       priv->req = req;
+       req->buf = buf;
+       req->length = len;
+       req->complete = ep_aio_complete;
+       req->context = iocb;
+       value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+       if (unlikely(0 != value)) {
+               usb_ep_free_request(epdata->ep, req);
+               goto fail;
+       }
+       spin_unlock_irq(&epdata->dev->lock);
+       return -EIOCBQUEUED;
+
+fail:
+       spin_unlock_irq(&epdata->dev->lock);
+       kfree(priv->to_free);
+       kfree(priv);
+       put_ep(epdata);
        return value;
 }
 
 static ssize_t
-ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t o)
+ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       struct ep_data          *epdata = iocb->ki_filp->private_data;
-       char                    *buf;
+       struct file *file = iocb->ki_filp;
+       struct ep_data *epdata = file->private_data;
+       size_t len = iov_iter_count(to);
+       ssize_t value;
+       char *buf;
 
-       if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
-               return -EINVAL;
+       if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
+               return value;
 
-       buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
-       if (unlikely(!buf))
-               return -ENOMEM;
+       /* halt any endpoint by doing a "wrong direction" i/o call */
+       if (usb_endpoint_dir_in(&epdata->desc)) {
+               if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+                   !is_sync_kiocb(iocb)) {
+                       mutex_unlock(&epdata->lock);
+                       return -EINVAL;
+               }
+               DBG (epdata->dev, "%s halt\n", epdata->name);
+               spin_lock_irq(&epdata->dev->lock);
+               if (likely(epdata->ep != NULL))
+                       usb_ep_set_halt(epdata->ep);
+               spin_unlock_irq(&epdata->dev->lock);
+               mutex_unlock(&epdata->lock);
+               return -EBADMSG;
+       }
 
-       return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
+       buf = kmalloc(len, GFP_KERNEL);
+       if (unlikely(!buf)) {
+               mutex_unlock(&epdata->lock);
+               return -ENOMEM;
+       }
+       if (is_sync_kiocb(iocb)) {
+               value = ep_io(epdata, buf, len);
+               if (value >= 0 && copy_to_iter(buf, value, to))
+                       value = -EFAULT;
+       } else {
+               struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+               value = -ENOMEM;
+               if (!priv)
+                       goto fail;
+               priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
+               if (!priv->to_free) {
+                       kfree(priv);
+                       goto fail;
+               }
+               value = ep_aio(iocb, priv, epdata, buf, len);
+               if (value == -EIOCBQUEUED)
+                       buf = NULL;
+       }
+fail:
+       kfree(buf);
+       mutex_unlock(&epdata->lock);
+       return value;
 }
 
+static ssize_t ep_config(struct ep_data *, const char *, size_t);
+
 static ssize_t
-ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t o)
+ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-       struct ep_data          *epdata = iocb->ki_filp->private_data;
-       char                    *buf;
-       size_t                  len = 0;
-       int                     i = 0;
+       struct file *file = iocb->ki_filp;
+       struct ep_data *epdata = file->private_data;
+       size_t len = iov_iter_count(from);
+       bool configured;
+       ssize_t value;
+       char *buf;
+
+       if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
+               return value;
 
-       if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
-               return -EINVAL;
+       configured = epdata->state == STATE_EP_ENABLED;
 
-       buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
-       if (unlikely(!buf))
+       /* halt any endpoint by doing a "wrong direction" i/o call */
+       if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
+               if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+                   !is_sync_kiocb(iocb)) {
+                       mutex_unlock(&epdata->lock);
+                       return -EINVAL;
+               }
+               DBG (epdata->dev, "%s halt\n", epdata->name);
+               spin_lock_irq(&epdata->dev->lock);
+               if (likely(epdata->ep != NULL))
+                       usb_ep_set_halt(epdata->ep);
+               spin_unlock_irq(&epdata->dev->lock);
+               mutex_unlock(&epdata->lock);
+               return -EBADMSG;
+       }
+
+       buf = kmalloc(len, GFP_KERNEL);
+       if (unlikely(!buf)) {
+               mutex_unlock(&epdata->lock);
                return -ENOMEM;
+       }
 
-       for (i=0; i < nr_segs; i++) {
-               if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
-                               iov[i].iov_len) != 0)) {
-                       kfree(buf);
-                       return -EFAULT;
+       if (unlikely(copy_from_iter(buf, len, from) != len)) {
+               value = -EFAULT;
+               goto out;
+       }
+
+       if (unlikely(!configured)) {
+               value = ep_config(epdata, buf, len);
+       } else if (is_sync_kiocb(iocb)) {
+               value = ep_io(epdata, buf, len);
+       } else {
+               struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+               value = -ENOMEM;
+               if (priv) {
+                       value = ep_aio(iocb, priv, epdata, buf, len);
+                       if (value == -EIOCBQUEUED)
+                               buf = NULL;
                }
-               len += iov[i].iov_len;
        }
-       return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
+out:
+       kfree(buf);
+       mutex_unlock(&epdata->lock);
+       return value;
 }
 
 /*----------------------------------------------------------------------*/
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
 /* used after endpoint configuration */
 static const struct file_operations ep_io_operations = {
        .owner =        THIS_MODULE,
-       .llseek =       no_llseek,
 
-       .read =         ep_read,
-       .write =        ep_write,
-       .unlocked_ioctl = ep_ioctl,
+       .open =         ep_open,
        .release =      ep_release,
-
-       .aio_read =     ep_aio_read,
-       .aio_write =    ep_aio_write,
+       .llseek =       no_llseek,
+       .read =         new_sync_read,
+       .write =        new_sync_write,
+       .unlocked_ioctl = ep_ioctl,
+       .read_iter =    ep_read_iter,
+       .write_iter =   ep_write_iter,
 };
 
 /* ENDPOINT INITIALIZATION
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
  * speed descriptor, then optional high speed descriptor.
  */
 static ssize_t
-ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ep_config (struct ep_data *data, const char *buf, size_t len)
 {
-       struct ep_data          *data = fd->private_data;
        struct usb_ep           *ep;
        u32                     tag;
        int                     value, length = len;
 
-       value = mutex_lock_interruptible(&data->lock);
-       if (value < 0)
-               return value;
-
        if (data->state != STATE_EP_READY) {
                value = -EL2HLT;
                goto fail;
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                goto fail0;
 
        /* we might need to change message format someday */
-       if (copy_from_user (&tag, buf, 4)) {
-               goto fail1;
-       }
+       memcpy(&tag, buf, 4);
        if (tag != 1) {
                DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
                goto fail0;
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
         */
 
        /* full/low speed descriptor, then high speed */
-       if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
-               goto fail1;
-       }
+       memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
        if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
                        || data->desc.bDescriptorType != USB_DT_ENDPOINT)
                goto fail0;
        if (len != USB_DT_ENDPOINT_SIZE) {
                if (len != 2 * USB_DT_ENDPOINT_SIZE)
                        goto fail0;
-               if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
-                                       USB_DT_ENDPOINT_SIZE)) {
-                       goto fail1;
-               }
+               memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
+                       USB_DT_ENDPOINT_SIZE);
                if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
                                || data->hs_desc.bDescriptorType
                                        != USB_DT_ENDPOINT) {
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        case USB_SPEED_LOW:
        case USB_SPEED_FULL:
                ep->desc = &data->desc;
-               value = usb_ep_enable(ep);
-               if (value == 0)
-                       data->state = STATE_EP_ENABLED;
                break;
        case USB_SPEED_HIGH:
                /* fails if caller didn't provide that descriptor... */
                ep->desc = &data->hs_desc;
-               value = usb_ep_enable(ep);
-               if (value == 0)
-                       data->state = STATE_EP_ENABLED;
                break;
        default:
                DBG(data->dev, "unconnected, %s init abandoned\n",
                                data->name);
                value = -EINVAL;
+               goto gone;
        }
+       value = usb_ep_enable(ep);
        if (value == 0) {
-               fd->f_op = &ep_io_operations;
+               data->state = STATE_EP_ENABLED;
                value = length;
        }
 gone:
@@ -867,14 +800,10 @@ fail:
                data->desc.bDescriptorType = 0;
                data->hs_desc.bDescriptorType = 0;
        }
-       mutex_unlock(&data->lock);
        return value;
 fail0:
        value = -EINVAL;
        goto fail;
-fail1:
-       value = -EFAULT;
-       goto fail;
 }
 
 static int
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
        return value;
 }
 
-/* used before endpoint configuration */
-static const struct file_operations ep_config_operations = {
-       .llseek =       no_llseek,
-
-       .open =         ep_open,
-       .write =        ep_config,
-       .release =      ep_release,
-};
-
 /*----------------------------------------------------------------------*/
 
 /* EP0 IMPLEMENTATION can be partly in userspace.
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
        enum ep0_state                  state;
 
        spin_lock_irq (&dev->lock);
+       if (dev->state <= STATE_DEV_OPENED) {
+               retval = -EINVAL;
+               goto done;
+       }
 
        /* report fd mode change before acting on it */
        if (dev->setup_abort) {
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        struct dev_data         *dev = fd->private_data;
        ssize_t                 retval = -ESRCH;
 
-       spin_lock_irq (&dev->lock);
-
        /* report fd mode change before acting on it */
        if (dev->setup_abort) {
                dev->setup_abort = 0;
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        } else
                DBG (dev, "fail %s, state %d\n", __func__, dev->state);
 
-       spin_unlock_irq (&dev->lock);
        return retval;
 }
 
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
        struct dev_data         *dev = fd->private_data;
        int                     mask = 0;
 
+       if (dev->state <= STATE_DEV_OPENED)
+               return DEFAULT_POLLMASK;
+
        poll_wait(fd, &dev->wait, wait);
 
        spin_lock_irq (&dev->lock);
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
        return ret;
 }
 
-/* used after device configuration */
-static const struct file_operations ep0_io_operations = {
-       .owner =        THIS_MODULE,
-       .llseek =       no_llseek,
-
-       .read =         ep0_read,
-       .write =        ep0_write,
-       .fasync =       ep0_fasync,
-       .poll =         ep0_poll,
-       .unlocked_ioctl =       dev_ioctl,
-       .release =      dev_release,
-};
-
 /*----------------------------------------------------------------------*/
 
 /* The in-kernel gadget driver handles most ep0 issues, in particular
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
                        goto enomem1;
 
                data->dentry = gadgetfs_create_file (dev->sb, data->name,
-                               data, &ep_config_operations);
+                               data, &ep_io_operations);
                if (!data->dentry)
                        goto enomem2;
                list_add_tail (&data->epfiles, &dev->epfiles);
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        u32                     tag;
        char                    *kbuf;
 
+       spin_lock_irq(&dev->lock);
+       if (dev->state > STATE_DEV_OPENED) {
+               value = ep0_write(fd, buf, len, ptr);
+               spin_unlock_irq(&dev->lock);
+               return value;
+       }
+       spin_unlock_irq(&dev->lock);
+
        if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
                return -EINVAL;
 
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                 * on, they can work ... except in cleanup paths that
                 * kick in after the ep0 descriptor is closed.
                 */
-               fd->f_op = &ep0_io_operations;
                value = len;
        }
        return value;
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
        return value;
 }
 
-static const struct file_operations dev_init_operations = {
+static const struct file_operations ep0_operations = {
        .llseek =       no_llseek,
 
        .open =         dev_open,
+       .read =         ep0_read,
        .write =        dev_config,
        .fasync =       ep0_fasync,
+       .poll =         ep0_poll,
        .unlocked_ioctl = dev_ioctl,
        .release =      dev_release,
 };
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
                goto Enomem;
 
        dev->sb = sb;
-       dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
+       dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
        if (!dev->dentry) {
                put_dev(dev);
                goto Enomem;
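
The inode.c changes fold the pre-configuration and post-configuration file_operations into one set: ep_config() is now reached through write_iter on an endpoint file that is still in STATE_EP_READY, synchronous I/O goes through the iov_iter based ep_read_iter()/ep_write_iter(), and AIO keeps a duplicated iterator (dup_iter()) alive in kiocb_priv for the completion worker, instead of swapping fd->f_op once the endpoint is configured. The userspace-visible protocol appears unchanged. A hedged userspace sketch of that protocol (the device path, endpoint name and descriptor contents are illustrative, not taken from the patch):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/usb/ch9.h>

/* Configure a gadgetfs endpoint file: one write() carrying a 4-byte tag
 * of 1, the full/low-speed endpoint descriptor, and optionally the
 * high-speed descriptor.  On success the same fd is then used for plain
 * read()/write() data transfers. */
static int example_ep_setup(const char *path,
			    const struct usb_endpoint_descriptor *fs,
			    const struct usb_endpoint_descriptor *hs)
{
	uint8_t buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
	uint32_t tag = 1;
	size_t len = 4;
	int fd;

	memcpy(buf, &tag, 4);
	memcpy(buf + len, fs, USB_DT_ENDPOINT_SIZE);
	len += USB_DT_ENDPOINT_SIZE;
	if (hs) {
		memcpy(buf + len, hs, USB_DT_ENDPOINT_SIZE);
		len += USB_DT_ENDPOINT_SIZE;
	}

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != (ssize_t)len) {
		close(fd);
		return -1;
	}
	return fd;	/* ready for data transfers */
}

With the patch applied, the switch from "configuration" mode to "I/O" mode happens via the endpoint state (STATE_EP_READY vs STATE_EP_ENABLED) rather than by replacing the file's operations.
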
index 7f76c8a12f89db425e19c4f3a2a5200de542dbd7..fd53c9ebd662a5fb4593c99ce5dd7c2552ad83c5 100644 (file)
@@ -37,6 +37,9 @@
 
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI     0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
+#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI            0x22b5
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+               (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+               xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_EJ168) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                "QUIRK: Resetting on resume");
 }
 
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct xhci_hcd *xhci)
+{
+       u32 val;
+       void __iomem *reg;
+
+       reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+       val = readl(reg);
+       writel(val | BIT(28), reg);
+       readl(reg);
+}
+
 /* called during probe() after chip reset completes */
 static int xhci_pci_setup(struct usb_hcd *hcd)
 {
@@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                pdev->no_d3cold = true;
 
+       if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+               xhci_pme_quirk(xhci);
+
        return xhci_suspend(xhci, do_wakeup);
 }
 
@@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL)
                usb_enable_intel_xhci_ports(pdev);
 
+       if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+               xhci_pme_quirk(xhci);
+
        retval = xhci_resume(xhci, hibernated);
        return retval;
 }
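
The Intel quirk added above flags a few xHCI parts and, around suspend and resume, writes the vendor-specific PMCTRL register so the stuck internal PME bit is cleared. The pattern is an MMIO read-modify-write followed by a read-back that flushes the posted write; a generic kernel-style sketch under invented names (the real offset and bit live in xhci_pme_quirk() above):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helper, not part of the patch: set a status bit in a
 * memory-mapped register and read it back so the write does not sit in
 * a posted-write buffer while the controller suspends or resumes. */
static void example_kick_pm_bit(void __iomem *base, unsigned long offset, u32 bit)
{
	void __iomem *reg = base + offset;
	u32 val;

	val = readl(reg);
	writel(val | bit, reg);
	readl(reg);	/* flush the posted write */
}
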
index 08d402b15482d936ca36d6d20cc381d8279a35a2..0e11d61408ff3f95a3110636e858119f4037cf0c 100644 (file)
@@ -83,16 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
        if (irq < 0)
                return -ENODEV;
 
-
-       if (of_device_is_compatible(pdev->dev.of_node,
-                                   "marvell,armada-375-xhci") ||
-           of_device_is_compatible(pdev->dev.of_node,
-                                   "marvell,armada-380-xhci")) {
-               ret = xhci_mvebu_mbus_init_quirk(pdev);
-               if (ret)
-                       return ret;
-       }
-
        /* Initialize dma_mask and coherent_dma_mask to 32-bits */
        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
@@ -127,6 +117,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
                        goto put_hcd;
        }
 
+       if (of_device_is_compatible(pdev->dev.of_node,
+                                   "marvell,armada-375-xhci") ||
+           of_device_is_compatible(pdev->dev.of_node,
+                                   "marvell,armada-380-xhci")) {
+               ret = xhci_mvebu_mbus_init_quirk(pdev);
+               if (ret)
+                       goto disable_clk;
+       }
+
        ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (ret)
                goto disable_clk;
index 88da8d6298201fa5c7b7e4a93ff67fe303cb84b5..5fb66db89e05549da9cd4e94a15abbbcc3ba5a19 100644 (file)
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
        if (!command)
                return;
 
-       ep->ep_state |= EP_HALTED;
+       ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED;
        ep->stopped_stream = stream_id;
 
        xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
@@ -1946,7 +1946,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
        if (event_trb != ep_ring->dequeue) {
                /* The event was for the status stage */
                if (event_trb == td->last_trb) {
-                       if (td->urb->actual_length != 0) {
+                       if (td->urb_length_set) {
                                /* Don't overwrite a previously set error code
                                 */
                                if ((*status == -EINPROGRESS || *status == 0) &&
@@ -1960,7 +1960,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                        td->urb->transfer_buffer_length;
                        }
                } else {
-               /* Maybe the event was for the data stage? */
+                       /*
+                        * Maybe the event was for the data stage? If so, update
+                        * already the actual_length of the URB and flag it as
+                        * set, so that it is not overwritten in the event for
+                        * the last TRB.
+                        */
+                       td->urb_length_set = true;
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
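
The new urb_length_set flag in struct xhci_td records that a data-stage transfer event already filled in urb->actual_length, so the later status-stage event no longer uses "actual_length != 0" as a proxy, which misreported zero-length data stages as full transfers. A simplified, self-contained model of the two completions, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the TD/URB pair tracked by the driver. */
struct example_td {
	bool     urb_length_set;
	uint32_t actual_length;
	uint32_t buffer_length;
};

/* Data-stage completion: record the transferred length exactly once. */
static void example_data_stage_event(struct example_td *td, uint32_t residue)
{
	td->actual_length = td->buffer_length - residue;
	td->urb_length_set = true;
}

/* Status-stage completion: only assume "everything transferred" when no
 * data-stage event reported a length (covers zero-length data stages). */
static void example_status_stage_event(struct example_td *td)
{
	if (!td->urb_length_set)
		td->actual_length = td->buffer_length;
}

int main(void)
{
	struct example_td td = { .buffer_length = 64 };

	example_data_stage_event(&td, 64);	/* nothing actually transferred */
	example_status_stage_event(&td);
	printf("%u\n", td.actual_length);	/* prints 0, not 64 */
	return 0;
}
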
index ec8ac16748547a2ac87bf9aa225ed0a36c0bf7df..b06d1a53652da3ff9446e99c870c4c56fe044e5e 100644 (file)
@@ -1338,6 +1338,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                goto exit;
        }
 
+       /* Reject urb if endpoint is in soft reset, queue must stay empty */
+       if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) {
+               xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n");
+               ret = -EINVAL;
+       }
+
        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                size = urb->number_of_packets;
        else
@@ -2948,23 +2954,36 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
        }
 }
 
-/* Called when clearing halted device. The core should have sent the control
+/* Called after clearing a halted device. USB core should have sent the control
  * message to clear the device halt condition. The host side of the halt should
- * already be cleared with a reset endpoint command issued when the STALL tx
- * event was received.
- *
- * Context: in_interrupt
+ * already be cleared with a reset endpoint command issued immediately when the
+ * STALL tx event was received.
  */
 
 void xhci_endpoint_reset(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
 {
        struct xhci_hcd *xhci;
+       struct usb_device *udev;
+       struct xhci_virt_device *virt_dev;
+       struct xhci_virt_ep *virt_ep;
+       struct xhci_input_control_ctx *ctrl_ctx;
+       struct xhci_command *command;
+       unsigned int ep_index, ep_state;
+       unsigned long flags;
+       u32 ep_flag;
 
        xhci = hcd_to_xhci(hcd);
+       udev = (struct usb_device *) ep->hcpriv;
+       if (!ep->hcpriv)
+               return;
+       virt_dev = xhci->devs[udev->slot_id];
+       ep_index = xhci_get_endpoint_index(&ep->desc);
+       virt_ep = &virt_dev->eps[ep_index];
+       ep_state = virt_ep->ep_state;
 
        /*
-        * We might need to implement the config ep cmd in xhci 4.8.1 note:
+        * Implement the config ep command in xhci 4.6.8 additional note:
         * The Reset Endpoint Command may only be issued to endpoints in the
         * Halted state. If software wishes to reset the Data Toggle or Sequence
         * Number of an endpoint that isn't in the Halted state, then software
@@ -2972,9 +2991,72 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
         * for the target endpoint that is in the Stopped state.
         */
 
-       /* For now just print debug to follow the situation */
-       xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
-                ep->desc.bEndpointAddress);
+       if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) {
+               virt_ep->ep_state &= ~EP_RECENTLY_HALTED;
+               xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n");
+               return;
+       }
+
+       /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */
+       if (usb_endpoint_xfer_control(&ep->desc) ||
+           usb_endpoint_xfer_isoc(&ep->desc))
+               return;
+
+       ep_flag = xhci_get_endpoint_flag(&ep->desc);
+
+       if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
+               return;
+
+       command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT);
+       if (!command) {
+               xhci_err(xhci, "Could not allocate xHCI command structure.\n");
+               return;
+       }
+
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       /* block ringing ep doorbell */
+       virt_ep->ep_state |= EP_CONFIG_PENDING;
+
+       /*
+        * Make sure endpoint ring is empty before resetting the toggle/seq.
+        * Driver is required to synchronously cancel all transfer requests.
+        *
+        * xhci 4.6.6 says we can issue a configure endpoint command on a
+        * running endpoint ring as long as it's idle (queue empty)
+        */
+
+       if (!list_empty(&virt_ep->ring->td_list)) {
+               dev_err(&udev->dev, "EP not empty, refuse reset\n");
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               goto cleanup;
+       }
+
+       xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n",
+                udev->slot_id, ep_index);
+
+       ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+       if (!ctrl_ctx) {
+               xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n",
+                        virt_dev, virt_dev->in_ctx);
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               goto cleanup;
+       }
+       xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+                                          virt_dev->out_ctx, ctrl_ctx,
+                                          ep_flag, ep_flag);
+       xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index);
+
+       xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
+                                    udev->slot_id, false);
+       xhci_ring_cmd_db(xhci);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       wait_for_completion(command->completion);
+
+cleanup:
+       virt_ep->ep_state &= ~EP_CONFIG_PENDING;
+       xhci_free_command(xhci, command);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
index 974514762a1402aee4164cf5556c9321bea49d44..265ab1771d24c887d1ba950f2c77f9ee38144438 100644 (file)
@@ -1,3 +1,4 @@
+
 /*
  * xHCI host controller driver
  *
@@ -88,9 +89,10 @@ struct xhci_cap_regs {
 #define HCS_IST(p)             (((p) >> 0) & 0xf)
 /* bits 4:7, max number of Event Ring segments */
 #define HCS_ERST_MAX(p)                (((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
 /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
 
 /* HCSPARAMS3 - hcs_params3 - bitmasks */
 /* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -863,6 +865,8 @@ struct xhci_virt_ep {
 #define EP_HAS_STREAMS         (1 << 4)
 /* Transitioning the endpoint to not using streams, don't enqueue URBs */
 #define EP_GETTING_NO_STREAMS  (1 << 5)
+#define EP_RECENTLY_HALTED     (1 << 6)
+#define EP_CONFIG_PENDING      (1 << 7)
        /* ----  Related to URB cancellation ---- */
        struct list_head        cancelled_td_list;
        struct xhci_td          *stopped_td;
@@ -1288,6 +1292,8 @@ struct xhci_td {
        struct xhci_segment     *start_seg;
        union xhci_trb          *first_trb;
        union xhci_trb          *last_trb;
+       /* actual_length of the URB has already been set */
+       bool                    urb_length_set;
 };
 
 /* xHCI command default timeout value */
@@ -1560,6 +1566,7 @@ struct xhci_hcd {
 #define XHCI_SPURIOUS_WAKEUP   (1 << 18)
 /* For controllers with a broken beyond repair streams implementation */
 #define XHCI_BROKEN_STREAMS    (1 << 19)
+#define XHCI_PME_STUCK_QUIRK   (1 << 20)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
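
HCS_MAX_SCRATCHPAD() now assembles a 10-bit count from two 5-bit fields of HCSPARAMS2 ("Max Scratchpad Buffers Hi" in bits 21:25 and "Lo" in bits 27:31) instead of only the low field. A small worked example of the merge, using an invented register value:

#include <stdint.h>
#include <stdio.h>

/* Same bit-merge as the updated macro: bits 21:25 become the high five
 * bits of the count, bits 27:31 the low five. */
#define EXAMPLE_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

int main(void)
{
	/* Illustrative value: hi field = 1, lo field = 2, so the controller
	 * asks for (1 << 5) + 2 = 34 scratchpad buffers. */
	uint32_t hcs_params2 = (1u << 21) | (2u << 27);

	printf("%u\n", EXAMPLE_MAX_SCRATCHPAD(hcs_params2));	/* prints 34 */
	return 0;
}
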
index eba9b82e2d70c19c5b75dc1b6c3d6a0182ee4d5e..3cb98b1d5d2960171bea26d4fff70e2217d9e7f0 100644 (file)
@@ -1274,7 +1274,7 @@ static void errata2_function(unsigned long data)
        for (slot = 0; slot < 32; slot++)
                if (priv->atl_slots[slot].qh && time_after(jiffies,
                                        priv->atl_slots[slot].timestamp +
-                                       SLOT_TIMEOUT * HZ / 1000)) {
+                                       msecs_to_jiffies(SLOT_TIMEOUT))) {
                        ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
                        if (!FROM_DW0_VALID(ptd.dw0) &&
                                        !FROM_DW3_ACTIVE(ptd.dw3))
@@ -1286,7 +1286,7 @@ static void errata2_function(unsigned long data)
 
        spin_unlock_irqrestore(&priv->lock, spinflags);
 
-       errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+       errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
        add_timer(&errata2_timer);
 }
 
@@ -1336,7 +1336,7 @@ static int isp1760_run(struct usb_hcd *hcd)
                return retval;
 
        setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
-       errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+       errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
        add_timer(&errata2_timer);
 
        chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
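
Replacing the open-coded "msecs * HZ / 1000" with msecs_to_jiffies() is more than cosmetic: the helper is correct for any CONFIG_HZ and rounds up rather than truncating, so a requested delay cannot silently shrink to fewer ticks. A rough userspace model of the difference (the HZ value and timeout below are illustrative):

#include <stdio.h>

#define EXAMPLE_HZ 100	/* illustrative CONFIG_HZ */

/* Simplified model of msecs_to_jiffies(): round up to the next tick. */
static unsigned long example_msecs_to_jiffies(unsigned long ms)
{
	return (ms * EXAMPLE_HZ + 999) / 1000;
}

int main(void)
{
	unsigned long ms = 15;	/* hypothetical timeout in milliseconds */

	/* Open-coded scaling truncates: 15 * 100 / 1000 = 1 tick (10 ms). */
	printf("open-coded: %lu\n", ms * EXAMPLE_HZ / 1000);
	/* Rounding up never undershoots the requested delay: 2 ticks. */
	printf("rounded up: %lu\n", example_msecs_to_jiffies(ms));
	return 0;
}
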
index e6f4cbfeed9736006e852c306aed7f4839987a95..067920f2d570fb77be332cf16e885bf63e0a352c 100644 (file)
@@ -1969,10 +1969,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
                goto fail0;
        }
 
-       pm_runtime_use_autosuspend(musb->controller);
-       pm_runtime_set_autosuspend_delay(musb->controller, 200);
-       pm_runtime_enable(musb->controller);
-
        spin_lock_init(&musb->lock);
        musb->board_set_power = plat->set_power;
        musb->min_power = plat->min_power;
@@ -1991,6 +1987,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_readl = musb_default_readl;
        musb_writel = musb_default_writel;
 
+       /* We need musb_read/write functions initialized for PM */
+       pm_runtime_use_autosuspend(musb->controller);
+       pm_runtime_set_autosuspend_delay(musb->controller, 200);
+       pm_runtime_irq_safe(musb->controller);
+       pm_runtime_enable(musb->controller);
+
        /* The musb_platform_init() call:
         *   - adjusts musb->mregs
         *   - sets the musb->isr
index 53bd0e71d19f02e3582db6ac21d0e617765c0ad1..a900c9877195ad6ef2048ddb8c2813c8e196feed 100644 (file)
@@ -457,12 +457,27 @@ static int dsps_musb_init(struct musb *musb)
        if (IS_ERR(musb->xceiv))
                return PTR_ERR(musb->xceiv);
 
+       musb->phy = devm_phy_get(dev->parent, "usb2-phy");
+
        /* Returns zero if e.g. not clocked */
        rev = dsps_readl(reg_base, wrp->revision);
        if (!rev)
                return -ENODEV;
 
        usb_phy_init(musb->xceiv);
+       if (IS_ERR(musb->phy))  {
+               musb->phy = NULL;
+       } else {
+               ret = phy_init(musb->phy);
+               if (ret < 0)
+                       return ret;
+               ret = phy_power_on(musb->phy);
+               if (ret) {
+                       phy_exit(musb->phy);
+                       return ret;
+               }
+       }
+
        setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
 
        /* Reset the musb */
@@ -502,6 +517,8 @@ static int dsps_musb_exit(struct musb *musb)
 
        del_timer_sync(&glue->timer);
        usb_phy_shutdown(musb->xceiv);
+       phy_power_off(musb->phy);
+       phy_exit(musb->phy);
        debugfs_remove_recursive(glue->dbgfs_root);
 
        return 0;
@@ -610,7 +627,7 @@ static int dsps_musb_reset(struct musb *musb)
        struct device *dev = musb->controller;
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
        const struct dsps_musb_wrapper *wrp = glue->wrp;
-       int session_restart = 0;
+       int session_restart = 0, error;
 
        if (glue->sw_babble_enabled)
                session_restart = sw_babble_control(musb);
@@ -624,8 +641,14 @@ static int dsps_musb_reset(struct musb *musb)
                dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset));
                usleep_range(100, 200);
                usb_phy_shutdown(musb->xceiv);
+               error = phy_power_off(musb->phy);
+               if (error)
+                       dev_err(dev, "phy shutdown failed: %i\n", error);
                usleep_range(100, 200);
                usb_phy_init(musb->xceiv);
+               error = phy_power_on(musb->phy);
+               if (error)
+                       dev_err(dev, "phy powerup failed: %i\n", error);
                session_restart = 1;
        }
 
@@ -687,7 +710,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
        struct musb_hdrc_config *config;
        struct platform_device *musb;
        struct device_node *dn = parent->dev.of_node;
-       int ret;
+       int ret, val;
 
        memset(resources, 0, sizeof(resources));
        res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
@@ -739,7 +762,10 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
        pdata.mode = get_musb_port_mode(dev);
        /* DT keeps this entry in mA, musb expects it as per USB spec */
        pdata.power = get_int_prop(dn, "mentor,power") / 2;
-       config->multipoint = of_property_read_bool(dn, "mentor,multipoint");
+
+       ret = of_property_read_u32(dn, "mentor,multipoint", &val);
+       if (!ret && val)
+               config->multipoint = true;
 
        ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
        if (ret) {
index 883a9adfdfff5f0c1643036e0be7d7d22d1e73a8..c3d5fc9dfb5bd56f5adc0a3cee337ffd1c31541f 100644 (file)
@@ -2613,7 +2613,7 @@ static const struct hc_driver musb_hc_driver = {
        .description            = "musb-hcd",
        .product_desc           = "MUSB HDRC host driver",
        .hcd_priv_size          = sizeof(struct musb *),
-       .flags                  = HCD_USB2 | HCD_MEMORY,
+       .flags                  = HCD_USB2 | HCD_MEMORY | HCD_BH,
 
        /* not using irq handler or reset hooks from usbcore, since
         * those must be shared with peripheral code for OTG configs
index 763649eb4987d99cafdc8aa5d3eb77590171150d..cc752d8c7773176d1338d5927c7b7c3bf4fc5ccd 100644 (file)
@@ -516,7 +516,7 @@ static int omap2430_probe(struct platform_device *pdev)
        struct omap2430_glue            *glue;
        struct device_node              *np = pdev->dev.of_node;
        struct musb_hdrc_config         *config;
-       int                             ret = -ENOMEM;
+       int                             ret = -ENOMEM, val;
 
        glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
        if (!glue)
@@ -559,7 +559,10 @@ static int omap2430_probe(struct platform_device *pdev)
                of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
                of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
                of_property_read_u32(np, "power", (u32 *)&pdata->power);
-               config->multipoint = of_property_read_bool(np, "multipoint");
+
+               ret = of_property_read_u32(np, "multipoint", &val);
+               if (!ret && val)
+                       config->multipoint = true;
 
                pdata->board_data       = data;
                pdata->config           = config;
index de83b9d0cd5c39449eb71e237ecd2dfa272cdf8c..ebc99ee076ce337275bbea3869abaa830b007111 100644 (file)
@@ -6,6 +6,7 @@ config USB_RENESAS_USBHS
        tristate 'Renesas USBHS controller'
        depends on USB_GADGET
        depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+       depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in
        default n
        help
          Renesas USBHS is a discrete USB host and peripheral controller chip
index 9374bd2aba20759f4bb4b03d2a9d51058aead9fd..8936a83c96cd60c8a226e0c96a9908c06d356773 100644 (file)
@@ -38,56 +38,51 @@ static int usb_serial_device_match(struct device *dev,
        return 0;
 }
 
-static ssize_t port_number_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct usb_serial_port *port = to_usb_serial_port(dev);
-
-       return sprintf(buf, "%d\n", port->port_number);
-}
-static DEVICE_ATTR_RO(port_number);
-
 static int usb_serial_device_probe(struct device *dev)
 {
        struct usb_serial_driver *driver;
        struct usb_serial_port *port;
+       struct device *tty_dev;
        int retval = 0;
        int minor;
 
        port = to_usb_serial_port(dev);
-       if (!port) {
-               retval = -ENODEV;
-               goto exit;
-       }
+       if (!port)
+               return -ENODEV;
 
        /* make sure suspend/resume doesn't race against port_probe */
        retval = usb_autopm_get_interface(port->serial->interface);
        if (retval)
-               goto exit;
+               return retval;
 
        driver = port->serial->type;
        if (driver->port_probe) {
                retval = driver->port_probe(port);
                if (retval)
-                       goto exit_with_autopm;
+                       goto err_autopm_put;
        }
 
-       retval = device_create_file(dev, &dev_attr_port_number);
-       if (retval) {
-               if (driver->port_remove)
-                       retval = driver->port_remove(port);
-               goto exit_with_autopm;
+       minor = port->minor;
+       tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
+       if (IS_ERR(tty_dev)) {
+               retval = PTR_ERR(tty_dev);
+               goto err_port_remove;
        }
 
-       minor = port->minor;
-       tty_register_device(usb_serial_tty_driver, minor, dev);
+       usb_autopm_put_interface(port->serial->interface);
+
        dev_info(&port->serial->dev->dev,
                 "%s converter now attached to ttyUSB%d\n",
                 driver->description, minor);
 
-exit_with_autopm:
+       return 0;
+
+err_port_remove:
+       if (driver->port_remove)
+               driver->port_remove(port);
+err_autopm_put:
        usb_autopm_put_interface(port->serial->interface);
-exit:
+
        return retval;
 }
 
@@ -114,8 +109,6 @@ static int usb_serial_device_remove(struct device *dev)
        minor = port->minor;
        tty_unregister_device(usb_serial_tty_driver, minor);
 
-       device_remove_file(&port->dev, &dev_attr_port_number);
-
        driver = port->serial->type;
        if (driver->port_remove)
                retval = driver->port_remove(port);
index 2d72aa3564a31e9eeade77423ec16de3d743c706..ede4f5fcfadda11dcdb7ab954769f827bd3c312a 100644 (file)
@@ -84,6 +84,10 @@ struct ch341_private {
        u8 line_status; /* active status of modem control inputs */
 };
 
+static void ch341_set_termios(struct tty_struct *tty,
+                             struct usb_serial_port *port,
+                             struct ktermios *old_termios);
+
 static int ch341_control_out(struct usb_device *dev, u8 request,
                             u16 value, u16 index)
 {
@@ -309,19 +313,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
        struct ch341_private *priv = usb_get_serial_port_data(port);
        int r;
 
-       priv->baud_rate = DEFAULT_BAUD_RATE;
-
        r = ch341_configure(serial->dev, priv);
        if (r)
                goto out;
 
-       r = ch341_set_handshake(serial->dev, priv->line_control);
-       if (r)
-               goto out;
-
-       r = ch341_set_baudrate(serial->dev, priv);
-       if (r)
-               goto out;
+       if (tty)
+               ch341_set_termios(tty, port, NULL);
 
        dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
        r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
index 29fa1c3d0089bee738ed4f54a8b65d4f82dd0c03..3806e7014199d13c892f32e8a03c1b37e286bddb 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/tty.h>
 #include <linux/console.h>
@@ -144,6 +145,7 @@ static int usb_console_setup(struct console *co, char *options)
                        init_ldsem(&tty->ldisc_sem);
                        INIT_LIST_HEAD(&tty->tty_files);
                        kref_get(&tty->driver->kref);
+                       __module_get(tty->driver->owner);
                        tty->ops = &usb_console_fake_tty_ops;
                        if (tty_init_termios(tty)) {
                                retval = -ENOMEM;
index f40c856ff758d959318ae4d251eea6cd96f5819a..84ce2d74894c9c3b25c7bcf0185f23887492eb94 100644 (file)
@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
        { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
        { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
+       { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
+       { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
index 1ebb351b9e9a59c9dbd90ffda56cae1769de764e..3086dec0ef53bbd5d5d3d21087b91469983492fb 100644 (file)
@@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
+       { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
        /* GE Healthcare devices */
        { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
+       /* Active Research (Actisense) devices */
+       { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
+       { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
+       { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
        { }                                     /* Terminating entry */
 };
 
index e52409c9be999f817cbdb627f4be8ec6f127cbe6..56b1b55c4751696b2e89633ea90bfe1c2940c436 100644 (file)
@@ -38,6 +38,9 @@
 
 #define FTDI_LUMEL_PD12_PID    0x6002
 
+/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
+#define CYBER_CORTEX_AV_PID    0x8698
+
 /*
  * Marvell OpenRD Base, Client
  * http://www.open-rd.org
  */
 #define GE_HEALTHCARE_VID              0x1901
 #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015
+
+/*
+ * Active Research (Actisense) devices
+ */
+#define ACTISENSE_NDC_PID              0xD9A8 /* NDC USB Serial Adapter */
+#define ACTISENSE_USG_PID              0xD9A9 /* USG USB Serial Adapter */
+#define ACTISENSE_NGT_PID              0xD9AA /* NGT NMEA2000 Interface */
+#define ACTISENSE_NGW_PID              0xD9AB /* NGW NMEA2000 Gateway */
+#define ACTISENSE_D9AC_PID             0xD9AC /* Actisense Reserved */
+#define ACTISENSE_D9AD_PID             0xD9AD /* Actisense Reserved */
+#define ACTISENSE_D9AE_PID             0xD9AE /* Actisense Reserved */
+#define ACTISENSE_D9AF_PID             0xD9AF /* Actisense Reserved */
+#define CHETCO_SEAGAUGE_PID            0xA548 /* SeaGauge USB Adapter */
+#define CHETCO_SEASWITCH_PID           0xA549 /* SeaSwitch USB Adapter */
+#define CHETCO_SEASMART_NMEA2000_PID   0xA54A /* SeaSmart NMEA2000 Gateway */
+#define CHETCO_SEASMART_ETHERNET_PID   0xA54B /* SeaSmart Ethernet Gateway */
+#define CHETCO_SEASMART_WIFI_PID       0xA5AC /* SeaSmart Wifi Gateway */
+#define CHETCO_SEASMART_DISPLAY_PID    0xA5AD /* SeaSmart NMEA2000 Display */
+#define CHETCO_SEASMART_LITE_PID       0xA5AE /* SeaSmart Lite USB Adapter */
+#define CHETCO_SEASMART_ANALOG_PID     0xA5AF /* SeaSmart Analog Adapter */
index ccf1df7c4b80f3f7a596fa3d1d8904eb64a78db6..54e170dd3dad0cec058e29674b98f685f90274df 100644 (file)
@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
         * character or at least one jiffy.
         */
        period = max_t(unsigned long, (10 * HZ / bps), 1);
-       period = min_t(unsigned long, period, timeout);
+       if (timeout)
+               period = min_t(unsigned long, period, timeout);
 
        dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
                                        __func__, jiffies_to_msecs(timeout),
@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
                schedule_timeout_interruptible(period);
                if (signal_pending(current))
                        break;
-               if (time_after(jiffies, expire))
+               if (timeout && time_after(jiffies, expire))
                        break;
        }
 }
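
The two hunks above change usb_serial_generic_wait_until_sent() so that a timeout of 0 means "no deadline": the poll period is no longer clamped to zero and the expiry test is skipped, so the loop only exits once the hardware buffers drain or a signal arrives. A minimal stand-alone model of the corrected exit condition, with illustrative names that are not part of the patch:

#include <stdbool.h>

/*
 * Illustrative model only: a zero timeout means "wait forever", so the
 * deadline comparison must be skipped rather than expiring immediately.
 */
static bool tx_wait_done(unsigned long elapsed_ms, unsigned long timeout_ms,
                         bool fifo_empty, bool signalled)
{
        if (fifo_empty || signalled)
                return true;
        return timeout_ms && elapsed_ms >= timeout_ms;  /* 0 => no deadline */
}
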
index ab1d690274ae52b3230f04acbd8194a49f68b7ba..460a40669967855cfeea7c6cc911064822c6882a 100644 (file)
@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
        }
 
        /* Initial port termios */
-       mxuport_set_termios(tty, port, NULL);
+       if (tty)
+               mxuport_set_termios(tty, port, NULL);
 
        /*
         * TODO: use RQ_VENDOR_GET_MSR, once we know what it
index 0f872e6b2c878f9b8de56fc6ae0cddda56468589..829604d11f3fa72a6b5bec5580f150c27c513cbf 100644 (file)
@@ -132,6 +132,7 @@ MODULE_DEVICE_TABLE(usb, id_table);
 #define UART_OVERRUN_ERROR             0x40
 #define UART_CTS                       0x80
 
+static void pl2303_set_break(struct usb_serial_port *port, bool enable);
 
 enum pl2303_type {
        TYPE_01,        /* Type 0 and 1 (difference unknown) */
@@ -615,6 +616,7 @@ static void pl2303_close(struct usb_serial_port *port)
 {
        usb_serial_generic_close(port);
        usb_kill_urb(port->interrupt_in_urb);
+       pl2303_set_break(port, false);
 }
 
 static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -741,17 +743,16 @@ static int pl2303_ioctl(struct tty_struct *tty,
        return -ENOIOCTLCMD;
 }
 
-static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
+static void pl2303_set_break(struct usb_serial_port *port, bool enable)
 {
-       struct usb_serial_port *port = tty->driver_data;
        struct usb_serial *serial = port->serial;
        u16 state;
        int result;
 
-       if (break_state == 0)
-               state = BREAK_OFF;
-       else
+       if (enable)
                state = BREAK_ON;
+       else
+               state = BREAK_OFF;
 
        dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
                        state == BREAK_OFF ? "off" : "on");
@@ -763,6 +764,13 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
                dev_err(&port->dev, "error sending break = %d\n", result);
 }
 
+static void pl2303_break_ctl(struct tty_struct *tty, int state)
+{
+       struct usb_serial_port *port = tty->driver_data;
+
+       pl2303_set_break(port, state);
+}
+
 static void pl2303_update_line_status(struct usb_serial_port *port,
                                      unsigned char *data,
                                      unsigned int actual_length)
index 475723c006f955923d255be9abc408c7fd29027d..529066bbc7e81be1bb67e398f58425febef6a8eb 100644 (file)
@@ -687,6 +687,21 @@ static void serial_port_dtr_rts(struct tty_port *port, int on)
                drv->dtr_rts(p, on);
 }
 
+static ssize_t port_number_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct usb_serial_port *port = to_usb_serial_port(dev);
+
+       return sprintf(buf, "%u\n", port->port_number);
+}
+static DEVICE_ATTR_RO(port_number);
+
+static struct attribute *usb_serial_port_attrs[] = {
+       &dev_attr_port_number.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(usb_serial_port);
+
 static const struct tty_port_operations serial_port_ops = {
        .carrier_raised         = serial_port_carrier_raised,
        .dtr_rts                = serial_port_dtr_rts,
@@ -902,6 +917,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                port->dev.driver = NULL;
                port->dev.bus = &usb_serial_bus_type;
                port->dev.release = &usb_serial_port_release;
+               port->dev.groups = usb_serial_port_groups;
                device_initialize(&port->dev);
        }
 
@@ -940,8 +956,9 @@ static int usb_serial_probe(struct usb_interface *interface,
                port = serial->port[i];
                if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
                        goto probe_error;
-               buffer_size = max_t(int, serial->type->bulk_out_size,
-                                               usb_endpoint_maxp(endpoint));
+               buffer_size = serial->type->bulk_out_size;
+               if (!buffer_size)
+                       buffer_size = usb_endpoint_maxp(endpoint);
                port->bulk_out_size = buffer_size;
                port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
 
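
The usb-serial core hunks above add a read-only "port_number" sysfs attribute by pointing port->dev.groups at an attribute group before the device is registered, so the file is created and removed together with the device, and they also let a driver-specified bulk_out_size override the endpoint's maximum packet size. A generic sketch of the attribute-group pattern with hypothetical names (example_*, the value 42 is a placeholder), assuming the usual driver-core macros:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical driver fragment; names are illustrative, not from the patch. */
static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", 42);        /* placeholder value */
}
static DEVICE_ATTR_RO(example);                 /* defines dev_attr_example */

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL
};
ATTRIBUTE_GROUPS(example);                      /* defines example_groups */

/* Before device_add()/device_register(): dev->groups = example_groups; */
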
index dbc00e56c7f5c106a67028e1b6da7dfee2c39e5e..82570425fdfe388a5fd05ee6b13439deb676c4fc 100644 (file)
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
+               "JMicron",
+               "JMS539",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                "JMicron",
index d468d02179f4707d308192ed3578665a41ee96f9..5600c33fcadb219e52e8f985bf692c6ff3a1025e 100644 (file)
@@ -889,6 +889,12 @@ static void usb_stor_scan_dwork(struct work_struct *work)
            !(us->fflags & US_FL_SCM_MULT_TARG)) {
                mutex_lock(&us->dev_mutex);
                us->max_lun = usb_stor_Bulk_max_lun(us);
+               /*
+                * Allow proper scanning of devices that present more than 8 LUNs
+                * while not affecting other devices that may need the previous behavior.
+                */
+               if (us->max_lun >= 8)
+                       us_to_host(us)->max_lun = us->max_lun+1;
                mutex_unlock(&us->dev_mutex);
        }
        scsi_scan_host(us_to_host(us));
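
The hunk above raises the SCSI host's max_lun when a Bulk-only device reports a Get Max LUN value of 8 or more, so that all of its logical units are scanned. A worked example, under the assumption that the SCSI core treats shost->max_lun as an exclusive upper bound on the sequential LUN scan (values are hypothetical):

/*
 * Illustrative only, not from the patch: a device whose Get Max LUN
 * request returns 15 has LUNs 0..15, so the host limit must become 16
 * for a scan loop of the form (lun < shost->max_lun) to reach them all.
 */
u8 reported_max_lun = 15;                       /* from usb_stor_Bulk_max_lun() */
if (reported_max_lun >= 8)
        shost->max_lun = reported_max_lun + 1;  /* 16 => scan LUNs 0..15 */
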
index 7cc0122a18cecbb7ef45cf8e438112ec2fb4ff00..f8a186381ae8726887657c6c868a8b2ceeef2e54 100644 (file)
@@ -239,9 +239,12 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
 
                        return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
                }
-       } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+       } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
                if (pci_is_pcie(vdev->pdev))
                        return 1;
+       } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+               return 1;
+       }
 
        return 0;
 }
@@ -464,6 +467,7 @@ static long vfio_pci_ioctl(void *device_data,
 
                switch (info.index) {
                case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+               case VFIO_PCI_REQ_IRQ_INDEX:
                        break;
                case VFIO_PCI_ERR_IRQ_INDEX:
                        if (pci_is_pcie(vdev->pdev))
@@ -828,6 +832,20 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
                               req_len, vma->vm_page_prot);
 }
 
+static void vfio_pci_request(void *device_data, unsigned int count)
+{
+       struct vfio_pci_device *vdev = device_data;
+
+       mutex_lock(&vdev->igate);
+
+       if (vdev->req_trigger) {
+               dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+               eventfd_signal(vdev->req_trigger, 1);
+       }
+
+       mutex_unlock(&vdev->igate);
+}
+
 static const struct vfio_device_ops vfio_pci_ops = {
        .name           = "vfio-pci",
        .open           = vfio_pci_open,
@@ -836,6 +854,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
        .read           = vfio_pci_read,
        .write          = vfio_pci_write,
        .mmap           = vfio_pci_mmap,
+       .request        = vfio_pci_request,
 };
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
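
The vfio-pci hunks above introduce a "request" interrupt index: when the kernel needs the device back (see the vfio core changes further down), vfio_pci_request() signals an eventfd that userspace registered for VFIO_PCI_REQ_IRQ_INDEX. A user-space sketch of registering such an eventfd, assuming the uapi definitions that accompany this series (error handling trimmed; not part of the patch):

#include <string.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Returns an eventfd that becomes readable when the kernel asks this
 * process to release the device.  Illustrative sketch only. */
static int register_req_eventfd(int device_fd)
{
        char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
        struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
        int32_t efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(buf, 0, sizeof(buf));
        set->argsz = sizeof(buf);
        set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        set->index = VFIO_PCI_REQ_IRQ_INDEX;
        set->start = 0;
        set->count = 1;
        memcpy(set->data, &efd, sizeof(efd));

        if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set))
                return -1;

        return efd;     /* poll()/read() this fd; a wakeup means "please close the device" */
}
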
index e8d695b3f54e0fdbf74e567dc5ae109cb8ea5da2..2027a27546ef4f7a816c08f7598e9c796e6c5ad7 100644 (file)
@@ -763,46 +763,70 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
        return 0;
 }
 
-static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
-                                   unsigned index, unsigned start,
-                                   unsigned count, uint32_t flags, void *data)
+static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
+                                          uint32_t flags, void *data)
 {
        int32_t fd = *(int32_t *)data;
 
-       if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
-           !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+       if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
                return -EINVAL;
 
        /* DATA_NONE/DATA_BOOL enables loopback testing */
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
-               if (vdev->err_trigger)
-                       eventfd_signal(vdev->err_trigger, 1);
+               if (*ctx)
+                       eventfd_signal(*ctx, 1);
                return 0;
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t trigger = *(uint8_t *)data;
-               if (trigger && vdev->err_trigger)
-                       eventfd_signal(vdev->err_trigger, 1);
+               if (trigger && *ctx)
+                       eventfd_signal(*ctx, 1);
                return 0;
        }
 
        /* Handle SET_DATA_EVENTFD */
        if (fd == -1) {
-               if (vdev->err_trigger)
-                       eventfd_ctx_put(vdev->err_trigger);
-               vdev->err_trigger = NULL;
+               if (*ctx)
+                       eventfd_ctx_put(*ctx);
+               *ctx = NULL;
                return 0;
        } else if (fd >= 0) {
                struct eventfd_ctx *efdctx;
                efdctx = eventfd_ctx_fdget(fd);
                if (IS_ERR(efdctx))
                        return PTR_ERR(efdctx);
-               if (vdev->err_trigger)
-                       eventfd_ctx_put(vdev->err_trigger);
-               vdev->err_trigger = efdctx;
+               if (*ctx)
+                       eventfd_ctx_put(*ctx);
+               *ctx = efdctx;
                return 0;
        } else
                return -EINVAL;
 }
+
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+                                   unsigned index, unsigned start,
+                                   unsigned count, uint32_t flags, void *data)
+{
+       if (index != VFIO_PCI_ERR_IRQ_INDEX)
+               return -EINVAL;
+
+       /*
+        * We should sanitize start & count, but that wasn't caught
+        * originally, so this IRQ index must forever ignore them :-(
+        */
+
+       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+}
+
+static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
+                                   unsigned index, unsigned start,
+                                   unsigned count, uint32_t flags, void *data)
+{
+       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+               return -EINVAL;
+
+       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+}
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                            unsigned index, unsigned start, unsigned count,
                            void *data)
@@ -844,6 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                                func = vfio_pci_set_err_trigger;
                        break;
                }
+               break;
+       case VFIO_PCI_REQ_IRQ_INDEX:
+               switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+               case VFIO_IRQ_SET_ACTION_TRIGGER:
+                       func = vfio_pci_set_req_trigger;
+                       break;
+               }
+               break;
        }
 
        if (!func)
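
As the hunks above show, the eventfd handling is factored into vfio_pci_set_ctx_trigger_single(), which keeps the DATA_NONE/DATA_BOOL loopback path and is now shared by the error and request triggers. A small user-space sketch of using that path to fire a previously registered trigger for testing (illustrative; assumes the standard VFIO uapi):

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Fire the eventfd previously registered for "index" (for example the
 * error or request trigger) without any real hardware event, purely as a
 * loopback test of the wiring.  Illustrative sketch only. */
static int loopback_trigger(int device_fd, uint32_t index)
{
        struct vfio_irq_set set;

        memset(&set, 0, sizeof(set));
        set.argsz = sizeof(set);
        set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
        set.index = index;      /* e.g. VFIO_PCI_ERR_IRQ_INDEX or VFIO_PCI_REQ_IRQ_INDEX */
        set.start = 0;
        set.count = 1;

        return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &set);
}
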
index 671c17a6e6d029dfdffe5d7243150ed757e44cf4..c9f9b323f152733685f9dafc67793472921658ec 100644 (file)
@@ -58,6 +58,7 @@ struct vfio_pci_device {
        struct pci_saved_state  *pci_saved_state;
        int                     refcnt;
        struct eventfd_ctx      *err_trigger;
+       struct eventfd_ctx      *req_trigger;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
index f018d8d0f975360a091339699d12348c5c93a12b..4cde8550144406c02715448b8fbc4a88b538189b 100644 (file)
@@ -63,6 +63,11 @@ struct vfio_container {
        void                            *iommu_data;
 };
 
+struct vfio_unbound_dev {
+       struct device                   *dev;
+       struct list_head                unbound_next;
+};
+
 struct vfio_group {
        struct kref                     kref;
        int                             minor;
@@ -75,6 +80,8 @@ struct vfio_group {
        struct notifier_block           nb;
        struct list_head                vfio_next;
        struct list_head                container_next;
+       struct list_head                unbound_list;
+       struct mutex                    unbound_lock;
        atomic_t                        opened;
 };
 
@@ -204,6 +211,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
        kref_init(&group->kref);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
+       INIT_LIST_HEAD(&group->unbound_list);
+       mutex_init(&group->unbound_lock);
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
@@ -264,13 +273,22 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 static void vfio_group_release(struct kref *kref)
 {
        struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+       struct vfio_unbound_dev *unbound, *tmp;
+       struct iommu_group *iommu_group = group->iommu_group;
 
        WARN_ON(!list_empty(&group->device_list));
 
+       list_for_each_entry_safe(unbound, tmp,
+                                &group->unbound_list, unbound_next) {
+               list_del(&unbound->unbound_next);
+               kfree(unbound);
+       }
+
        device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
        list_del(&group->vfio_next);
        vfio_free_group_minor(group->minor);
        vfio_group_unlock_and_free(group);
+       iommu_group_put(iommu_group);
 }
 
 static void vfio_group_put(struct vfio_group *group)
@@ -440,17 +458,36 @@ static bool vfio_whitelisted_driver(struct device_driver *drv)
 }
 
 /*
- * A vfio group is viable for use by userspace if all devices are either
- * driver-less or bound to a vfio or whitelisted driver.  We test the
- * latter by the existence of a struct vfio_device matching the dev.
+ * A vfio group is viable for use by userspace if all devices are in
+ * one of the following states:
+ *  - driver-less
+ *  - bound to a vfio driver
+ *  - bound to a whitelisted driver
+ *
+ * We use two methods to determine whether a device is bound to a vfio
+ * driver.  The first is to test whether the device exists in the vfio
+ * group.  The second is to test if the device exists on the group
+ * unbound_list, indicating it's in the middle of transitioning from
+ * a vfio driver to driver-less.
  */
 static int vfio_dev_viable(struct device *dev, void *data)
 {
        struct vfio_group *group = data;
        struct vfio_device *device;
        struct device_driver *drv = ACCESS_ONCE(dev->driver);
+       struct vfio_unbound_dev *unbound;
+       int ret = -EINVAL;
 
-       if (!drv || vfio_whitelisted_driver(drv))
+       mutex_lock(&group->unbound_lock);
+       list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
+               if (dev == unbound->dev) {
+                       ret = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&group->unbound_lock);
+
+       if (!ret || !drv || vfio_whitelisted_driver(drv))
                return 0;
 
        device = vfio_group_get_device(group, dev);
@@ -459,7 +496,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
                return 0;
        }
 
-       return -EINVAL;
+       return ret;
 }
 
 /**
@@ -501,6 +538,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 {
        struct vfio_group *group = container_of(nb, struct vfio_group, nb);
        struct device *dev = data;
+       struct vfio_unbound_dev *unbound;
 
        /*
         * Need to go through a group_lock lookup to get a reference or we
@@ -550,6 +588,17 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
                 * stop the system to maintain isolation.  At a minimum, we'd
                 * want a toggle to disable driver auto probe for this device.
                 */
+
+               mutex_lock(&group->unbound_lock);
+               list_for_each_entry(unbound,
+                                   &group->unbound_list, unbound_next) {
+                       if (dev == unbound->dev) {
+                               list_del(&unbound->unbound_next);
+                               kfree(unbound);
+                               break;
+                       }
+               }
+               mutex_unlock(&group->unbound_lock);
                break;
        }
 
@@ -578,6 +627,12 @@ int vfio_add_group_dev(struct device *dev,
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
                }
+       } else {
+               /*
+                * A found vfio_group already holds a reference to the
+                * iommu_group.  A created vfio_group keeps the reference.
+                */
+               iommu_group_put(iommu_group);
        }
 
        device = vfio_group_get_device(group, dev);
@@ -586,21 +641,19 @@ int vfio_add_group_dev(struct device *dev,
                     dev_name(dev), iommu_group_id(iommu_group));
                vfio_device_put(device);
                vfio_group_put(group);
-               iommu_group_put(iommu_group);
                return -EBUSY;
        }
 
        device = vfio_group_create_device(group, dev, ops, device_data);
        if (IS_ERR(device)) {
                vfio_group_put(group);
-               iommu_group_put(iommu_group);
                return PTR_ERR(device);
        }
 
        /*
-        * Added device holds reference to iommu_group and vfio_device
-        * (which in turn holds reference to vfio_group).  Drop extra
-        * group reference used while acquiring device.
+        * Drop all but the vfio_device reference.  The vfio_device holds
+        * a reference to the vfio_group, which holds a reference to the
+        * iommu_group.
         */
        vfio_group_put(group);
 
@@ -655,8 +708,9 @@ void *vfio_del_group_dev(struct device *dev)
 {
        struct vfio_device *device = dev_get_drvdata(dev);
        struct vfio_group *group = device->group;
-       struct iommu_group *iommu_group = group->iommu_group;
        void *device_data = device->device_data;
+       struct vfio_unbound_dev *unbound;
+       unsigned int i = 0;
 
        /*
         * The group exists so long as we have a device reference.  Get
@@ -664,14 +718,49 @@ void *vfio_del_group_dev(struct device *dev)
         */
        vfio_group_get(group);
 
+       /*
+        * When the device is removed from the group, the group suddenly
+        * becomes non-viable; the device has a driver (until the unbind
+        * completes), but it's not present in the group.  This is bad news
+        * for any external users that need to re-acquire a group reference
+        * in order to match and release their existing reference.  To
+        * solve this, we track such devices on the unbound_list to bridge
+        * the gap until they're fully unbound.
+        */
+       unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
+       if (unbound) {
+               unbound->dev = dev;
+               mutex_lock(&group->unbound_lock);
+               list_add(&unbound->unbound_next, &group->unbound_list);
+               mutex_unlock(&group->unbound_lock);
+       }
+       WARN_ON(!unbound);
+
        vfio_device_put(device);
 
-       /* TODO send a signal to encourage this to be released */
-       wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+       /*
+        * If the device is still present in the group after the above
+        * 'put', then it is in use and we need to request it from the
+        * bus driver.  The driver may in turn need to request the
+        * device from the user.  We send the request at an arbitrary
+        * interval with a counter to allow the driver to take escalating
+        * measures to release the device if it has the ability to do so.
+        */
+       do {
+               device = vfio_group_get_device(group, dev);
+               if (!device)
+                       break;
 
-       vfio_group_put(group);
+               if (device->ops->request)
+                       device->ops->request(device_data, i++);
 
-       iommu_group_put(iommu_group);
+               vfio_device_put(device);
+
+       } while (wait_event_interruptible_timeout(vfio.release_q,
+                                                 !vfio_dev_present(group, dev),
+                                                 HZ * 10) <= 0);
+
+       vfio_group_put(group);
 
        return device_data;
 }
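
The vfio core changes above track half-unbound devices on unbound_list so the group stays viable while a driver unbinds, and they replace the unconditional wait in vfio_del_group_dev() with a loop that re-issues device->ops->request() roughly every ten seconds with an incrementing count. A hypothetical bus-driver .request handler illustrating that escalating-count contract (names and policy are assumptions, not from the patch):

/* Hypothetical vfio bus-driver callback: invoked repeatedly by
 * vfio_del_group_dev() while userspace still holds the device. */
static void foo_vfio_request(void *device_data, unsigned int count)
{
        struct foo_device *fdev = device_data;  /* assumed driver-private type */

        if (count < 3)
                dev_dbg(fdev->dev, "device requested by host (attempt %u)\n",
                        count);
        else
                dev_warn(fdev->dev,
                         "device still held by user after %u requests\n",
                         count);
}

/* ...and in the driver's vfio_device_ops: .request = foo_vfio_request, */
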
index 4a9d666f1e9186ed07071c0d909ba167e814998a..57d8c37a002b0e0b8d49891536b8ddfab64afc6b 100644 (file)
@@ -66,6 +66,7 @@ struct vfio_domain {
        struct list_head        next;
        struct list_head        group_list;
        int                     prot;           /* IOMMU_CACHE */
+       bool                    fgsp;           /* Fine-grained super pages */
 };
 
 struct vfio_dma {
@@ -264,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        bool lock_cap = capable(CAP_IPC_LOCK);
        long ret, i;
+       bool rsvd;
 
        if (!current->mm)
                return -ENODEV;
@@ -272,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        if (ret)
                return ret;
 
-       if (is_invalid_reserved_pfn(*pfn_base))
-               return 1;
+       rsvd = is_invalid_reserved_pfn(*pfn_base);
 
-       if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+       if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
                put_pfn(*pfn_base, prot);
                pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
                        limit << PAGE_SHIFT);
@@ -283,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        }
 
        if (unlikely(disable_hugepages)) {
-               vfio_lock_acct(1);
+               if (!rsvd)
+                       vfio_lock_acct(1);
                return 1;
        }
 
@@ -295,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
                if (ret)
                        break;
 
-               if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+               if (pfn != *pfn_base + i ||
+                   rsvd != is_invalid_reserved_pfn(pfn)) {
                        put_pfn(pfn, prot);
                        break;
                }
 
-               if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+               if (!rsvd && !lock_cap &&
+                   current->mm->locked_vm + i + 1 > limit) {
                        put_pfn(pfn, prot);
                        pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
                                __func__, limit << PAGE_SHIFT);
@@ -308,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
                }
        }
 
-       vfio_lock_acct(i);
+       if (!rsvd)
+               vfio_lock_acct(i);
 
        return i;
 }
@@ -346,12 +351,14 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
        domain = d = list_first_entry(&iommu->domain_list,
                                      struct vfio_domain, next);
 
-       list_for_each_entry_continue(d, &iommu->domain_list, next)
+       list_for_each_entry_continue(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, dma->iova, dma->size);
+               cond_resched();
+       }
 
        while (iova < end) {
-               size_t unmapped;
-               phys_addr_t phys;
+               size_t unmapped, len;
+               phys_addr_t phys, next;
 
                phys = iommu_iova_to_phys(domain->domain, iova);
                if (WARN_ON(!phys)) {
@@ -359,7 +366,19 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                        continue;
                }
 
-               unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+               /*
+                * To optimize for fewer iommu_unmap() calls, each of which
+                * may require hardware cache flushing, try to find the
+                * largest contiguous physical memory chunk to unmap.
+                */
+               for (len = PAGE_SIZE;
+                    !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+                       next = iommu_iova_to_phys(domain->domain, iova + len);
+                       if (next != phys + len)
+                               break;
+               }
+
+               unmapped = iommu_unmap(domain->domain, iova, len);
                if (WARN_ON(!unmapped))
                        break;
 
@@ -367,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                                             unmapped >> PAGE_SHIFT,
                                             dma->prot, false);
                iova += unmapped;
+
+               cond_resched();
        }
 
        vfio_lock_acct(-unlocked);
@@ -511,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
                            map_try_harder(d, iova, pfn, npage, prot))
                                goto unwind;
                }
+
+               cond_resched();
        }
 
        return 0;
@@ -665,6 +688,39 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
        return 0;
 }
 
+/*
+ * We change our unmap behavior slightly depending on whether the IOMMU
+ * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
+ * for practically any contiguous power-of-two mapping we give it.  This means
+ * we don't need to look for contiguous chunks ourselves to make unmapping
+ * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
+ * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
+ * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+ * hugetlbfs is in use.
+ */
+static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+{
+       struct page *pages;
+       int ret, order = get_order(PAGE_SIZE * 2);
+
+       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!pages)
+               return;
+
+       ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+                       IOMMU_READ | IOMMU_WRITE | domain->prot);
+       if (!ret) {
+               size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+
+               if (unmapped == PAGE_SIZE)
+                       iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+               else
+                       domain->fgsp = true;
+       }
+
+       __free_pages(pages, order);
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
                                         struct iommu_group *iommu_group)
 {
@@ -758,6 +814,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
                }
        }
 
+       vfio_test_domain_fgsp(domain);
+
        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
        if (ret)
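
Two things stand out in the type1 IOMMU changes above: unmapping now coalesces physically contiguous pages into a single iommu_unmap() call (unless the domain has fine-grained superpages, probed by vfio_test_domain_fgsp()), and cond_resched() calls keep the long map/unmap loops preemptible. A stand-alone sketch of the coalescing idea, assuming the per-page physical addresses are already known (illustrative only):

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096UL

/* Count how many leading pages are physically contiguous, i.e. how much
 * can be covered by one unmap call instead of one call per page. */
static size_t contiguous_run(const uint64_t *phys, size_t npages)
{
        size_t i;

        for (i = 1; i < npages; i++)
                if (phys[i] != phys[0] + i * EXAMPLE_PAGE_SIZE)
                        break;
        return i;
}
/* e.g. 512 contiguous 4 KiB pages => one 2 MiB unmap instead of 512 calls */
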
index afa06d28725dad3960aed1df9dd4f9e9ddc53493..2bbfc25e582cb8b334a1ef4083b22da56c48cc65 100644 (file)
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
                         * TODO: support TSO.
                         */
                        iov_iter_advance(&msg.msg_iter, vhost_hlen);
-               } else {
-                       /* It'll come from socket; we'll need to patch
-                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-                        */
-                       iov_iter_advance(&fixup, sizeof(hdr));
                }
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
                        continue;
                }
                /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-               if (unlikely(vhost_hlen) &&
-                   copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-                       vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-                              vq->iov->iov_base);
-                       break;
+               if (unlikely(vhost_hlen)) {
+                       if (copy_to_iter(&hdr, sizeof(hdr),
+                                        &fixup) != sizeof(hdr)) {
+                               vq_err(vq, "Unable to write vnet_hdr "
+                                      "at addr %p\n", vq->iov->iov_base);
+                               break;
+                       }
+               } else {
+                       /* Header came from socket; we'll need to patch
+                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+                        */
+                       iov_iter_advance(&fixup, sizeof(hdr));
                }
                /* TODO: Should check and handle checksum. */
 
                num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+                   copy_to_iter(&num_buffers, sizeof num_buffers,
+                                &fixup) != sizeof num_buffers) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
index dc78d87e0fc2c5e14832adf674899823ac223c5f..8d4f3f1ff799fb1d7b4bb5a2184144b515676a2c 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 
 #include "vhost.h"
 
-#define TCM_VHOST_VERSION  "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION  "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
 struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
        struct kref kref;
 };
 
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
+       /* virtio-scsi response incoming iovecs */
+       int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
-       /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+       /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
-       /* Pointer to response */
-       struct virtio_scsi_cmd_resp __user *tvc_resp;
+       /* Pointer to response header iovec */
+       struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
-       struct tcm_vhost_nexus *tvc_nexus;
+       struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
-       /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+       /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
-       unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+       unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
        struct vhost_scsi_inflight *inflight;
 };
 
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
 };
 
-struct tcm_vhost_nacl {
+struct vhost_scsi_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for Sas Initiator port */
-       char iport_name[TCM_VHOST_NAMELEN];
-       /* Returned by tcm_vhost_make_nodeacl() */
+       char iport_name[VHOST_SCSI_NAMELEN];
+       /* Returned by vhost_scsi_make_nodeacl() */
        struct se_node_acl se_node_acl;
 };
 
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
-       /* list for tcm_vhost_list */
+       /* list for vhost_scsi_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
-       struct tcm_vhost_nexus *tpg_nexus;
-       /* Pointer back to tcm_vhost_tport */
-       struct tcm_vhost_tport *tport;
-       /* Returned by tcm_vhost_make_tpg() */
+       struct vhost_scsi_nexus *tpg_nexus;
+       /* Pointer back to vhost_scsi_tport */
+       struct vhost_scsi_tport *tport;
+       /* Returned by vhost_scsi_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
 };
 
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
-       char tport_name[TCM_VHOST_NAMELEN];
-       /* Returned by tcm_vhost_make_tport() */
+       char tport_name[VHOST_SCSI_NAMELEN];
+       /* Returned by vhost_scsi_make_tport() */
        struct se_wwn tport_wwn;
 };
 
-struct tcm_vhost_evt {
+struct vhost_scsi_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
@@ -171,7 +172,9 @@ enum {
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-                                              (1ULL << VIRTIO_SCSI_F_T10_PI)
+                                              (1ULL << VIRTIO_SCSI_F_T10_PI) |
+                                              (1ULL << VIRTIO_F_ANY_LAYOUT) |
+                                              (1ULL << VIRTIO_F_VERSION_1)
 };
 
 #define VHOST_SCSI_MAX_TARGET  256
@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
 
 struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
        struct vhost_dev dev;
@@ -212,21 +215,21 @@ struct vhost_scsi {
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
 
-static struct workqueue_struct *tcm_vhost_workqueue;
+static struct workqueue_struct *vhost_scsi_workqueue;
 
-/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
-static DEFINE_MUTEX(tcm_vhost_mutex);
-static LIST_HEAD(tcm_vhost_list);
+/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
 
-static int iov_num_pages(struct iovec *iov)
+static int iov_num_pages(void __user *iov_base, size_t iov_len)
 {
-       return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-              ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+       return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
+              ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-static void tcm_vhost_done_inflight(struct kref *kref)
+static void vhost_scsi_done_inflight(struct kref *kref)
 {
        struct vhost_scsi_inflight *inflight;
 
@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
        complete(&inflight->comp);
 }
 
-static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
 {
        struct vhost_scsi_inflight *new_inflight;
@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
 }
 
 static struct vhost_scsi_inflight *
-tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 {
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;
@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
        return inflight;
 }
 
-static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 {
-       kref_put(&inflight->kref, tcm_vhost_done_inflight);
+       kref_put(&inflight->kref, vhost_scsi_done_inflight);
 }
 
-static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
-static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 {
        return 0;
 }
 
-static char *tcm_vhost_get_fabric_name(void)
+static char *vhost_scsi_get_fabric_name(void)
 {
        return "vhost";
 }
 
-static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
        return sas_get_fabric_proto_ident(se_tpg);
 }
 
-static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        return &tport->tport_name[0];
 }
 
-static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
        return tpg->tport_tpgt;
 }
 
-static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
 static u32
-tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
 }
 
 static u32
-tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 }
 
 static char *
-tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 }
 
 static struct se_node_acl *
-tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
+vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_nacl *nacl;
+       struct vhost_scsi_nacl *nacl;
 
-       nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+       nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
        if (!nacl) {
-               pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+               pr_err("Unable to allocate struct vhost_scsi_nacl\n");
                return NULL;
        }
 
@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
 }
 
 static void
-tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
 {
-       struct tcm_vhost_nacl *nacl = container_of(se_nacl,
-                       struct tcm_vhost_nacl, se_node_acl);
+       struct vhost_scsi_nacl *nacl = container_of(se_nacl,
+                       struct vhost_scsi_nacl, se_node_acl);
        kfree(nacl);
 }
 
-static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
-static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
-       struct se_session *se_sess = se_cmd->se_sess;
+       struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
+       struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
        int i;
 
        if (tv_cmd->tvc_sgl_count) {
@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }
 
-       tcm_vhost_put_inflight(tv_cmd->inflight);
+       vhost_scsi_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+static int vhost_scsi_shutdown_session(struct se_session *se_sess)
 {
        return 0;
 }
 
-static void tcm_vhost_close_session(struct se_session *se_sess)
+static void vhost_scsi_close_session(struct se_session *se_sess)
 {
        return;
 }
 
-static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
        return 0;
 }
 
-static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
 }
 
-static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 {
        return;
 }
 
-static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 {
        struct vhost_scsi *vs = cmd->tvc_vhost;
 
@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
-static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
+       struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
-static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
+       struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
-static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
        return;
 }
 
-static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 {
        return;
 }
 
-static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
        vs->vs_events_nr--;
        kfree(evt);
 }
 
-static struct tcm_vhost_evt *
-tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
 
        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 
        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
-               vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+               vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }
@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 {
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 }
 
 static void
-tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
@@ -646,24 +649,24 @@ again:
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
-               vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+               vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 }
 
-static void tcm_vhost_evt_work(struct vhost_work *work)
+static void vhost_scsi_evt_work(struct vhost_work *work)
 {
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
        struct llist_node *llnode;
 
        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
-               evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+               evt = llist_entry(llnode, struct vhost_scsi_evt, list);
                llnode = llist_next(llnode);
-               tcm_vhost_do_evt_work(vs, evt);
-               tcm_vhost_free_evt(vs, evt);
+               vhost_scsi_do_evt_work(vs, evt);
+               vhost_scsi_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
 }
@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
-       struct tcm_vhost_cmd *cmd;
+       struct vhost_scsi_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
+       struct iov_iter iov_iter;
        int ret, vq;
 
        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
-               cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+               cmd = llist_entry(llnode, struct vhost_scsi_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;
@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);
-               ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
-               if (likely(ret == 0)) {
+
+               iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+                             cmd->tvc_in_iovs, sizeof(v_rsp));
+               ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+               if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
-static struct tcm_vhost_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
+static struct vhost_scsi_cmd *
+vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
 {
-       struct tcm_vhost_cmd *cmd;
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_cmd *cmd;
+       struct vhost_scsi_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
 
        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
-               pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+               pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;
 
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
-               pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
+               pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
+       cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
-       memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
+       memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
 
        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
-       cmd->inflight = tcm_vhost_get_inflight(vq);
+       cmd->inflight = vhost_scsi_get_inflight(vq);
 
-       memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
+       memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 
        return cmd;
 }
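
vhost_scsi_get_tag() pulls a tag from the session's percpu_ida pool and reuses the per-tag command slot; the sgl/prot_sgl/upages pointers are saved around the memset() precisely so the arrays preallocated at nexus creation are not lost. A distilled sketch of that reuse pattern, using the driver's own field names:

        /* Sketch: zero a reused command slot without dropping its
         * preallocated scatterlist and page arrays. */
        static void reset_cmd_slot(struct vhost_scsi_cmd *cmd)
        {
                struct scatterlist *sg = cmd->tvc_sgl;
                struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
                struct page **pages = cmd->tvc_upages;

                memset(cmd, 0, sizeof(*cmd));

                cmd->tvc_sgl = sg;
                cmd->tvc_prot_sgl = prot_sg;
                cmd->tvc_upages = pages;
        }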
@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int
-vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
+                     void __user *ptr,
+                     size_t len,
                      struct scatterlist *sgl,
-                     unsigned int sgl_count,
-                     struct iovec *iov,
-                     struct page **pages,
                      bool write)
 {
-       unsigned int npages = 0, pages_nr, offset, nbytes;
+       unsigned int npages = 0, offset, nbytes;
+       unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
-       void __user *ptr = iov->iov_base;
-       size_t len = iov->iov_len;
+       struct page **pages = cmd->tvc_upages;
        int ret, i;
 
-       pages_nr = iov_num_pages(iov);
-       if (pages_nr > sgl_count) {
-               pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " sgl_count: %u\n", pages_nr, sgl_count);
-               return -ENOBUFS;
-       }
-       if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
+       if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
-                       pages_nr, TCM_VHOST_PREALLOC_UPAGES);
+                      " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
+                       pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
                return -ENOBUFS;
        }
 
@@ -829,84 +829,94 @@ out:
 }
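
vhost_scsi_map_to_sgl() now takes a raw user (ptr, len) span and checks it only against the preallocated VHOST_SCSI_PREALLOC_UPAGES array before pinning pages. The page count it relies on is the usual round-up across page boundaries; a minimal sketch of that arithmetic, with a hypothetical helper name (iov_num_pages() is the driver's own):

        /* Sketch: how many pages a user (ptr, len) span touches. */
        static unsigned int span_num_pages(void __user *ptr, size_t len)
        {
                unsigned long first = (unsigned long)ptr >> PAGE_SHIFT;
                unsigned long last  = ((unsigned long)ptr + len - 1) >> PAGE_SHIFT;

                return len ? last - first + 1 : 0;
        }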
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
-                         struct iovec *iov,
-                         int niov,
-                         bool write)
+vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
-       struct scatterlist *sg = cmd->tvc_sgl;
-       unsigned int sgl_count = 0;
-       int ret, i;
+       int sgl_count = 0;
 
-       for (i = 0; i < niov; i++)
-               sgl_count += iov_num_pages(&iov[i]);
+       if (!iter || !iter->iov) {
+               pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
+                      " present\n", __func__, bytes);
+               return -EINVAL;
+       }
 
-       if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-               pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
-                       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-                       sgl_count, TCM_VHOST_PREALLOC_SGLS);
-               return -ENOBUFS;
+       sgl_count = iov_iter_npages(iter, 0xffff);
+       if (sgl_count > max_sgls) {
+               pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+                      " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+               return -EINVAL;
        }
+       return sgl_count;
+}
 
-       pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
-       sg_init_table(sg, sgl_count);
-       cmd->tvc_sgl_count = sgl_count;
+static int
+vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+                     struct iov_iter *iter,
+                     struct scatterlist *sg, int sg_count)
+{
+       size_t off = iter->iov_offset;
+       int i, ret;
 
-       pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
+       for (i = 0; i < iter->nr_segs; i++) {
+               void __user *base = iter->iov[i].iov_base + off;
+               size_t len = iter->iov[i].iov_len - off;
 
-       for (i = 0; i < niov; i++) {
-               ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
-                                           cmd->tvc_upages, write);
+               ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
                if (ret < 0) {
-                       for (i = 0; i < cmd->tvc_sgl_count; i++)
-                               put_page(sg_page(&cmd->tvc_sgl[i]));
-
-                       cmd->tvc_sgl_count = 0;
+                       for (i = 0; i < sg_count; i++) {
+                               struct page *page = sg_page(&sg[i]);
+                               if (page)
+                                       put_page(page);
+                       }
                        return ret;
                }
                sg += ret;
-               sgl_count -= ret;
+               off = 0;
        }
        return 0;
 }
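
vhost_scsi_calc_sgls() bounds the scatterlist count with iov_iter_npages() before vhost_scsi_iov_to_sgl() walks the iterator segment by segment; note that iov_offset applies only to the first segment, which is why off is reset to zero after it. A compact sketch of that walk, with a hypothetical per-range callback in place of vhost_scsi_map_to_sgl():

        /* Sketch: visit every user (base, len) range behind an iov_iter. */
        static int for_each_user_range(const struct iov_iter *iter,
                                       int (*fn)(void __user *base, size_t len))
        {
                size_t off = iter->iov_offset;  /* first segment only */
                unsigned long i;
                int ret;

                for (i = 0; i < iter->nr_segs; i++) {
                        void __user *base = iter->iov[i].iov_base + off;
                        size_t len = iter->iov[i].iov_len - off;

                        ret = fn(base, len);
                        if (ret < 0)
                                return ret;
                        off = 0;
                }
                return 0;
        }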
 
 static int
-vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
-                          struct iovec *iov,
-                          int niov,
-                          bool write)
-{
-       struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
-       unsigned int prot_sgl_count = 0;
-       int ret, i;
-
-       for (i = 0; i < niov; i++)
-               prot_sgl_count += iov_num_pages(&iov[i]);
-
-       if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
-               pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
-                       " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
-                       prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
-               return -ENOBUFS;
-       }
-
-       pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-                prot_sg, prot_sgl_count);
-       sg_init_table(prot_sg, prot_sgl_count);
-       cmd->tvc_prot_sgl_count = prot_sgl_count;
-
-       for (i = 0; i < niov; i++) {
-               ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
-                                           cmd->tvc_upages, write);
+vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+                size_t prot_bytes, struct iov_iter *prot_iter,
+                size_t data_bytes, struct iov_iter *data_iter)
+{
+       int sgl_count, ret;
+       bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
+
+       if (prot_bytes) {
+               sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
+                                                VHOST_SCSI_PREALLOC_PROT_SGLS);
+               if (sgl_count < 0)
+                       return sgl_count;
+
+               sg_init_table(cmd->tvc_prot_sgl, sgl_count);
+               cmd->tvc_prot_sgl_count = sgl_count;
+               pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+                        cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+
+               ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
+                                           cmd->tvc_prot_sgl,
+                                           cmd->tvc_prot_sgl_count);
                if (ret < 0) {
-                       for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
-                               put_page(sg_page(&cmd->tvc_prot_sgl[i]));
-
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
-               prot_sg += ret;
-               prot_sgl_count -= ret;
+       }
+       sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
+                                        VHOST_SCSI_PREALLOC_SGLS);
+       if (sgl_count < 0)
+               return sgl_count;
+
+       sg_init_table(cmd->tvc_sgl, sgl_count);
+       cmd->tvc_sgl_count = sgl_count;
+       pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+                 cmd->tvc_sgl, cmd->tvc_sgl_count);
+
+       ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
+                                   cmd->tvc_sgl, cmd->tvc_sgl_count);
+       if (ret < 0) {
+               cmd->tvc_sgl_count = 0;
+               return ret;
        }
        return 0;
 }
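
vhost_scsi_mapal() repeats the same three steps for the optional protection payload and then for the data payload: size the list with vhost_scsi_calc_sgls(), initialise it with sg_init_table(), and fill it from the corresponding iov_iter. A reduced sketch of one such pass, assuming a map_one() helper standing in for vhost_scsi_iov_to_sgl():

        /* Sketch: size, initialise and populate one scatterlist. */
        static int setup_one_sgl(struct vhost_scsi_cmd *cmd, bool write,
                                 struct iov_iter *iter, size_t bytes,
                                 struct scatterlist *sgl, int max_sgls,
                                 int *out_count)
        {
                int count = vhost_scsi_calc_sgls(iter, bytes, max_sgls);

                if (count < 0)
                        return count;

                sg_init_table(sgl, count);
                *out_count = count;

                return map_one(cmd, write, iter, sgl, count);   /* hypothetical */
        }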
@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
        return TCM_SIMPLE_TAG;
 }
 
-static void tcm_vhost_submission_work(struct work_struct *work)
+static void vhost_scsi_submission_work(struct work_struct *work)
 {
-       struct tcm_vhost_cmd *cmd =
-               container_of(work, struct tcm_vhost_cmd, work);
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_cmd *cmd =
+               container_of(work, struct vhost_scsi_cmd, work);
+       struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;
@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
-       struct tcm_vhost_tpg *tpg;
-       struct tcm_vhost_cmd *cmd;
+       struct vhost_scsi_cmd *cmd;
+       struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
-       u32 exp_data_len, data_first, data_num, data_direction, prot_first;
-       unsigned out, in, i;
-       int head, ret, data_niov, prot_niov, prot_bytes;
-       size_t req_size;
+       u32 exp_data_len, data_direction;
+       unsigned out, in;
+       int head, ret, prot_bytes;
+       size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+       size_t out_size, in_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
-       bool hdr_pi;
+       bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *req, *cdb;
 
        mutex_lock(&vq->mutex);
@@ -1014,10 +1025,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
-                                       ARRAY_SIZE(vq->iov), &out, &in,
-                                       NULL, NULL);
+                                        ARRAY_SIZE(vq->iov), &out, &in,
+                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-                                       head, out, in);
+                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        }
                        break;
                }
-
-               /* FIXME: BIDI operation */
-               if (out == 1 && in == 1) {
-                       data_direction = DMA_NONE;
-                       data_first = 0;
-                       data_num = 0;
-               } else if (out == 1 && in > 1) {
-                       data_direction = DMA_FROM_DEVICE;
-                       data_first = out + 1;
-                       data_num = in - 1;
-               } else if (out > 1 && in == 1) {
-                       data_direction = DMA_TO_DEVICE;
-                       data_first = 1;
-                       data_num = out - 1;
-               } else {
-                       vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
-                                       out, in);
-                       break;
-               }
-
                /*
-                * Check for a sane resp buffer so we can report errors to
-                * the guest.
+                * Check for a sane response buffer so we can report early
+                * errors back to the guest.
                 */
-               if (unlikely(vq->iov[out].iov_len !=
-                                       sizeof(struct virtio_scsi_cmd_resp))) {
-                       vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-                               " bytes\n", vq->iov[out].iov_len);
+               if (unlikely(vq->iov[out].iov_len < rsp_size)) {
+                       vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
+                               " size, got %zu bytes\n", vq->iov[out].iov_len);
                        break;
                }
-
-               if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+               /*
+                * Setup pointers and values based upon different virtio-scsi
+                * request header if T10_PI is enabled in KVM guest.
+                */
+               if (t10_pi) {
                        req = &v_req_pi;
+                       req_size = sizeof(v_req_pi);
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
-                       req_size = sizeof(v_req_pi);
-                       hdr_pi = true;
                } else {
                        req = &v_req;
+                       req_size = sizeof(v_req);
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
-                       req_size = sizeof(v_req);
-                       hdr_pi = false;
                }
+               /*
+                * FIXME: Not correct for BIDI operation
+                */
+               out_size = iov_length(vq->iov, out);
+               in_size = iov_length(&vq->iov[out], in);
 
-               if (unlikely(vq->iov[0].iov_len < req_size)) {
-                       pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
-                              req_size, vq->iov[0].iov_len);
-                       break;
-               }
-               ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
-               if (unlikely(ret)) {
-                       vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
-                       break;
-               }
+               /*
+                * Copy over the virtio-scsi request header, which for an
+                * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
+                * single iovec may contain both the header + outgoing
+                * WRITE payloads.
+                *
+                * copy_from_iter() will advance out_iter, so that it will
+                * point at the start of the outgoing WRITE payload, if
+                * DMA_TO_DEVICE is set.
+                */
+               iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
+               ret = copy_from_iter(req, req_size, &out_iter);
+               if (unlikely(ret != req_size)) {
+                       vq_err(vq, "Faulted on copy_from_iter\n");
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
+               }
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
+                       vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
 
                tpg = ACCESS_ONCE(vs_tpg[*target]);
-
-               /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
+                       /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
-
-               data_niov = data_num;
-               prot_niov = prot_first = prot_bytes = 0;
                /*
-                * Determine if any protection information iovecs are preceeding
-                * the actual data payload, and adjust data_first + data_niov
-                * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+                * Determine data_direction by calculating the total outgoing
+                * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
+                * response headers respectively.
                 *
-                * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+                * For DMA_TO_DEVICE this is out_iter, which is already pointing
+                * to the right place.
+                *
+                * For DMA_FROM_DEVICE, the iovec will be just past the end
+                * of the virtio-scsi response header in either the same
+                * or immediately following iovec.
+                *
+                * Any associated T10_PI bytes for the outgoing / incoming
+                * payloads are included in calculation of exp_data_len here.
                 */
-               if (hdr_pi) {
+               prot_bytes = 0;
+
+               if (out_size > req_size) {
+                       data_direction = DMA_TO_DEVICE;
+                       exp_data_len = out_size - req_size;
+                       data_iter = out_iter;
+               } else if (in_size > rsp_size) {
+                       data_direction = DMA_FROM_DEVICE;
+                       exp_data_len = in_size - rsp_size;
+
+                       iov_iter_init(&in_iter, READ, &vq->iov[out], in,
+                                     rsp_size + exp_data_len);
+                       iov_iter_advance(&in_iter, rsp_size);
+                       data_iter = in_iter;
+               } else {
+                       data_direction = DMA_NONE;
+                       exp_data_len = 0;
+               }
+               /*
+                * If T10_PI header + payload is present, setup prot_iter values
+                * and recalculate data_iter for vhost_scsi_mapal() mapping to
+                * host scatterlists via get_user_pages_fast().
+                */
+               if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
-                                       vq_err(vq, "Received non zero do_pi_niov"
-                                               ", but wrong data_direction\n");
-                                       goto err_cmd;
+                                       vq_err(vq, "Received non zero pi_bytesout,"
+                                               " but wrong data_direction\n");
+                                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                                       continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
-                                       vq_err(vq, "Received non zero di_pi_niov"
-                                               ", but wrong data_direction\n");
-                                       goto err_cmd;
+                                       vq_err(vq, "Received non zero pi_bytesin,"
+                                               " but wrong data_direction\n");
+                                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                                       continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
+                       /*
+                        * Set prot_iter to data_iter, and advance past any
+                        * preceding prot_bytes that may be present.
+                        *
+                        * Also fix up the exp_data_len to reflect only the
+                        * actual data payload length.
+                        */
                        if (prot_bytes) {
-                               int tmp = 0;
-
-                               for (i = 0; i < data_num; i++) {
-                                       tmp += vq->iov[data_first + i].iov_len;
-                                       prot_niov++;
-                                       if (tmp >= prot_bytes)
-                                               break;
-                               }
-                               prot_first = data_first;
-                               data_first += prot_niov;
-                               data_niov = data_num - prot_niov;
+                               exp_data_len -= prot_bytes;
+                               prot_iter = data_iter;
+                               iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
-               exp_data_len = 0;
-               for (i = 0; i < data_niov; i++)
-                       exp_data_len += vq->iov[data_first + i].iov_len;
                /*
-                * Check that the recieved CDB size does not exceeded our
-                * hardcoded max for vhost-scsi
+                * Check that the received CDB size does not exceed our
+                * hardcoded max for vhost-scsi, then get a pre-allocated
+                * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
-               if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
+               if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-                               scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
-                       goto err_cmd;
+                               scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
                }
-
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
-                                       PTR_ERR(cmd));
-                       goto err_cmd;
+                              PTR_ERR(cmd));
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
                }
-
-               pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-                       ": %d\n", cmd, exp_data_len, data_direction);
-
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
-               cmd->tvc_resp = vq->iov[out].iov_base;
+               cmd->tvc_resp_iov = &vq->iov[out];
+               cmd->tvc_in_iovs = in;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-                       cmd->tvc_cdb[0], cmd->tvc_lun);
+                        cmd->tvc_cdb[0], cmd->tvc_lun);
+               pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+                        " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
-               if (prot_niov) {
-                       ret = vhost_scsi_map_iov_to_prot(cmd,
-                                       &vq->iov[prot_first], prot_niov,
-                                       data_direction == DMA_FROM_DEVICE);
-                       if (unlikely(ret)) {
-                               vq_err(vq, "Failed to map iov to"
-                                       " prot_sgl\n");
-                               goto err_free;
-                       }
-               }
                if (data_direction != DMA_NONE) {
-                       ret = vhost_scsi_map_iov_to_sgl(cmd,
-                                       &vq->iov[data_first], data_niov,
-                                       data_direction == DMA_FROM_DEVICE);
+                       ret = vhost_scsi_mapal(cmd,
+                                              prot_bytes, &prot_iter,
+                                              exp_data_len, &data_iter);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
-                               goto err_free;
+                               vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+                               vhost_scsi_send_bad_target(vs, vq, head, out);
+                               continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
-                * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+                * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
-                * Dispatch tv_cmd descriptor for cmwq execution in process
-                * context provided by tcm_vhost_workqueue.  This also ensures
-                * tv_cmd is executed on the same kworker CPU as this vhost
-                * thread to gain positive L2 cache locality effects..
+                * Dispatch cmd descriptor for cmwq execution in process
+                * context provided by vhost_scsi_workqueue.  This also ensures
+                * cmd is executed on the same kworker CPU as this vhost
+                * thread to gain positive L2 cache locality effects.
                 */
-               INIT_WORK(&cmd->work, tcm_vhost_submission_work);
-               queue_work(tcm_vhost_workqueue, &cmd->work);
+               INIT_WORK(&cmd->work, vhost_scsi_submission_work);
+               queue_work(vhost_scsi_workqueue, &cmd->work);
        }
-
-       mutex_unlock(&vq->mutex);
-       return;
-
-err_free:
-       vhost_scsi_free_cmd(cmd);
-err_cmd:
-       vhost_scsi_send_bad_target(vs, vq, head, out);
 out:
        mutex_unlock(&vq->mutex);
 }
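
The per-request direction is now inferred from byte totals rather than descriptor counts: iov_length() sums the out and in iovecs, whatever exceeds the request header is WRITE payload, and whatever exceeds the response header is READ payload; when T10 PI bytes lead the payload, prot_iter is snapshotted before data_iter is advanced past them. A condensed restatement of that arithmetic, under the same variable names used above:

        /* Sketch: direction and payload length from header vs. iovec totals. */
        if (out_size > req_size) {
                data_direction = DMA_TO_DEVICE;         /* guest WRITE */
                exp_data_len = out_size - req_size;
        } else if (in_size > rsp_size) {
                data_direction = DMA_FROM_DEVICE;       /* guest READ  */
                exp_data_len = in_size - rsp_size;
        } else {
                data_direction = DMA_NONE;
                exp_data_len = 0;
        }

        /* T10 PI bytes precede the data payload within the same iterator. */
        if (prot_bytes) {
                exp_data_len -= prot_bytes;
                prot_iter = data_iter;
                iov_iter_advance(&data_iter, prot_bytes);
        }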
@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 }
 
 static void
-tcm_vhost_send_evt(struct vhost_scsi *vs,
-                  struct tcm_vhost_tpg *tpg,
+vhost_scsi_send_evt(struct vhost_scsi *vs,
+                  struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
 {
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
 
-       evt = tcm_vhost_allocate_evt(vs, event, reason);
+       evt = vhost_scsi_allocate_evt(vs, event, reason);
        if (!evt)
                return;
 
@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
-               evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+               evt->event.lun[1] = tpg->tport_tpgt;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
                goto out;
 
        if (vs->vs_events_missed)
-               tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+               vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
        int i;
 
        /* Init new inflight and remember the old inflight */
-       tcm_vhost_init_inflight(vs, old_inflight);
+       vhost_scsi_init_inflight(vs, old_inflight);
 
        /*
         * The inflight->kref was initialized to 1. We decrement it here to
@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-               kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+               kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 
        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
- * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ * vhost_scsi_tpg with an active struct vhost_scsi_nexus
  *
  *  The lock nesting rule is:
- *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
 {
        struct se_portal_group *se_tpg;
-       struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tpg;
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tport *tv_tport;
+       struct vhost_scsi_tpg *tpg;
+       struct vhost_scsi_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
 
        /* Verify that ring has been setup correctly. */
@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);
 
-       list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
+       list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 
 out:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                          struct vhost_scsi_target *t)
 {
        struct se_portal_group *se_tpg;
-       struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tpg;
+       struct vhost_scsi_tport *tv_tport;
+       struct vhost_scsi_tpg *tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
        u8 target;
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return 0;
 
 err_tpg:
        mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                goto err_vqs;
 
        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
-       vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
+       vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
 
        vs->vs_events_nr = 0;
        vs->vs_events_missed = false;
@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        }
        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
 
-       tcm_vhost_init_inflight(vs, NULL);
+       vhost_scsi_init_inflight(vs, NULL);
 
        f->private_data = vs;
        return 0;
@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void)
        return misc_deregister(&vhost_scsi_misc);
 }
 
-static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
 {
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
 }
 
 static void
-tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
                  struct se_lun *lun, bool plug)
 {
 
@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
-               tcm_vhost_send_evt(vs, tpg, lun,
+               vhost_scsi_send_evt(vs, tpg, lun,
                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);
        mutex_unlock(&vs->dev.mutex);
 }
 
-static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-       tcm_vhost_do_plug(tpg, lun, true);
+       vhost_scsi_do_plug(tpg, lun, true);
 }
 
-static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-       tcm_vhost_do_plug(tpg, lun, false);
+       vhost_scsi_do_plug(tpg, lun, false);
 }
 
-static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
+static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
                               struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count++;
        mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotplug(tpg, lun);
+       vhost_scsi_hotplug(tpg, lun);
 
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
 
        return 0;
 }
 
-static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
+static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
                                  struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count--;
        mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotunplug(tpg, lun);
+       vhost_scsi_hotunplug(tpg, lun);
 
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
 }
 
 static struct se_node_acl *
-tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
+vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
                       struct config_group *group,
                       const char *name)
 {
        struct se_node_acl *se_nacl, *se_nacl_new;
-       struct tcm_vhost_nacl *nacl;
+       struct vhost_scsi_nacl *nacl;
        u64 wwpn = 0;
        u32 nexus_depth;
 
-       /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+       /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                return ERR_PTR(-EINVAL); */
-       se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+       se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
        if (!se_nacl_new)
                return ERR_PTR(-ENOMEM);
 
@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
                                name, nexus_depth);
        if (IS_ERR(se_nacl)) {
-               tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+               vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
                return se_nacl;
        }
        /*
-        * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+        * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
         */
-       nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+       nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
        nacl->iport_wwpn = wwpn;
 
        return se_nacl;
 }
 
-static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
 {
-       struct tcm_vhost_nacl *nacl = container_of(se_acl,
-                               struct tcm_vhost_nacl, se_node_acl);
+       struct vhost_scsi_nacl *nacl = container_of(se_acl,
+                               struct vhost_scsi_nacl, se_node_acl);
        core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
        kfree(nacl);
 }
 
-static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
+static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
                                       struct se_session *se_sess)
 {
-       struct tcm_vhost_cmd *tv_cmd;
+       struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
 
        if (!se_sess->sess_cmd_map)
                return;
 
-       for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-               tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+       for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+               tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
                kfree(tv_cmd->tvc_sgl);
                kfree(tv_cmd->tvc_prot_sgl);
@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
        }
 }
 
-static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
                                const char *name)
 {
        struct se_portal_group *se_tpg;
        struct se_session *se_sess;
-       struct tcm_vhost_nexus *tv_nexus;
-       struct tcm_vhost_cmd *tv_cmd;
+       struct vhost_scsi_nexus *tv_nexus;
+       struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
 
        mutex_lock(&tpg->tv_tpg_mutex);
@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        }
        se_tpg = &tpg->se_tpg;
 
-       tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+       tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
        if (!tv_nexus) {
                mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+               pr_err("Unable to allocate struct vhost_scsi_nexus\n");
                return -ENOMEM;
        }
        /*
         *  Initialize the struct se_session pointer and setup tagpool
-        *  for struct tcm_vhost_cmd descriptors
+        *  for struct vhost_scsi_cmd descriptors
         */
        tv_nexus->tvn_se_sess = transport_init_session_tags(
-                                       TCM_VHOST_DEFAULT_TAGS,
-                                       sizeof(struct tcm_vhost_cmd),
+                                       VHOST_SCSI_DEFAULT_TAGS,
+                                       sizeof(struct vhost_scsi_cmd),
                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
                mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                return -ENOMEM;
        }
        se_sess = tv_nexus->tvn_se_sess;
-       for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-               tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+       for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+               tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
                tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_sgl) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                }
 
                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-                                       TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
                if (!tv_cmd->tvc_upages) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                }
 
                tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_prot_sgl) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        }
        /*
         * Since we are running in 'demo mode' this call will generate a
-        * struct se_node_acl for the tcm_vhost struct se_portal_group with
+        * struct se_node_acl for the vhost_scsi struct se_portal_group with
         * the SCSI Initiator port name of the passed configfs group 'name'.
         */
        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        return 0;
 
 out:
-       tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
        transport_free_session(se_sess);
        kfree(tv_nexus);
        return -ENOMEM;
 }
 
-static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
+static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
 {
        struct se_session *se_sess;
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_nexus *tv_nexus;
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tv_nexus = tpg->tpg_nexus;
@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
        }
 
        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
-               " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+               " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-       tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
        /*
         * Release the SCSI I_T Nexus to the emulated vhost Target Port
         */
@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
        return 0;
 }
 
-static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
                                        char *page)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_nexus *tv_nexus;
        ssize_t ret;
 
        mutex_lock(&tpg->tv_tpg_mutex);
@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
        return ret;
 }
 
-static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
                                         const char *page,
                                         size_t count)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport_wwn = tpg->tport;
-       unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport_wwn = tpg->tport;
+       unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
        int ret;
        /*
         * Shut down the active I_T nexus if 'NULL' is passed.
         */
        if (!strncmp(page, "NULL", 4)) {
-               ret = tcm_vhost_drop_nexus(tpg);
+               ret = vhost_scsi_drop_nexus(tpg);
                return (!ret) ? count : ret;
        }
        /*
         * Otherwise make sure the passed virtual Initiator port WWN matches
-        * the fabric protocol_id set in tcm_vhost_make_tport(), and call
-        * tcm_vhost_make_nexus().
+        * the fabric protocol_id set in vhost_scsi_make_tport(), and call
+        * vhost_scsi_make_nexus().
         */
-       if (strlen(page) >= TCM_VHOST_NAMELEN) {
+       if (strlen(page) >= VHOST_SCSI_NAMELEN) {
                pr_err("Emulated NAA Sas Address: %s, exceeds"
-                               " max: %d\n", page, TCM_VHOST_NAMELEN);
+                               " max: %d\n", page, VHOST_SCSI_NAMELEN);
                return -EINVAL;
        }
-       snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+       snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
 
        ptr = strstr(i_port, "naa.");
        if (ptr) {
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
                        pr_err("Passed SAS Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
                        pr_err("Passed FCP Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
                        pr_err("Passed iSCSI Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
@@ -2101,40 +2115,40 @@ check_newline:
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';
 
-       ret = tcm_vhost_make_nexus(tpg, port_ptr);
+       ret = vhost_scsi_make_nexus(tpg, port_ptr);
        if (ret < 0)
                return ret;
 
        return count;
 }
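
The nexus store path only accepts an initiator WWN whose prefix matches the tport's protocol id: "naa." for SAS (kept as-is), "fc." for FCP (the prefix is skipped), and "iqn." for iSCSI (kept as-is). A simplified sketch of that dispatch, anchoring each prefix at the start of the string for clarity:

        /* Sketch: choose the initiator-name pointer by transport prefix. */
        static const char *initiator_port_name(const char *i_port)
        {
                if (strncmp(i_port, "naa.", 4) == 0)
                        return i_port;          /* SAS: keep "naa."   */
                if (strncmp(i_port, "fc.", 3) == 0)
                        return i_port + 3;      /* FCP: skip "fc."    */
                if (strncmp(i_port, "iqn.", 4) == 0)
                        return i_port;          /* iSCSI: keep "iqn." */
                return NULL;                    /* unrecognised prefix */
        }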
 
-TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
 
-static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
-       &tcm_vhost_tpg_nexus.attr,
+static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
+       &vhost_scsi_tpg_nexus.attr,
        NULL,
 };
 
 static struct se_portal_group *
-tcm_vhost_make_tpg(struct se_wwn *wwn,
+vhost_scsi_make_tpg(struct se_wwn *wwn,
                   struct config_group *group,
                   const char *name)
 {
-       struct tcm_vhost_tport *tport = container_of(wwn,
-                       struct tcm_vhost_tport, tport_wwn);
+       struct vhost_scsi_tport *tport = container_of(wwn,
+                       struct vhost_scsi_tport, tport_wwn);
 
-       struct tcm_vhost_tpg *tpg;
-       unsigned long tpgt;
+       struct vhost_scsi_tpg *tpg;
+       u16 tpgt;
        int ret;
 
        if (strstr(name, "tpgt_") != name)
                return ERR_PTR(-EINVAL);
-       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+       if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
                return ERR_PTR(-EINVAL);
 
-       tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+       tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
        if (!tpg) {
-               pr_err("Unable to allocate struct tcm_vhost_tpg");
+               pr_err("Unable to allocate struct vhost_scsi_tpg");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&tpg->tv_tpg_mutex);
@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
-       ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+       ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
                                &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
        }
-       mutex_lock(&tcm_vhost_mutex);
-       list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
+       list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
+       mutex_unlock(&vhost_scsi_mutex);
 
        return &tpg->se_tpg;
 }
 
-static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        list_del(&tpg->tv_tpg_list);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        /*
         * Release the virtual I_T Nexus for this vhost TPG
         */
-       tcm_vhost_drop_nexus(tpg);
+       vhost_scsi_drop_nexus(tpg);
        /*
         * Deregister the se_tpg from TCM..
         */
@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
 }
 
 static struct se_wwn *
-tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+vhost_scsi_make_tport(struct target_fabric_configfs *tf,
                     struct config_group *group,
                     const char *name)
 {
-       struct tcm_vhost_tport *tport;
+       struct vhost_scsi_tport *tport;
        char *ptr;
        u64 wwpn = 0;
        int off = 0;
 
-       /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+       /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                return ERR_PTR(-EINVAL); */
 
-       tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+       tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
        if (!tport) {
-               pr_err("Unable to allocate struct tcm_vhost_tport");
+               pr_err("Unable to allocate struct vhost_scsi_tport");
                return ERR_PTR(-ENOMEM);
        }
        tport->tport_wwpn = wwpn;
@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
        return ERR_PTR(-EINVAL);
 
 check_len:
-       if (strlen(name) >= TCM_VHOST_NAMELEN) {
+       if (strlen(name) >= VHOST_SCSI_NAMELEN) {
                pr_err("Emulated %s Address: %s, exceeds"
-                       " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
-                       TCM_VHOST_NAMELEN);
+                       " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+                       VHOST_SCSI_NAMELEN);
                kfree(tport);
                return ERR_PTR(-EINVAL);
        }
-       snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+       snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
 
        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
-               " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+               " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
 
        return &tport->tport_wwn;
 }
 
-static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+static void vhost_scsi_drop_tport(struct se_wwn *wwn)
 {
-       struct tcm_vhost_tport *tport = container_of(wwn,
-                               struct tcm_vhost_tport, tport_wwn);
+       struct vhost_scsi_tport *tport = container_of(wwn,
+                               struct vhost_scsi_tport, tport_wwn);
 
        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
-               " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+               " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
                tport->tport_name);
 
        kfree(tport);
 }
 
 static ssize_t
-tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
+vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
                                char *page)
 {
        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
-               "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+               " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
 }
 
-TF_WWN_ATTR_RO(tcm_vhost, version);
+TF_WWN_ATTR_RO(vhost_scsi, version);
 
-static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
-       &tcm_vhost_wwn_version.attr,
+static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
+       &vhost_scsi_wwn_version.attr,
        NULL,
 };
 
-static struct target_core_fabric_ops tcm_vhost_ops = {
-       .get_fabric_name                = tcm_vhost_get_fabric_name,
-       .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
-       .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
-       .tpg_get_tag                    = tcm_vhost_get_tag,
-       .tpg_get_default_depth          = tcm_vhost_get_default_depth,
-       .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
-       .tpg_check_demo_mode            = tcm_vhost_check_true,
-       .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
-       .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
-       .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
-       .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
-       .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
-       .release_cmd                    = tcm_vhost_release_cmd,
+static struct target_core_fabric_ops vhost_scsi_ops = {
+       .get_fabric_name                = vhost_scsi_get_fabric_name,
+       .get_fabric_proto_ident         = vhost_scsi_get_fabric_proto_ident,
+       .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
+       .tpg_get_tag                    = vhost_scsi_get_tpgt,
+       .tpg_get_default_depth          = vhost_scsi_get_default_depth,
+       .tpg_get_pr_transport_id        = vhost_scsi_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = vhost_scsi_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = vhost_scsi_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = vhost_scsi_check_true,
+       .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
+       .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
+       .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+       .tpg_alloc_fabric_acl           = vhost_scsi_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = vhost_scsi_release_fabric_acl,
+       .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
+       .release_cmd                    = vhost_scsi_release_cmd,
        .check_stop_free                = vhost_scsi_check_stop_free,
-       .shutdown_session               = tcm_vhost_shutdown_session,
-       .close_session                  = tcm_vhost_close_session,
-       .sess_get_index                 = tcm_vhost_sess_get_index,
+       .shutdown_session               = vhost_scsi_shutdown_session,
+       .close_session                  = vhost_scsi_close_session,
+       .sess_get_index                 = vhost_scsi_sess_get_index,
        .sess_get_initiator_sid         = NULL,
-       .write_pending                  = tcm_vhost_write_pending,
-       .write_pending_status           = tcm_vhost_write_pending_status,
-       .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
-       .get_task_tag                   = tcm_vhost_get_task_tag,
-       .get_cmd_state                  = tcm_vhost_get_cmd_state,
-       .queue_data_in                  = tcm_vhost_queue_data_in,
-       .queue_status                   = tcm_vhost_queue_status,
-       .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
-       .aborted_task                   = tcm_vhost_aborted_task,
+       .write_pending                  = vhost_scsi_write_pending,
+       .write_pending_status           = vhost_scsi_write_pending_status,
+       .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
+       .get_task_tag                   = vhost_scsi_get_task_tag,
+       .get_cmd_state                  = vhost_scsi_get_cmd_state,
+       .queue_data_in                  = vhost_scsi_queue_data_in,
+       .queue_status                   = vhost_scsi_queue_status,
+       .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
+       .aborted_task                   = vhost_scsi_aborted_task,
        /*
         * Setup callers for generic logic in target_core_fabric_configfs.c
         */
-       .fabric_make_wwn                = tcm_vhost_make_tport,
-       .fabric_drop_wwn                = tcm_vhost_drop_tport,
-       .fabric_make_tpg                = tcm_vhost_make_tpg,
-       .fabric_drop_tpg                = tcm_vhost_drop_tpg,
-       .fabric_post_link               = tcm_vhost_port_link,
-       .fabric_pre_unlink              = tcm_vhost_port_unlink,
+       .fabric_make_wwn                = vhost_scsi_make_tport,
+       .fabric_drop_wwn                = vhost_scsi_drop_tport,
+       .fabric_make_tpg                = vhost_scsi_make_tpg,
+       .fabric_drop_tpg                = vhost_scsi_drop_tpg,
+       .fabric_post_link               = vhost_scsi_port_link,
+       .fabric_pre_unlink              = vhost_scsi_port_unlink,
        .fabric_make_np                 = NULL,
        .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
-       .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
+       .fabric_make_nodeacl            = vhost_scsi_make_nodeacl,
+       .fabric_drop_nodeacl            = vhost_scsi_drop_nodeacl,
 };
 
-static int tcm_vhost_register_configfs(void)
+static int vhost_scsi_register_configfs(void)
 {
        struct target_fabric_configfs *fabric;
        int ret;
 
-       pr_debug("TCM_VHOST fabric module %s on %s/%s"
-               " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+       pr_debug("vhost-scsi fabric module %s on %s/%s"
+               " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
        /*
         * Register the top level struct config_item_type with TCM core
@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void)
                return PTR_ERR(fabric);
        }
        /*
-        * Setup fabric->tf_ops from our local tcm_vhost_ops
+        * Setup fabric->tf_ops from our local vhost_scsi_ops
         */
-       fabric->tf_ops = tcm_vhost_ops;
+       fabric->tf_ops = vhost_scsi_ops;
        /*
         * Setup default attribute lists for various fabric->tf_cit_tmpl
         */
-       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
-       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
        fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void)
        /*
         * Setup our local pointer to *fabric
         */
-       tcm_vhost_fabric_configfs = fabric;
-       pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+       vhost_scsi_fabric_configfs = fabric;
+       pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
        return 0;
 };
 
-static void tcm_vhost_deregister_configfs(void)
+static void vhost_scsi_deregister_configfs(void)
 {
-       if (!tcm_vhost_fabric_configfs)
+       if (!vhost_scsi_fabric_configfs)
                return;
 
-       target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
-       tcm_vhost_fabric_configfs = NULL;
-       pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+       target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
+       vhost_scsi_fabric_configfs = NULL;
+       pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
 };
 
-static int __init tcm_vhost_init(void)
+static int __init vhost_scsi_init(void)
 {
        int ret = -ENOMEM;
        /*
         * Use our own dedicated workqueue for submitting I/O into
         * target core to avoid contention within system_wq.
         */
-       tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
-       if (!tcm_vhost_workqueue)
+       vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
+       if (!vhost_scsi_workqueue)
                goto out;
 
        ret = vhost_scsi_register();
        if (ret < 0)
                goto out_destroy_workqueue;
 
-       ret = tcm_vhost_register_configfs();
+       ret = vhost_scsi_register_configfs();
        if (ret < 0)
                goto out_vhost_scsi_deregister;
 
@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void)
 out_vhost_scsi_deregister:
        vhost_scsi_deregister();
 out_destroy_workqueue:
-       destroy_workqueue(tcm_vhost_workqueue);
+       destroy_workqueue(vhost_scsi_workqueue);
 out:
        return ret;
 };
 
-static void tcm_vhost_exit(void)
+static void vhost_scsi_exit(void)
 {
-       tcm_vhost_deregister_configfs();
+       vhost_scsi_deregister_configfs();
        vhost_scsi_deregister();
-       destroy_workqueue(tcm_vhost_workqueue);
+       destroy_workqueue(vhost_scsi_workqueue);
 };
 
 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
 MODULE_ALIAS("tcm_vhost");
 MODULE_LICENSE("GPL");
-module_init(tcm_vhost_init);
-module_exit(tcm_vhost_exit);
+module_init(vhost_scsi_init);
+module_exit(vhost_scsi_exit);
index 32c0b6b28097f115f5a701d7a7d59f50d92d147d..9362424c2340490585fe02e4dfe950f53f2097af 100644 (file)
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
 
        len = clcdfb_snprintf_mode(NULL, 0, mode);
        name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+
        clcdfb_snprintf_mode(name, len + 1, mode);
        mode->name = name;
 
index 95338593ebf4bc8bfc280bf1567c5fa90d712916..868facdec6384da049eb130097eaf76c994b95d0 100644 (file)
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
        int num = 0, i, first = 1;
        int ver, rev;
 
-       ver = edid[EDID_STRUCT_VERSION];
-       rev = edid[EDID_STRUCT_REVISION];
-
        mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
        if (mode == NULL)
                return NULL;
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize,
                return NULL;
        }
 
+       ver = edid[EDID_STRUCT_VERSION];
+       rev = edid[EDID_STRUCT_REVISION];
+
        *dbsize = 0;
 
        DPRINTK("   Detailed Timings\n");
index 5a2095a98ed868016f0f5283c431b012bf2eac08..12186557a9d4d5030d23dd91453029a3d2cc8a3c 100644 (file)
 #include <video/omapdss.h>
 #include "dss.h"
 
-static struct omap_dss_device *to_dss_device_sysfs(struct device *dev)
+static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = NULL;
-
-       for_each_dss_dev(dssdev) {
-               if (dssdev->dev == dev) {
-                       omap_dss_put_device(dssdev);
-                       return dssdev;
-               }
-       }
-
-       return NULL;
-}
-
-static ssize_t display_name_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
-
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        dssdev->name ?
                        dssdev->name : "");
 }
 
-static ssize_t display_enabled_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
-
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        omapdss_device_is_enabled(dssdev));
 }
 
-static ssize_t display_enabled_store(struct device *dev,
-               struct device_attribute *attr,
+static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
                const char *buf, size_t size)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        int r;
        bool enable;
 
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev,
        return size;
 }
 
-static ssize_t display_tear_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        dssdev->driver->get_te ?
                        dssdev->driver->get_te(dssdev) : 0);
 }
 
-static ssize_t display_tear_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_tear_store(struct omap_dss_device *dssdev,
+       const char *buf, size_t size)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        int r;
        bool te;
 
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev,
        return size;
 }
 
-static ssize_t display_timings_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        struct omap_video_timings t;
 
        if (!dssdev->driver->get_timings)
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev,
                        t.y_res, t.vfp, t.vbp, t.vsw);
 }
 
-static ssize_t display_timings_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_timings_store(struct omap_dss_device *dssdev,
+       const char *buf, size_t size)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        struct omap_video_timings t = dssdev->panel.timings;
        int r, found;
 
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev,
        return size;
 }
 
-static ssize_t display_rotate_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        int rotate;
        if (!dssdev->driver->get_rotate)
                return -ENOENT;
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
 }
 
-static ssize_t display_rotate_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
+       const char *buf, size_t size)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        int rot, r;
 
        if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev,
        return size;
 }
 
-static ssize_t display_mirror_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        int mirror;
        if (!dssdev->driver->get_mirror)
                return -ENOENT;
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
 }
 
-static ssize_t display_mirror_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
+       const char *buf, size_t size)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        int r;
        bool mirror;
 
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev,
        return size;
 }
 
-static ssize_t display_wss_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        unsigned int wss;
 
        if (!dssdev->driver->get_wss)
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
 }
 
-static ssize_t display_wss_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_wss_store(struct omap_dss_device *dssdev,
+       const char *buf, size_t size)
 {
-       struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
        u32 wss;
        int r;
 
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev,
        return size;
 }
 
-static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+struct display_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct omap_dss_device *, char *);
+       ssize_t (*store)(struct omap_dss_device *, const char *, size_t);
+};
+
+#define DISPLAY_ATTR(_name, _mode, _show, _store) \
+       struct display_attribute display_attr_##_name = \
+       __ATTR(_name, _mode, _show, _store)
+
+static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
+static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
+static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
                display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
                display_tear_show, display_tear_store);
-static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
                display_timings_show, display_timings_store);
-static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
                display_rotate_show, display_rotate_store);
-static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
                display_mirror_show, display_mirror_store);
-static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
                display_wss_show, display_wss_store);
 
-static const struct attribute *display_sysfs_attrs[] = {
-       &dev_attr_display_name.attr,
-       &dev_attr_enabled.attr,
-       &dev_attr_tear_elim.attr,
-       &dev_attr_timings.attr,
-       &dev_attr_rotate.attr,
-       &dev_attr_mirror.attr,
-       &dev_attr_wss.attr,
+static struct attribute *display_sysfs_attrs[] = {
+       &display_attr_name.attr,
+       &display_attr_display_name.attr,
+       &display_attr_enabled.attr,
+       &display_attr_tear_elim.attr,
+       &display_attr_timings.attr,
+       &display_attr_rotate.attr,
+       &display_attr_mirror.attr,
+       &display_attr_wss.attr,
        NULL
 };
 
+static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
+               char *buf)
+{
+       struct omap_dss_device *dssdev;
+       struct display_attribute *display_attr;
+
+       dssdev = container_of(kobj, struct omap_dss_device, kobj);
+       display_attr = container_of(attr, struct display_attribute, attr);
+
+       if (!display_attr->show)
+               return -ENOENT;
+
+       return display_attr->show(dssdev, buf);
+}
+
+static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
+               const char *buf, size_t size)
+{
+       struct omap_dss_device *dssdev;
+       struct display_attribute *display_attr;
+
+       dssdev = container_of(kobj, struct omap_dss_device, kobj);
+       display_attr = container_of(attr, struct display_attribute, attr);
+
+       if (!display_attr->store)
+               return -ENOENT;
+
+       return display_attr->store(dssdev, buf, size);
+}
+
+static const struct sysfs_ops display_sysfs_ops = {
+       .show = display_attr_show,
+       .store = display_attr_store,
+};
+
+static struct kobj_type display_ktype = {
+       .sysfs_ops = &display_sysfs_ops,
+       .default_attrs = display_sysfs_attrs,
+};
+
 int display_init_sysfs(struct platform_device *pdev)
 {
        struct omap_dss_device *dssdev = NULL;
        int r;
 
        for_each_dss_dev(dssdev) {
-               struct kobject *kobj = &dssdev->dev->kobj;
-
-               r = sysfs_create_files(kobj, display_sysfs_attrs);
+               r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
+                       &pdev->dev.kobj, dssdev->alias);
                if (r) {
                        DSSERR("failed to create sysfs files\n");
-                       goto err;
-               }
-
-               r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias);
-               if (r) {
-                       sysfs_remove_files(kobj, display_sysfs_attrs);
-
-                       DSSERR("failed to create sysfs display link\n");
+                       omap_dss_put_device(dssdev);
                        goto err;
                }
        }
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev)
        struct omap_dss_device *dssdev = NULL;
 
        for_each_dss_dev(dssdev) {
-               sysfs_remove_link(&pdev->dev.kobj, dssdev->alias);
-               sysfs_remove_files(&dssdev->dev->kobj,
-                               display_sysfs_attrs);
+               if (kobject_name(&dssdev->kobj) == NULL)
+                       continue;
+
+               kobject_del(&dssdev->kobj);
+               kobject_put(&dssdev->kobj);
+
+               memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
        }
 }
index 0413157f3b49c230aaa7a775564086144977803f..6a356e344f82c3d5b594abeeb47c453e868c4f9f 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/balloon_compaction.h>
 #include <linux/oom.h>
+#include <linux/wait.h>
 
 /*
  * Balloon device works in 4K page units.  So each page is pointed to by
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
 static int balloon(void *_vballoon)
 {
        struct virtio_balloon *vb = _vballoon;
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        set_freezable();
        while (!kthread_should_stop()) {
                s64 diff;
 
                try_to_freeze();
-               wait_event_interruptible(vb->config_change,
-                                        (diff = towards_target(vb)) != 0
-                                        || vb->need_stats_update
-                                        || kthread_should_stop()
-                                        || freezing(current));
+
+               add_wait_queue(&vb->config_change, &wait);
+               for (;;) {
+                       if ((diff = towards_target(vb)) != 0 ||
+                           vb->need_stats_update ||
+                           kthread_should_stop() ||
+                           freezing(current))
+                               break;
+                       wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+               }
+               remove_wait_queue(&vb->config_change, &wait);
+
                if (vb->need_stats_update)
                        stats_handle_request(vb);
                if (diff > 0)
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
        if (err < 0)
                goto out_oom_notify;
 
+       virtio_device_ready(vdev);
+
        vb->thread = kthread_run(balloon, vb, "vballoon");
        if (IS_ERR(vb->thread)) {
                err = PTR_ERR(vb->thread);
index cad569890908de40ba4d72edd6562fb87b6e14b5..6010d7ec0a0f899b7b7690d202e9b57c6dbc7193 100644 (file)
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
                   void *buf, unsigned len)
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-       u8 *ptr = buf;
-       int i;
+       void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+       u8 b;
+       __le16 w;
+       __le32 l;
 
-       for (i = 0; i < len; i++)
-               ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+       if (vm_dev->version == 1) {
+               u8 *ptr = buf;
+               int i;
+
+               for (i = 0; i < len; i++)
+                       ptr[i] = readb(base + offset + i);
+               return;
+       }
+
+       switch (len) {
+       case 1:
+               b = readb(base + offset);
+               memcpy(buf, &b, sizeof b);
+               break;
+       case 2:
+               w = cpu_to_le16(readw(base + offset));
+               memcpy(buf, &w, sizeof w);
+               break;
+       case 4:
+               l = cpu_to_le32(readl(base + offset));
+               memcpy(buf, &l, sizeof l);
+               break;
+       case 8:
+               l = cpu_to_le32(readl(base + offset));
+               memcpy(buf, &l, sizeof l);
+               l = cpu_to_le32(ioread32(base + offset + sizeof l));
+               memcpy(buf + sizeof l, &l, sizeof l);
+               break;
+       default:
+               BUG();
+       }
 }
 
 static void vm_set(struct virtio_device *vdev, unsigned offset,
                   const void *buf, unsigned len)
 {
        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-       const u8 *ptr = buf;
-       int i;
+       void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+       u8 b;
+       __le16 w;
+       __le32 l;
 
-       for (i = 0; i < len; i++)
-               writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+       if (vm_dev->version == 1) {
+               const u8 *ptr = buf;
+               int i;
+
+               for (i = 0; i < len; i++)
+                       writeb(ptr[i], base + offset + i);
+
+               return;
+       }
+
+       switch (len) {
+       case 1:
+               memcpy(&b, buf, sizeof b);
+               writeb(b, base + offset);
+               break;
+       case 2:
+               memcpy(&w, buf, sizeof w);
+               writew(le16_to_cpu(w), base + offset);
+               break;
+       case 4:
+               memcpy(&l, buf, sizeof l);
+               writel(le32_to_cpu(l), base + offset);
+               break;
+       case 8:
+               memcpy(&l, buf, sizeof l);
+               writel(le32_to_cpu(l), base + offset);
+               memcpy(&l, buf + sizeof l, sizeof l);
+               writel(le32_to_cpu(l), base + offset + sizeof l);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static u32 vm_generation(struct virtio_device *vdev)
+{
+       struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+       if (vm_dev->version == 1)
+               return 0;
+       else
+               return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
 }
 
 static u8 vm_get_status(struct virtio_device *vdev)
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
 static const struct virtio_config_ops virtio_mmio_config_ops = {
        .get            = vm_get,
        .set            = vm_set,
+       .generation     = vm_generation,
        .get_status     = vm_get_status,
        .set_status     = vm_set_status,
        .reset          = vm_reset,
index 6df940528fd21b0cef8524004645fe472b7f7061..1443b3c391de497c05fe332f1c4cdd067bc5f5c9 100644 (file)
@@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
 
        if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
                err = request_irq(wdt->irq, wdt_interrupt,
-                                 IRQF_SHARED | IRQF_IRQPOLL,
+                                 IRQF_SHARED | IRQF_IRQPOLL |
+                                 IRQF_NO_SUSPEND,
                                  pdev->name, wdt);
                if (err)
                        return err;
index 2140398a2a8c6b5f521c6250bb56a56ad41c9df7..2ccd3592d41f549967f7597ed86bd77aef8556bf 100644 (file)
@@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
 obj-$(CONFIG_HOTPLUG_CPU)              += cpu_hotplug.o
 endif
 obj-$(CONFIG_X86)                      += fallback.o
-obj-y  += grant-table.o features.o balloon.o manage.o
+obj-y  += grant-table.o features.o balloon.o manage.o preempt.o
 obj-y  += events/
 obj-y  += xenbus/
 
index b4bca2d4a7e53c7675b25d632b560e1369b19020..70fba973a107165c2c29b2104d8f4d438faddc2b 100644 (file)
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq)
        pirq_query_unmask(irq);
 
        rc = set_evtchn_to_irq(evtchn, irq);
-       if (rc != 0) {
-               pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
-                      irq, rc);
-               xen_evtchn_close(evtchn);
-               return 0;
-       }
+       if (rc)
+               goto err;
+
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;
 
+       rc = xen_evtchn_port_setup(info);
+       if (rc)
+               goto err;
+
 out:
        unmask_evtchn(evtchn);
        eoi_pirq(irq_get_irq_data(irq));
 
        return 0;
+
+err:
+       pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+       xen_evtchn_close(evtchn);
+       return 0;
 }
 
 static unsigned int startup_pirq(struct irq_data *data)
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
new file mode 100644 (file)
index 0000000..a1800c1
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Preemptible hypercalls
+ *
+ * Copyright (C) 2014 Citrix Systems R&D ltd.
+ *
+ * This source code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <xen/xen-ops.h>
+
+#ifndef CONFIG_PREEMPT
+
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+asmlinkage __visible void xen_maybe_preempt_hcall(void)
+{
+       if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+                    && should_resched())) {
+               /*
+                * Clear flag as we may be rescheduled on a different
+                * cpu.
+                */
+               __this_cpu_write(xen_in_preemptible_hcall, false);
+               _cond_resched();
+               __this_cpu_write(xen_in_preemptible_hcall, true);
+       }
+}
+#endif /* CONFIG_PREEMPT */
index 569a13b9e856de5c3900050d583844f401243e96..59ac71c4a04352d055f40435444e7ccc46debf31 100644 (file)
@@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata)
        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;
 
+       xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
+       xen_preemptible_hcall_end();
 
        return ret;
 }
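
The comment block in the new drivers/xen/preempt.c above spells out the contract: a task sets the per-cpu xen_in_preemptible_hcall flag only for the duration of a long-running hypercall, and xen_maybe_preempt_hcall() then reschedules it voluntarily on non-preemptible kernels. privcmd_ioctl_hypercall() is the caller converted here; as a minimal sketch that is not part of this merge, another in-kernel user could bracket its own call the same way with the helpers from <xen/xen-ops.h>. Both my_long_hypercall() and my_driver_ioctl() below are hypothetical placeholders.

	/*
	 * Sketch only, not from this merge: bracketing a hypothetical
	 * long-running hypercall with the helpers privcmd uses above.
	 */
	#include <xen/xen-ops.h>

	extern long my_long_hypercall(unsigned long arg);	/* hypothetical */

	static long my_driver_ioctl(unsigned long arg)
	{
		long ret;

		xen_preemptible_hcall_begin();	/* allow voluntary preemption */
		ret = my_long_hypercall(arg);	/* may run for tens of seconds */
		xen_preemptible_hcall_end();	/* clear the per-cpu flag again */

		return ret;
	}
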
index 46ae0f9f02adcca044734c58f56a93ca12801f85..75fe3d466515a08cf8ec8da7eebafd8c5b903895 100644 (file)
@@ -16,7 +16,7 @@
 #include "conf_space.h"
 #include "conf_space_quirks.h"
 
-static bool permissive;
+bool permissive;
 module_param(permissive, bool, 0644);
 
 /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
index e56c934ad137be00ce34e5099726e31472e86a93..2e1d73d1d5d09393ebf7e2ab21a026b709e5bb5f 100644 (file)
@@ -64,6 +64,8 @@ struct config_field_entry {
        void *data;
 };
 
+extern bool permissive;
+
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
 
 /* Add fields to a device - the add_fields macro expects to get a pointer to
index c5ee82587e8cc3b5feb5e594763da576765fd268..2d7369391472fd572dd3f5b837be3a2625ab4a93 100644 (file)
 #include "pciback.h"
 #include "conf_space.h"
 
+struct pci_cmd_info {
+       u16 val;
+};
+
 struct pci_bar_info {
        u32 val;
        u32 len_val;
@@ -20,22 +24,36 @@ struct pci_bar_info {
 #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
 #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
 
-static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+/* Bits guests are allowed to control in permissive mode. */
+#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
+                          PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
+                          PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
+
+static void *command_init(struct pci_dev *dev, int offset)
 {
-       int i;
-       int ret;
-
-       ret = xen_pcibk_read_config_word(dev, offset, value, data);
-       if (!pci_is_enabled(dev))
-               return ret;
-
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-               if (dev->resource[i].flags & IORESOURCE_IO)
-                       *value |= PCI_COMMAND_IO;
-               if (dev->resource[i].flags & IORESOURCE_MEM)
-                       *value |= PCI_COMMAND_MEMORY;
+       struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       int err;
+
+       if (!cmd)
+               return ERR_PTR(-ENOMEM);
+
+       err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
+       if (err) {
+               kfree(cmd);
+               return ERR_PTR(err);
        }
 
+       return cmd;
+}
+
+static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+{
+       int ret = pci_read_config_word(dev, offset, value);
+       const struct pci_cmd_info *cmd = data;
+
+       *value &= PCI_COMMAND_GUEST;
+       *value |= cmd->val & ~PCI_COMMAND_GUEST;
+
        return ret;
 }
 
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
 {
        struct xen_pcibk_dev_data *dev_data;
        int err;
+       u16 val;
+       struct pci_cmd_info *cmd = data;
 
        dev_data = pci_get_drvdata(dev);
        if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
                }
        }
 
+       cmd->val = value;
+
+       if (!permissive && (!dev_data || !dev_data->permissive))
+               return 0;
+
+       /* Only allow the guest to control certain bits. */
+       err = pci_read_config_word(dev, offset, &val);
+       if (err || val == value)
+               return err;
+
+       value &= PCI_COMMAND_GUEST;
+       value |= val & ~PCI_COMMAND_GUEST;
+
        return pci_write_config_word(dev, offset, value);
 }
 
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
        {
         .offset    = PCI_COMMAND,
         .size      = 2,
+        .init      = command_init,
+        .release   = bar_release,
         .u.w.read  = command_read,
         .u.w.write = command_write,
        },
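
The command_init()/command_read()/command_write() trio above keeps a shadow copy of what the guest last wrote and merges it with the real register on reads: bits in PCI_COMMAND_GUEST are taken from the hardware, everything else comes from the shadow. A standalone sketch of that merge, using the standard PCI command bit values and, purely for brevity, a PCI_COMMAND_GUEST reduced to the bus-master bit (the mask in the patch is wider):

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_COMMAND_IO		0x0001
	#define PCI_COMMAND_MEMORY	0x0002
	#define PCI_COMMAND_MASTER	0x0004
	#define PCI_COMMAND_GUEST	PCI_COMMAND_MASTER	/* reduced for the sketch */

	int main(void)
	{
		uint16_t hw     = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
		uint16_t shadow = 0;	/* cmd->val: the guest never enabled anything */

		/* Same merge as command_read(): guest-controllable bits come from
		 * hardware, the remaining bits from the shadowed last write. */
		uint16_t view = (hw & PCI_COMMAND_GUEST) | (shadow & ~PCI_COMMAND_GUEST);

		printf("guest sees %#06x\n", view);	/* 0x0004: only bus mastering */
		return 0;
	}
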
index 61653a03a8f5037c12e28c5087e3fd8960342460..9faca6a60bb01b33c311f86b01af239280934a78 100644 (file)
@@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
 static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 {
        struct vscsiif_back_ring *ring = &info->ring;
-       struct vscsiif_request *ring_req;
+       struct vscsiif_request ring_req;
        struct vscsibk_pend *pending_req;
        RING_IDX rc, rp;
        int err, more_to_do;
        uint32_t result;
-       uint8_t act;
 
        rc = ring->req_cons;
        rp = ring->sring->req_prod;
@@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                if (!pending_req)
                        return 1;
 
-               ring_req = RING_GET_REQUEST(ring, rc);
+               ring_req = *RING_GET_REQUEST(ring, rc);
                ring->req_cons = ++rc;
 
-               act = ring_req->act;
-               err = prepare_pending_reqs(info, ring_req, pending_req);
+               err = prepare_pending_reqs(info, &ring_req, pending_req);
                if (err) {
                        switch (err) {
                        case -ENODEV:
@@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                        return 1;
                }
 
-               switch (act) {
+               switch (ring_req.act) {
                case VSCSIIF_ACT_SCSI_CDB:
-                       if (scsiback_gnttab_data_map(ring_req, pending_req)) {
+                       if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
                                scsiback_fast_flush_area(pending_req);
                                scsiback_do_resp_with_sense(NULL,
                                        DRIVER_ERROR << 24, 0, pending_req);
@@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
                        break;
                case VSCSIIF_ACT_SCSI_ABORT:
                        scsiback_device_action(pending_req, TMR_ABORT_TASK,
-                               ring_req->ref_rqid);
+                               ring_req.ref_rqid);
                        break;
                case VSCSIIF_ACT_SCSI_RESET:
                        scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
index 9ee5343d48849d85c1404538ce3f5433ad460d6a..3662f1d1d9cf0fc2f73c44fa4c34fb6e14c0af5f 100644 (file)
@@ -1127,7 +1127,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
        }
 
        /* Write all dirty data */
-       if (S_ISREG(dentry->d_inode->i_mode))
+       if (d_is_reg(dentry))
                filemap_write_and_wait(dentry->d_inode->i_mapping);
 
        retval = p9_client_wstat(fid, &wstat);
index 118a2e0088d8fdd8391654a44edb06157dad5629..f8e52a1854c1ab383e32383ac65a0f167e385793 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 
        ret = -EINVAL;
        if (unlikely(ctx || nr_events == 0)) {
-               pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+               pr_debug("EINVAL: ctx %lu nr_events %u\n",
                         ctx, nr_events);
                goto out;
        }
@@ -1333,7 +1333,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 
                return ret;
        }
-       pr_debug("EINVAL: io_destroy: invalid context id\n");
+       pr_debug("EINVAL: invalid context id\n");
        return -EINVAL;
 }
 
@@ -1515,7 +1515,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
            (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
            ((ssize_t)iocb->aio_nbytes < 0)
           )) {
-               pr_debug("EINVAL: io_submit: overflow check\n");
+               pr_debug("EINVAL: overflow check\n");
                return -EINVAL;
        }
 
index aaf96cb25452cf04a5d7a47f2b506486e67cd502..ac7d921ed9844b0a0c6afd0e6d4eaf4ca718f955 100644 (file)
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
  */
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
 {
-       struct autofs_dev_ioctl tmp;
+       struct autofs_dev_ioctl tmp, *res;
 
        if (copy_from_user(&tmp, in, sizeof(tmp)))
                return ERR_PTR(-EFAULT);
@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
        if (tmp.size > (PATH_MAX + sizeof(tmp)))
                return ERR_PTR(-ENAMETOOLONG);
 
-       return memdup_user(in, tmp.size);
+       res = memdup_user(in, tmp.size);
+       if (!IS_ERR(res))
+               res->size = tmp.size;
+
+       return res;
 }
 
 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
index bfdbaba9c2ba40e7216d7435d69d45016c9ec5ff..11dd118f75e25e6a8f25f2143724901fb64c72a0 100644 (file)
@@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry,
                return NULL;
        }
 
-       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dentry->d_inode && d_is_symlink(dentry)) {
                DPRINTK("checking symlink %p %pd", dentry, dentry);
                /*
                 * A symlink can't be "busy" in the usual sense so
index dbb5b7212ce162130727256d49351e3f2d2677c9..7e44fdd03e2dd0a684036be49e7c2152c192ee63 100644 (file)
@@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
        struct dentry *dentry = file->f_path.dentry;
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
 
-       DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry);
+       DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry);
 
        if (autofs4_oz_mode(sbi))
                goto out;
@@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
         * having d_mountpoint() true, so there's no need to call back
         * to the daemon.
         */
-       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dentry->d_inode && d_is_symlink(dentry)) {
                spin_unlock(&sbi->fs_lock);
                goto done;
        }
@@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
                 * an incorrect ELOOP error return.
                 */
                if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
-                   (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+                   (dentry->d_inode && d_is_symlink(dentry)))
                        status = -EISDIR;
        }
        spin_unlock(&sbi->fs_lock);
index afd2b4408adf53d78c716043c847ba48cf5c3bef..861b1e1c477710faced77767a0327c1d6e6b79c5 100644 (file)
 #include <linux/namei.h>
 #include <linux/poll.h>
 
-
-static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_read(struct file *filp, char __user *buf,
-                       size_t size, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_write(struct file *filp, const char __user *buf,
-                       size_t siz, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       return -EIO;
-}
-
-static int bad_file_readdir(struct file *file, struct dir_context *ctx)
-{
-       return -EIO;
-}
-
-static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
-{
-       return POLLERR;
-}
-
-static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
-                       unsigned long arg)
-{
-       return -EIO;
-}
-
-static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
-                       unsigned long arg)
-{
-       return -EIO;
-}
-
-static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       return -EIO;
-}
-
 static int bad_file_open(struct inode *inode, struct file *filp)
 {
        return -EIO;
 }
 
-static int bad_file_flush(struct file *file, fl_owner_t id)
-{
-       return -EIO;
-}
-
-static int bad_file_release(struct inode *inode, struct file *filp)
-{
-       return -EIO;
-}
-
-static int bad_file_fsync(struct file *file, loff_t start, loff_t end,
-                         int datasync)
-{
-       return -EIO;
-}
-
-static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
-{
-       return -EIO;
-}
-
-static int bad_file_fasync(int fd, struct file *filp, int on)
-{
-       return -EIO;
-}
-
-static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_sendpage(struct file *file, struct page *page,
-                       int off, size_t len, loff_t *pos, int more)
-{
-       return -EIO;
-}
-
-static unsigned long bad_file_get_unmapped_area(struct file *file,
-                               unsigned long addr, unsigned long len,
-                               unsigned long pgoff, unsigned long flags)
-{
-       return -EIO;
-}
-
-static int bad_file_check_flags(int flags)
-{
-       return -EIO;
-}
-
-static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
-                       struct file *out, loff_t *ppos, size_t len,
-                       unsigned int flags)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
-                       struct pipe_inode_info *pipe, size_t len,
-                       unsigned int flags)
-{
-       return -EIO;
-}
-
 static const struct file_operations bad_file_ops =
 {
-       .llseek         = bad_file_llseek,
-       .read           = bad_file_read,
-       .write          = bad_file_write,
-       .aio_read       = bad_file_aio_read,
-       .aio_write      = bad_file_aio_write,
-       .iterate        = bad_file_readdir,
-       .poll           = bad_file_poll,
-       .unlocked_ioctl = bad_file_unlocked_ioctl,
-       .compat_ioctl   = bad_file_compat_ioctl,
-       .mmap           = bad_file_mmap,
        .open           = bad_file_open,
-       .flush          = bad_file_flush,
-       .release        = bad_file_release,
-       .fsync          = bad_file_fsync,
-       .aio_fsync      = bad_file_aio_fsync,
-       .fasync         = bad_file_fasync,
-       .lock           = bad_file_lock,
-       .sendpage       = bad_file_sendpage,
-       .get_unmapped_area = bad_file_get_unmapped_area,
-       .check_flags    = bad_file_check_flags,
-       .flock          = bad_file_flock,
-       .splice_write   = bad_file_splice_write,
-       .splice_read    = bad_file_splice_read,
 };
 
 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
index 02b16910f4c9d500619286029ee16f0815bce269..995986b8e36b8f3fd8529582c50e545d9b26322e 100644 (file)
@@ -645,11 +645,12 @@ out:
 
 static unsigned long randomize_stack_top(unsigned long stack_top)
 {
-       unsigned int random_variable = 0;
+       unsigned long random_variable = 0;
 
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               random_variable = get_random_int() & STACK_RND_MASK;
+               random_variable = (unsigned long) get_random_int();
+               random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
 #ifdef CONFIG_STACK_GROWSUP
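
The randomize_stack_top() change above widens the intermediate because the shift used to be evaluated in a 32-bit unsigned int. Assuming the x86-64 values of STACK_RND_MASK (0x3fffff) and PAGE_SHIFT (12), the shifted offset needs 34 bits, so the old code silently dropped the high bits of the randomization. A standalone sketch of the truncation on an LP64 machine:

	#include <stdio.h>

	int main(void)
	{
		unsigned int  narrow = 0x3fffffu;	/* stands in for STACK_RND_MASK */
		unsigned long wide   = narrow;

		/* The shift is evaluated in 32 bits, so the top bits are lost. */
		printf("%#lx\n", (unsigned long)(narrow << 12));	/* 0xfffff000 */

		/* Widening first keeps the full 34-bit offset. */
		printf("%#lx\n", wide << 12);				/* 0x3fffff000 */
		return 0;
	}
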
index 993642199326a757f55c78dbc2722ecc2b409b60..6d67f32e648df72bc5f5b5d707ff39ce8fa4302b 100644 (file)
@@ -1645,14 +1645,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 
        parent_nritems = btrfs_header_nritems(parent);
        blocksize = root->nodesize;
-       end_slot = parent_nritems;
+       end_slot = parent_nritems - 1;
 
-       if (parent_nritems == 1)
+       if (parent_nritems <= 1)
                return 0;
 
        btrfs_set_lock_blocking(parent);
 
-       for (i = start_slot; i < end_slot; i++) {
+       for (i = start_slot; i <= end_slot; i++) {
                int close = 1;
 
                btrfs_node_key(parent, &disk_key, i);
@@ -1669,7 +1669,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                        other = btrfs_node_blockptr(parent, i - 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
-               if (!close && i < end_slot - 2) {
+               if (!close && i < end_slot) {
                        other = btrfs_node_blockptr(parent, i + 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
index 571f402d3fc46e5f0205451e85a7b78f3cc16b50..6f080451fcb11181c10247a542b599412bb42f22 100644 (file)
@@ -3208,6 +3208,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
                return 0;
        }
 
+       if (trans->aborted)
+               return 0;
 again:
        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
@@ -3243,6 +3245,20 @@ again:
         */
        BTRFS_I(inode)->generation = 0;
        ret = btrfs_update_inode(trans, root, inode);
+       if (ret) {
+               /*
+                * So theoretically we could recover from this, simply set the
+                * super cache generation to 0 so we know to invalidate the
+                * cache, but then we'd have to keep track of the block groups
+                * that fail this way so we know we _have_ to reset this cache
+                * before the next commit or risk reading stale cache.  So to
+                * limit our exposure to horrible edge cases, let's just abort the
+                * transaction; this only happens in really bad situations
+                * anyway.
+                */
+               btrfs_abort_transaction(trans, root, ret);
+               goto out_put;
+       }
        WARN_ON(ret);
 
        if (i_size_read(inode) > 0) {
index b78bbbac900db833e54fc63bdbff8bae365225f3..30982bbd31c30c2b154836b0f51b94c37e22923c 100644 (file)
@@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        mutex_unlock(&inode->i_mutex);
 
        /*
-        * we want to make sure fsync finds this change
-        * but we haven't joined a transaction running right now.
-        *
-        * Later on, someone is sure to update the inode and get the
-        * real transid recorded.
-        *
-        * We set last_trans now to the fs_info generation + 1,
-        * this will either be one more than the running transaction
-        * or the generation used for the next transaction if there isn't
-        * one running right now.
-        *
         * We also have to set last_sub_trans to the current log transid,
         * otherwise subsequent syncs to a file that's been synced in this
          * transaction will appear to have already occurred.
         */
-       BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
        BTRFS_I(inode)->last_sub_trans = root->log_transid;
        if (num_written > 0) {
                err = generic_write_sync(file, pos, num_written);
@@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        atomic_inc(&root->log_batch);
 
        /*
-        * check the transaction that last modified this inode
-        * and see if its already been committed
-        */
-       if (!BTRFS_I(inode)->last_trans) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
-       }
-
-       /*
-        * if the last transaction that changed this file was before
-        * the current transaction, we can bail out now without any
-        * syncing
+        * If the last transaction that changed this file was before the current
+        * transaction and we have the full sync flag set in our inode, we can
+        * bail out now without any syncing.
+        *
+        * Note that we can't bail out if the full sync flag isn't set. This is
+        * because when the full sync flag is set we start all ordered extents
+        * and wait for them to fully complete - when they complete they update
+        * the inode's last_trans field through:
+        *
+        *     btrfs_finish_ordered_io() ->
+        *         btrfs_update_inode_fallback() ->
+        *             btrfs_update_inode() ->
+        *                 btrfs_set_inode_last_trans()
+        *
+        * So we are sure that last_trans is up to date and can do this check to
+        * bail out safely. For the fast path, when the full sync flag is not
+        * set in our inode, we can not do it because we start only our ordered
+        * extents and don't wait for them to complete (that is when
+        * btrfs_finish_ordered_io runs), so here at this point their last_trans
+        * value might be less than or equal to fs_info->last_trans_committed,
+        * and setting a speculative last_trans for an inode when a buffered
+        * write is made (such as fs_info->generation + 1 for example) would not
+        * be reliable since after setting the value and before fsync is called
+        * any number of transactions can start and commit (transaction kthread
+        * commits the current transaction periodically), and a transaction
+        * commit neither starts nor waits for ordered extents to complete.
         */
        smp_mb();
        if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
-           BTRFS_I(inode)->last_trans <=
-           root->fs_info->last_trans_committed) {
-               BTRFS_I(inode)->last_trans = 0;
-
+           (full_sync && BTRFS_I(inode)->last_trans <=
+            root->fs_info->last_trans_committed)) {
                /*
                 * We've had everything committed since the last time we were
                 * modified so clear this flag in case it was set for whatever
@@ -2275,6 +2275,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        bool same_page;
        bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
        u64 ino_size;
+       bool truncated_page = false;
+       bool updated_inode = false;
 
        ret = btrfs_wait_ordered_range(inode, offset, len);
        if (ret)
@@ -2306,13 +2308,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
         * entire page.
         */
        if (same_page && len < PAGE_CACHE_SIZE) {
-               if (offset < ino_size)
+               if (offset < ino_size) {
+                       truncated_page = true;
                        ret = btrfs_truncate_page(inode, offset, len, 0);
+               } else {
+                       ret = 0;
+               }
                goto out_only_mutex;
        }
 
        /* zero back part of the first page */
        if (offset < ino_size) {
+               truncated_page = true;
                ret = btrfs_truncate_page(inode, offset, 0, 0);
                if (ret) {
                        mutex_unlock(&inode->i_mutex);
@@ -2348,6 +2355,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                if (!ret) {
                        /* zero the front end of the last page */
                        if (tail_start + tail_len < ino_size) {
+                               truncated_page = true;
                                ret = btrfs_truncate_page(inode,
                                                tail_start + tail_len, 0, 1);
                                if (ret)
@@ -2357,8 +2365,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        }
 
        if (lockend < lockstart) {
-               mutex_unlock(&inode->i_mutex);
-               return 0;
+               ret = 0;
+               goto out_only_mutex;
        }
 
        while (1) {
@@ -2506,6 +2514,7 @@ out_trans:
 
        trans->block_rsv = &root->fs_info->trans_block_rsv;
        ret = btrfs_update_inode(trans, root, inode);
+       updated_inode = true;
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
 out_free:
@@ -2515,6 +2524,22 @@ out:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                             &cached_state, GFP_NOFS);
 out_only_mutex:
+       if (!updated_inode && truncated_page && !ret && !err) {
+               /*
+                * If we only end up zeroing part of a page, we still need to
+                * update the inode item, so that all the time fields are
+                * updated as well as the necessary btrfs inode in memory fields
+                * for detecting, at fsync time, if the inode isn't yet in the
+                * log tree or it's there but not up to date.
+                */
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans)) {
+                       err = PTR_ERR(trans);
+               } else {
+                       err = btrfs_update_inode(trans, root, inode);
+                       ret = btrfs_end_transaction(trans, root);
+               }
+       }
        mutex_unlock(&inode->i_mutex);
        if (ret && !err)
                err = ret;
index a85c23dfcddbcfd992069811f24b116d74e4dc21..da828cf5e8f885b542012dc3d23854916dc35106 100644 (file)
@@ -7285,7 +7285,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
            ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
             em->block_start != EXTENT_MAP_HOLE)) {
                int type;
-               int ret;
                u64 block_start, orig_start, orig_block_len, ram_bytes;
 
                if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
index d49fe8a0f6b5c9ada112830f6f27a8eafe202c92..74609b931ba5564da01de0955b8aa1d3142512d7 100644 (file)
@@ -776,11 +776,11 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
            IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
                return -EPERM;
        if (isdir) {
-               if (!S_ISDIR(victim->d_inode->i_mode))
+               if (!d_is_dir(victim))
                        return -ENOTDIR;
                if (IS_ROOT(victim))
                        return -EBUSY;
-       } else if (S_ISDIR(victim->d_inode->i_mode))
+       } else if (d_is_dir(victim))
                return -EISDIR;
        if (IS_DEADDIR(dir))
                return -ENOENT;
index 534544e08f769486a97c3c7f3719bed982500783..157cc54fc63486e485a95bf6d8d6da692ef171ca 100644 (file)
@@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode,
                        continue;
                if (entry_end(ordered) <= start)
                        break;
-               if (!list_empty(&ordered->log_list))
-                       continue;
-               if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+               if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;
                list_add(&ordered->log_list, logged_list);
                atomic_inc(&ordered->refs);
@@ -511,8 +509,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));
 
-               if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
-                       list_add_tail(&ordered->trans_list, &trans->ordered);
+               list_add_tail(&ordered->trans_list, &trans->ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
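
A userspace sketch of the race the test_and_set_bit() change above closes: checking a flag and then setting it as two steps lets two tasks both see it clear, while an atomic test-and-set (fetch_or here, standing in for the kernel primitive) makes the "already logged?" check and the marking a single indivisible step.

	#include <stdatomic.h>
	#include <stdio.h>

	#define ORDERED_LOGGED	(1u << 0)

	static atomic_uint flags;

	static int try_log_once(void)
	{
		/* returns 1 only for the first caller to set the bit */
		return !(atomic_fetch_or(&flags, ORDERED_LOGGED) & ORDERED_LOGGED);
	}

	int main(void)
	{
		int first = try_log_once();
		int second = try_log_once();

		printf("%d %d\n", first, second);	/* prints "1 0" */
		return 0;
	}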
index fe5857223515d14753c1f75ce60c2c12e16a3e67..d6033f540cc75cfb49ddbe5f07689d67fdefc492 100644 (file)
@@ -230,6 +230,7 @@ struct pending_dir_move {
        u64 parent_ino;
        u64 ino;
        u64 gen;
+       bool is_orphan;
        struct list_head update_refs;
 };
 
@@ -2984,7 +2985,8 @@ static int add_pending_dir_move(struct send_ctx *sctx,
                                u64 ino_gen,
                                u64 parent_ino,
                                struct list_head *new_refs,
-                               struct list_head *deleted_refs)
+                               struct list_head *deleted_refs,
+                               const bool is_orphan)
 {
        struct rb_node **p = &sctx->pending_dir_moves.rb_node;
        struct rb_node *parent = NULL;
@@ -2999,6 +3001,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        pm->parent_ino = parent_ino;
        pm->ino = ino;
        pm->gen = ino_gen;
+       pm->is_orphan = is_orphan;
        INIT_LIST_HEAD(&pm->list);
        INIT_LIST_HEAD(&pm->update_refs);
        RB_CLEAR_NODE(&pm->node);
@@ -3131,16 +3134,20 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        rmdir_ino = dm->rmdir_ino;
        free_waiting_dir_move(sctx, dm);
 
-       ret = get_first_ref(sctx->parent_root, pm->ino,
-                           &parent_ino, &parent_gen, name);
-       if (ret < 0)
-               goto out;
-
-       ret = get_cur_path(sctx, parent_ino, parent_gen,
-                          from_path);
-       if (ret < 0)
-               goto out;
-       ret = fs_path_add_path(from_path, name);
+       if (pm->is_orphan) {
+               ret = gen_unique_name(sctx, pm->ino,
+                                     pm->gen, from_path);
+       } else {
+               ret = get_first_ref(sctx->parent_root, pm->ino,
+                                   &parent_ino, &parent_gen, name);
+               if (ret < 0)
+                       goto out;
+               ret = get_cur_path(sctx, parent_ino, parent_gen,
+                                  from_path);
+               if (ret < 0)
+                       goto out;
+               ret = fs_path_add_path(from_path, name);
+       }
        if (ret < 0)
                goto out;
 
@@ -3150,7 +3157,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                LIST_HEAD(deleted_refs);
                ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
                ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
-                                          &pm->update_refs, &deleted_refs);
+                                          &pm->update_refs, &deleted_refs,
+                                          pm->is_orphan);
                if (ret < 0)
                        goto out;
                if (rmdir_ino) {
@@ -3283,6 +3291,127 @@ out:
        return ret;
 }
 
+/*
+ * We might need to delay a directory rename even when no ancestor directory
+ * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
+ * renamed. This happens when we rename a directory to the old name (the name
+ * in the parent root) of some other unrelated directory that got its rename
+ * delayed due to some ancestor with higher number that got renamed.
+ *
+ * Example:
+ *
+ * Parent snapshot:
+ * .                                       (ino 256)
+ * |---- a/                                (ino 257)
+ * |     |---- file                        (ino 260)
+ * |
+ * |---- b/                                (ino 258)
+ * |---- c/                                (ino 259)
+ *
+ * Send snapshot:
+ * .                                       (ino 256)
+ * |---- a/                                (ino 258)
+ * |---- x/                                (ino 259)
+ *       |---- y/                          (ino 257)
+ *             |----- file                 (ino 260)
+ *
+ * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
+ * from 'a' to 'x/y' happening first, which in turn depends on the rename of
+ * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
+ * must issue is:
+ *
+ * 1 - rename 259 from 'c' to 'x'
+ * 2 - rename 257 from 'a' to 'x/y'
+ * 3 - rename 258 from 'b' to 'a'
+ *
+ * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
+ * be done right away and < 0 on error.
+ */
+static int wait_for_dest_dir_move(struct send_ctx *sctx,
+                                 struct recorded_ref *parent_ref,
+                                 const bool is_orphan)
+{
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       struct btrfs_key di_key;
+       struct btrfs_dir_item *di;
+       u64 left_gen;
+       u64 right_gen;
+       int ret = 0;
+
+       if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
+               return 0;
+
+       path = alloc_path_for_send();
+       if (!path)
+               return -ENOMEM;
+
+       key.objectid = parent_ref->dir;
+       key.type = BTRFS_DIR_ITEM_KEY;
+       key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
+
+       ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
+       if (ret < 0) {
+               goto out;
+       } else if (ret > 0) {
+               ret = 0;
+               goto out;
+       }
+
+       di = btrfs_match_dir_item_name(sctx->parent_root, path,
+                                      parent_ref->name, parent_ref->name_len);
+       if (!di) {
+               ret = 0;
+               goto out;
+       }
+       /*
+        * di_key.objectid has the number of the inode that has a dentry in the
+        * parent directory with the same name that sctx->cur_ino is being
+        * renamed to. We need to check if that inode is in the send root as
+        * well and if it is currently marked as an inode with a pending rename,
+        * if it is, we need to delay the rename of sctx->cur_ino as well, so
+        * that it happens after that other inode is renamed.
+        */
+       btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
+       if (di_key.type != BTRFS_INODE_ITEM_KEY) {
+               ret = 0;
+               goto out;
+       }
+
+       ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
+                            &left_gen, NULL, NULL, NULL, NULL);
+       if (ret < 0)
+               goto out;
+       ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
+                            &right_gen, NULL, NULL, NULL, NULL);
+       if (ret < 0) {
+               if (ret == -ENOENT)
+                       ret = 0;
+               goto out;
+       }
+
+       /* Different inode, no need to delay the rename of sctx->cur_ino */
+       if (right_gen != left_gen) {
+               ret = 0;
+               goto out;
+       }
+
+       if (is_waiting_for_move(sctx, di_key.objectid)) {
+               ret = add_pending_dir_move(sctx,
+                                          sctx->cur_ino,
+                                          sctx->cur_inode_gen,
+                                          di_key.objectid,
+                                          &sctx->new_refs,
+                                          &sctx->deleted_refs,
+                                          is_orphan);
+               if (!ret)
+                       ret = 1;
+       }
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
 static int wait_for_parent_move(struct send_ctx *sctx,
                                struct recorded_ref *parent_ref)
 {
@@ -3349,7 +3478,8 @@ out:
                                           sctx->cur_inode_gen,
                                           ino,
                                           &sctx->new_refs,
-                                          &sctx->deleted_refs);
+                                          &sctx->deleted_refs,
+                                          false);
                if (!ret)
                        ret = 1;
        }
@@ -3372,6 +3502,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
        int did_overwrite = 0;
        int is_orphan = 0;
        u64 last_dir_ino_rm = 0;
+       bool can_rename = true;
 
 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
 
@@ -3490,12 +3621,22 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                        }
                }
 
+               if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
+                       ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
+                       if (ret < 0)
+                               goto out;
+                       if (ret == 1) {
+                               can_rename = false;
+                               *pending_move = 1;
+                       }
+               }
+
                /*
                 * link/move the ref to the new place. If we have an orphan
                 * inode, move it and update valid_path. If not, link or move
                 * it depending on the inode mode.
                 */
-               if (is_orphan) {
+               if (is_orphan && can_rename) {
                        ret = send_rename(sctx, valid_path, cur->full_path);
                        if (ret < 0)
                                goto out;
@@ -3503,7 +3644,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                        ret = fs_path_copy(valid_path, cur->full_path);
                        if (ret < 0)
                                goto out;
-               } else {
+               } else if (can_rename) {
                        if (S_ISDIR(sctx->cur_inode_mode)) {
                                /*
                                 * Dirs can't be linked, so move it. For moved
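
A toy userspace sketch (not send.c; the structure and field names are invented) of the "pending dir move" idea in the hunks above: a rename that would collide with the old name of another directory that is itself waiting to move is queued behind it and replayed only once its blocker has been renamed, yielding the ordering from the comment's example.

	#include <stdio.h>

	struct pending { int ino; int waits_on; const char *cmd; };

	int main(void)
	{
		struct pending moves[] = {
			{ 258, 257, "rename 258 'b' -> 'a'"   },	/* blocked by 257 */
			{ 257, 259, "rename 257 'a' -> 'x/y'" },	/* blocked by 259 */
			{ 259,   0, "rename 259 'c' -> 'x'"   },	/* ready now */
		};
		int done[512] = { 0 };
		int emitted = 0, n = 3;

		/* keep sweeping until every move whose blocker finished has run */
		while (emitted < n) {
			for (int i = 0; i < n; i++) {
				struct pending *p = &moves[i];

				if (!done[p->ino] &&
				    (p->waits_on == 0 || done[p->waits_on])) {
					printf("%s\n", p->cmd);
					done[p->ino] = 1;
					emitted++;
				}
			}
		}
		return 0;
	}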
index 7e80f32550a663e7438d3e5fb8b1c37fcc391b64..88e51aded6bda9744a5fffa19b0b4e701dab925e 100644 (file)
@@ -1052,9 +1052,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
                if (ret)
                        return ret;
-               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-               if (ret)
-                       return ret;
        }
 
        return 0;
index 9a37f8b39bae9058a20aafe11c59793409115005..c5b8ba37f88e31fa188258af9c6e13036c2df18f 100644 (file)
@@ -1012,7 +1012,7 @@ again:
                base = btrfs_item_ptr_offset(leaf, path->slots[0]);
 
                while (cur_offset < item_size) {
-                       extref = (struct btrfs_inode_extref *)base + cur_offset;
+                       extref = (struct btrfs_inode_extref *)(base + cur_offset);
 
                        victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
 
index cd4d1315aaa92d43476a9e86c521e94a04519d0e..8222f6f74147972ba1b654c12b013c55a3b60825 100644 (file)
@@ -4903,10 +4903,17 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
 {
        struct btrfs_bio *bbio = kzalloc(
+                /* the size of the btrfs_bio */
                sizeof(struct btrfs_bio) +
+               /* plus the variable array for the stripes */
                sizeof(struct btrfs_bio_stripe) * (total_stripes) +
+               /* plus the variable array for the tgt dev */
                sizeof(int) * (real_stripes) +
-               sizeof(u64) * (real_stripes),
+               /*
+                * plus the raid_map, which includes both the tgt dev
+                * and the stripes
+                */
+               sizeof(u64) * (total_stripes),
                GFP_NOFS);
        if (!bbio)
                return NULL;
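
A userspace sketch of the sizing fix above: one allocation carries the struct plus three trailing variable-length regions, so each region's element count must match how the pointers into it are set up later (the raid_map covers all stripes, hence total_stripes rather than real_stripes). The struct layout here is simplified, not the real btrfs_bio.

	#include <stdint.h>
	#include <stdlib.h>
	#include <stdio.h>

	struct stripe { uint64_t dev; uint64_t physical; };
	struct bbio   { int num_stripes; struct stripe stripes[]; };

	static struct bbio *alloc_bbio(int total_stripes, int real_stripes)
	{
		struct bbio *b = calloc(1, sizeof(*b) +
				sizeof(struct stripe) * total_stripes +	/* stripes[] */
				sizeof(int) * real_stripes +		/* tgtdev map */
				sizeof(uint64_t) * total_stripes);	/* raid_map */
		if (b)
			b->num_stripes = total_stripes;
		return b;
	}

	int main(void)
	{
		struct bbio *b = alloc_bbio(6, 2);

		printf("%s\n", b ? "allocated" : "failed");
		free(b);
		return 0;
	}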
index 47b19465f0dc64e85d00d99eaee50e88de9f1014..883b93623bc5682ecde2684bfe936bb98fd9e51d 100644 (file)
@@ -111,6 +111,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                                        name, name_len, -1);
                if (!di && (flags & XATTR_REPLACE))
                        ret = -ENODATA;
+               else if (IS_ERR(di))
+                       ret = PTR_ERR(di);
                else if (di)
                        ret = btrfs_delete_one_dir_name(trans, root, path, di);
                goto out;
@@ -127,10 +129,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                ASSERT(mutex_is_locked(&inode->i_mutex));
                di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
                                        name, name_len, 0);
-               if (!di) {
+               if (!di)
                        ret = -ENODATA;
+               else if (IS_ERR(di))
+                       ret = PTR_ERR(di);
+               if (ret)
                        goto out;
-               }
                btrfs_release_path(path);
                di = NULL;
        }
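
A minimal userspace re-creation of the kernel's ERR_PTR convention that the xattr hunks above start honouring: a lookup may return NULL ("not found"), a valid pointer, or an errno encoded in the pointer, and all three cases must be told apart before use. The macros below mimic the kernel ones for illustration only.

	#include <stdio.h>
	#include <errno.h>
	#include <stdint.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(intptr_t)(err))
	#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
	#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

	static void *lookup(int what)
	{
		static int item;

		if (what == 0) return NULL;		/* not found */
		if (what == 1) return &item;		/* found */
		return ERR_PTR(-EIO);			/* lookup itself failed */
	}

	int main(void)
	{
		for (int what = 0; what < 3; what++) {
			void *di = lookup(what);
			long ret = 0;

			if (!di)
				ret = -ENODATA;
			else if (IS_ERR(di))
				ret = PTR_ERR(di);
			printf("case %d -> ret %ld\n", what, ret);
		}
		return 0;
	}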
index ce1b115dcc28bc2968009e5e8182004da99cf29e..f601def05bdf00663f5b1666514e1810068e1b19 100644 (file)
@@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
        /* extract the directory dentry from the cwd */
        get_fs_pwd(current->fs, &path);
 
-       if (!S_ISDIR(path.dentry->d_inode->i_mode))
+       if (!d_can_lookup(path.dentry))
                goto notdir;
 
        cachefiles_begin_secure(cache, &saved_cred);
@@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
        /* extract the directory dentry from the cwd */
        get_fs_pwd(current->fs, &path);
 
-       if (!S_ISDIR(path.dentry->d_inode->i_mode))
+       if (!d_can_lookup(path.dentry))
                goto notdir;
 
        cachefiles_begin_secure(cache, &saved_cred);
index 1c7293c3a93ae935c5be8cdfc149fcd3f84e0dad..232426214fdd1849b21a4e3c080563e2132e8bd2 100644 (file)
@@ -437,7 +437,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
        if (!object->backer)
                return -ENOBUFS;
 
-       ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+       ASSERT(d_is_reg(object->backer));
 
        fscache_set_store_limit(&object->fscache, ni_size);
 
@@ -501,7 +501,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op)
               op->object->debug_id, (unsigned long long)ni_size);
 
        if (object->backer) {
-               ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+               ASSERT(d_is_reg(object->backer));
 
                fscache_set_store_limit(&object->fscache, ni_size);
 
index 7f8e83f9d74eb87712db9e8fdc7da95fcccaefdb..1e51714eb33e15f5e6624699021d26d95e1c16aa 100644 (file)
@@ -277,7 +277,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
        _debug("remove %p from %p", rep, dir);
 
        /* non-directories can just be unlinked */
-       if (!S_ISDIR(rep->d_inode->i_mode)) {
+       if (!d_is_dir(rep)) {
                _debug("unlink stale object");
 
                path.mnt = cache->mnt;
@@ -323,7 +323,7 @@ try_again:
                return 0;
        }
 
-       if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
+       if (!d_can_lookup(cache->graveyard)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Graveyard no longer a directory");
                return -EIO;
@@ -475,7 +475,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
        ASSERT(parent->dentry);
        ASSERT(parent->dentry->d_inode);
 
-       if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
+       if (!(d_is_dir(parent->dentry))) {
                // TODO: convert file to dir
                _leave("looking up in none directory");
                return -ENOBUFS;
@@ -539,7 +539,7 @@ lookup_again:
                        _debug("mkdir -> %p{%p{ino=%lu}}",
                               next, next->d_inode, next->d_inode->i_ino);
 
-               } else if (!S_ISDIR(next->d_inode->i_mode)) {
+               } else if (!d_can_lookup(next)) {
                        pr_err("inode %lu is not a directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
@@ -568,8 +568,8 @@ lookup_again:
                        _debug("create -> %p{%p{ino=%lu}}",
                               next, next->d_inode, next->d_inode->i_ino);
 
-               } else if (!S_ISDIR(next->d_inode->i_mode) &&
-                          !S_ISREG(next->d_inode->i_mode)
+               } else if (!d_can_lookup(next) &&
+                          !d_is_reg(next)
                           ) {
                        pr_err("inode %lu is not a file or directory\n",
                               next->d_inode->i_ino);
@@ -642,7 +642,7 @@ lookup_again:
 
        /* open a file interface onto a data file */
        if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
-               if (S_ISREG(object->dentry->d_inode->i_mode)) {
+               if (d_is_reg(object->dentry)) {
                        const struct address_space_operations *aops;
 
                        ret = -EPERM;
@@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
        /* we need to make sure the subdir is a directory */
        ASSERT(subdir->d_inode);
 
-       if (!S_ISDIR(subdir->d_inode->i_mode)) {
+       if (!d_can_lookup(subdir)) {
                pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
index 616db0e77b44bd8481782047829dfd06605700ae..c6cd8d7a4eef91b379efc75888a94eba6345ac33 100644 (file)
@@ -900,7 +900,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
                return -ENOBUFS;
        }
 
-       ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+       ASSERT(d_is_reg(object->backer));
 
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);
index 0411dbb15815fd7036dc7f88b5926a4f30190c35..83e9976f718983ccfb5d95c56893e81c7d01caa9 100644 (file)
@@ -904,7 +904,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
-               op = S_ISDIR(dentry->d_inode->i_mode) ?
+               op = d_is_dir(dentry) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
index a3d774b351494b64b2f03c6db2d9270017defc76..d533075a823d5eb92e709547b8fe790c59cba981 100644 (file)
@@ -292,7 +292,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        }
        if (err)
                goto out_req;
-       if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
index 281ee011bb6a936125cdcfdde6d8122e66447b15..60cb88c1dd2bf88bb12f3e5451e33ed51d5ac1ff 100644 (file)
@@ -304,7 +304,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
                             (const char *) old_name, (const char *)new_name);
        if (!error) {
                if (new_dentry->d_inode) {
-                       if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+                       if (d_is_dir(new_dentry)) {
                                coda_dir_drop_nlink(old_dir);
                                coda_dir_inc_nlink(new_dir);
                        }
index a315677e44d34d8501b1b03629da0201283f5b1f..b65d1ef532d52d692cc7ebbdc3c7350d1dd9d735 100644 (file)
@@ -69,14 +69,13 @@ extern struct kmem_cache *configfs_dir_cachep;
 extern int configfs_is_root(struct config_item *item);
 
 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
-extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
+extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *));
 
 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
 extern int configfs_make_dirent(struct configfs_dirent *,
                                struct dentry *, void *, umode_t, int);
 extern int configfs_dirent_is_ready(struct configfs_dirent *);
 
-extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int);
 extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
 
 extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
index c9c298bd3058924b8fed471ccdebabeef2feeb88..cf0db005d2f58ab2ed2b42217777f643e6d0d235 100644 (file)
@@ -240,60 +240,26 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
        return 0;
 }
 
-static int init_dir(struct inode * inode)
+static void init_dir(struct inode * inode)
 {
        inode->i_op = &configfs_dir_inode_operations;
        inode->i_fop = &configfs_dir_operations;
 
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
-       return 0;
 }
 
-static int configfs_init_file(struct inode * inode)
+static void configfs_init_file(struct inode * inode)
 {
        inode->i_size = PAGE_SIZE;
        inode->i_fop = &configfs_file_operations;
-       return 0;
 }
 
-static int init_symlink(struct inode * inode)
+static void init_symlink(struct inode * inode)
 {
        inode->i_op = &configfs_symlink_inode_operations;
-       return 0;
-}
-
-static int create_dir(struct config_item *k, struct dentry *d)
-{
-       int error;
-       umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
-       struct dentry *p = d->d_parent;
-
-       BUG_ON(!k);
-
-       error = configfs_dirent_exists(p->d_fsdata, d->d_name.name);
-       if (!error)
-               error = configfs_make_dirent(p->d_fsdata, d, k, mode,
-                                            CONFIGFS_DIR | CONFIGFS_USET_CREATING);
-       if (!error) {
-               configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata);
-               error = configfs_create(d, mode, init_dir);
-               if (!error) {
-                       inc_nlink(p->d_inode);
-               } else {
-                       struct configfs_dirent *sd = d->d_fsdata;
-                       if (sd) {
-                               spin_lock(&configfs_dirent_lock);
-                               list_del_init(&sd->s_sibling);
-                               spin_unlock(&configfs_dirent_lock);
-                               configfs_put(sd);
-                       }
-               }
-       }
-       return error;
 }
 
-
 /**
  *     configfs_create_dir - create a directory for an config_item.
  *     @item:          config_itemwe're creating directory for.
@@ -303,11 +269,37 @@ static int create_dir(struct config_item *k, struct dentry *d)
  *     until it is validated by configfs_dir_set_ready()
  */
 
-static int configfs_create_dir(struct config_item * item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
 {
-       int error = create_dir(item, dentry);
-       if (!error)
+       int error;
+       umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+       struct dentry *p = dentry->d_parent;
+
+       BUG_ON(!item);
+
+       error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
+       if (unlikely(error))
+               return error;
+
+       error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
+                                    CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+       if (unlikely(error))
+               return error;
+
+       configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
+       error = configfs_create(dentry, mode, init_dir);
+       if (!error) {
+               inc_nlink(p->d_inode);
                item->ci_dentry = dentry;
+       } else {
+               struct configfs_dirent *sd = dentry->d_fsdata;
+               if (sd) {
+                       spin_lock(&configfs_dirent_lock);
+                       list_del_init(&sd->s_sibling);
+                       spin_unlock(&configfs_dirent_lock);
+                       configfs_put(sd);
+               }
+       }
        return error;
 }
 
index 1d1c41f1014d9039c0a3ec217d6c6e53856760b1..56d2cdc9ae0a7213f91bb5e332b1513fda887b79 100644 (file)
@@ -313,21 +313,6 @@ const struct file_operations configfs_file_operations = {
        .release        = configfs_release,
 };
 
-
-int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
-{
-       struct configfs_dirent * parent_sd = dir->d_fsdata;
-       umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
-       int error = 0;
-
-       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
-       error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
-       mutex_unlock(&dir->d_inode->i_mutex);
-
-       return error;
-}
-
-
 /**
  *     configfs_create_file - create an attribute file for an item.
  *     @item:  item we're creating for.
@@ -336,9 +321,16 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att
 
 int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
 {
-       BUG_ON(!item || !item->ci_dentry || !attr);
+       struct dentry *dir = item->ci_dentry;
+       struct configfs_dirent *parent_sd = dir->d_fsdata;
+       umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
+       int error = 0;
 
-       return configfs_add_file(item->ci_dentry, attr,
-                                CONFIGFS_ITEM_ATTR);
+       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
+       error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
+                                    CONFIGFS_ITEM_ATTR);
+       mutex_unlock(&dir->d_inode->i_mutex);
+
+       return error;
 }
 
index 65af861471541924525bcd9c04ab32a931296ba8..5423a6a6ecc8350c0284d307ac47cf6afad9238a 100644 (file)
@@ -176,7 +176,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
 
 #endif /* CONFIG_LOCKDEP */
 
-int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
+int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *))
 {
        int error = 0;
        struct inode *inode = NULL;
@@ -198,13 +198,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino
        p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
        configfs_set_inode_lock_class(sd, inode);
 
-       if (init) {
-               error = init(inode);
-               if (error) {
-                       iput(inode);
-                       return error;
-               }
-       }
+       init(inode);
        d_instantiate(dentry, inode);
        if (S_ISDIR(mode) || S_ISLNK(mode))
                dget(dentry);  /* pin link and directory dentries in core */
@@ -242,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
 
        if (dentry) {
                spin_lock(&dentry->d_lock);
-               if (!(d_unhashed(dentry) && dentry->d_inode)) {
+               if (!d_unhashed(dentry) && dentry->d_inode) {
                        dget_dlock(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
index b5c86ffd5033420523934c7153080d8cdc605bea..f319926ddf8cbc5cc90a003628bf500701ce3c25 100644 (file)
@@ -572,7 +572,7 @@ void do_coredump(const siginfo_t *siginfo)
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
-                        * cprm.limit of 1 here as a speacial value, this is a
+                        * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIM_CORE = !1, but it runs as root, and can do
index dc400fd29f4d1c3c8e2265b4275aaabe4250e1fb..c71e3732e53bcebbffca749e65b7095fd4ff6e7e 100644 (file)
@@ -1659,9 +1659,25 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 }
 EXPORT_SYMBOL(d_set_d_op);
 
+
+/*
+ * d_set_fallthru - Mark a dentry as falling through to a lower layer
+ * @dentry - The dentry to mark
+ *
+ * Mark a dentry as falling through to the lower layer (as set with
+ * d_pin_lower()).  This flag may be recorded on the medium.
+ */
+void d_set_fallthru(struct dentry *dentry)
+{
+       spin_lock(&dentry->d_lock);
+       dentry->d_flags |= DCACHE_FALLTHRU;
+       spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_set_fallthru);
+
 static unsigned d_flags_for_inode(struct inode *inode)
 {
-       unsigned add_flags = DCACHE_FILE_TYPE;
+       unsigned add_flags = DCACHE_REGULAR_TYPE;
 
        if (!inode)
                return DCACHE_MISS_TYPE;
@@ -1674,13 +1690,21 @@ static unsigned d_flags_for_inode(struct inode *inode)
                        else
                                inode->i_opflags |= IOP_LOOKUP;
                }
-       } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
-               if (unlikely(inode->i_op->follow_link))
+               goto type_determined;
+       }
+
+       if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+               if (unlikely(inode->i_op->follow_link)) {
                        add_flags = DCACHE_SYMLINK_TYPE;
-               else
-                       inode->i_opflags |= IOP_NOFOLLOW;
+                       goto type_determined;
+               }
+               inode->i_opflags |= IOP_NOFOLLOW;
        }
 
+       if (unlikely(!S_ISREG(inode->i_mode)))
+               add_flags = DCACHE_SPECIAL_TYPE;
+
+type_determined:
        if (unlikely(IS_AUTOMOUNT(inode)))
                add_flags |= DCACHE_NEED_AUTOMOUNT;
        return add_flags;
@@ -1691,7 +1715,8 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        unsigned add_flags = d_flags_for_inode(inode);
 
        spin_lock(&dentry->d_lock);
-       __d_set_type(dentry, add_flags);
+       dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+       dentry->d_flags |= add_flags;
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        dentry->d_inode = inode;
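
A userspace sketch (with simplified, made-up flag values) of the classification the rewritten d_flags_for_inode() above performs: exactly one entry-type flag is chosen per inode, defaulting to "regular" and switching to directory, symlink or special types only when the inode says so.

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/stat.h>

	#define TYPE_MISS	0x0
	#define TYPE_DIR	0x1
	#define TYPE_REGULAR	0x2
	#define TYPE_SYMLINK	0x3
	#define TYPE_SPECIAL	0x4

	static unsigned flags_for_mode(int have_inode, mode_t mode)
	{
		unsigned add_flags = TYPE_REGULAR;	/* assume the common case */

		if (!have_inode)
			return TYPE_MISS;
		if (S_ISDIR(mode))
			add_flags = TYPE_DIR;
		else if (S_ISLNK(mode))
			add_flags = TYPE_SYMLINK;
		else if (!S_ISREG(mode))
			add_flags = TYPE_SPECIAL;	/* devices, fifos, sockets */
		return add_flags;
	}

	int main(void)
	{
		printf("%u %u %u\n", flags_for_mode(1, S_IFREG),
		       flags_for_mode(1, S_IFCHR), flags_for_mode(0, 0));
		return 0;
	}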
index 45b18a5e225c3bc6fffe927cbad3257ccafc2109..96400ab42d135e7d572d29de9d4f3637fedd7817 100644 (file)
@@ -169,10 +169,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void debugfs_evict_inode(struct inode *inode)
+{
+       truncate_inode_pages_final(&inode->i_data);
+       clear_inode(inode);
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_private);
+}
+
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
+       .evict_inode    = debugfs_evict_inode,
 };
 
 static struct vfsmount *debugfs_automount(struct path *path)
@@ -511,23 +520,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
        int ret = 0;
 
        if (debugfs_positive(dentry)) {
-               if (dentry->d_inode) {
-                       dget(dentry);
-                       switch (dentry->d_inode->i_mode & S_IFMT) {
-                       case S_IFDIR:
-                               ret = simple_rmdir(parent->d_inode, dentry);
-                               break;
-                       case S_IFLNK:
-                               kfree(dentry->d_inode->i_private);
-                               /* fall through */
-                       default:
-                               simple_unlink(parent->d_inode, dentry);
-                               break;
-                       }
-                       if (!ret)
-                               d_delete(dentry);
-                       dput(dentry);
-               }
+               dget(dentry);
+               if (S_ISDIR(dentry->d_inode->i_mode))
+                       ret = simple_rmdir(parent->d_inode, dentry);
+               else
+                       simple_unlink(parent->d_inode, dentry);
+               if (!ret)
+                       d_delete(dentry);
+               dput(dentry);
        }
        return ret;
 }
@@ -690,7 +690,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
        }
        d_move(old_dentry, dentry);
        fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
-               S_ISDIR(old_dentry->d_inode->i_mode),
+               d_is_dir(old_dentry),
                NULL, old_dentry);
        fsnotify_oldname_free(old_name);
        unlock_rename(new_dir, old_dir);
index 90d1882b306face4c53d10ed08c44e3ec77b726a..5ba029e627cc22db0648317a619685e545977316 100644 (file)
@@ -124,7 +124,7 @@ ecryptfs_get_key_payload_data(struct key *key)
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
-#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32
+#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 31
 #define ECRYPTFS_MAX_NUM_ENC_KEYS 64
 #define ECRYPTFS_MAX_IV_BYTES 16       /* 128 bits */
 #define ECRYPTFS_SALT_BYTES 2
@@ -237,7 +237,7 @@ struct ecryptfs_crypt_stat {
        struct crypto_ablkcipher *tfm;
        struct crypto_hash *hash_tfm; /* Crypto context for generating
                                       * the initialization vectors */
-       unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
+       unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
        unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
        unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
        struct list_head keysig_list;
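
A userspace illustration of the off-by-one these eCryptfs hunks fix: when the constant is the maximum *name length*, any buffer a name of that length is copied into needs one extra byte for the terminating NUL, and the length check must use "greater than", not "greater or equal".

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_CIPHER_NAME_SIZE 31	/* longest allowed name, NUL excluded */

	struct crypt_stat {
		char cipher[MAX_CIPHER_NAME_SIZE + 1];	/* +1 for the NUL */
	};

	int main(void)
	{
		const char *name = "aes";
		struct crypt_stat st;

		assert(strlen(name) <= MAX_CIPHER_NAME_SIZE); /* mirrors the BUG_ON */
		strcpy(st.cipher, name);
		printf("%s\n", st.cipher);
		return 0;
	}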
index 6f4e659f508f303bdadcc8922b2cc1a1397bb51b..fd39bad6f1bdf8bbcb4321a8fc8ff1934d67167c 100644 (file)
@@ -230,7 +230,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
        }
        ecryptfs_set_file_lower(
                file, ecryptfs_inode_to_private(inode)->lower_file);
-       if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+       if (d_is_dir(ecryptfs_dentry)) {
                ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
                mutex_lock(&crypt_stat->cs_mutex);
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
@@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct file *lower_file = ecryptfs_file_to_lower(file);
        long rc = -ENOTTY;
 
-       if (lower_file->f_op->unlocked_ioctl)
+       if (!lower_file->f_op->unlocked_ioctl)
+               return rc;
+
+       switch (cmd) {
+       case FITRIM:
+       case FS_IOC_GETFLAGS:
+       case FS_IOC_SETFLAGS:
+       case FS_IOC_GETVERSION:
+       case FS_IOC_SETVERSION:
                rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
-       return rc;
+               fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
+
+               return rc;
+       default:
+               return rc;
+       }
 }
 
 #ifdef CONFIG_COMPAT
@@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct file *lower_file = ecryptfs_file_to_lower(file);
        long rc = -ENOIOCTLCMD;
 
-       if (lower_file->f_op->compat_ioctl)
+       if (!lower_file->f_op->compat_ioctl)
+               return rc;
+
+       switch (cmd) {
+       case FITRIM:
+       case FS_IOC32_GETFLAGS:
+       case FS_IOC32_SETFLAGS:
+       case FS_IOC32_GETVERSION:
+       case FS_IOC32_SETVERSION:
                rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
-       return rc;
+               fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
+
+               return rc;
+       default:
+               return rc;
+       }
 }
 #endif
 
index 34b36a5040593e960ff3da4fdc20dcaa5c5cb8f6..b08b5187f6622cb6c7934d6dba9fcdfdbb0aefb6 100644 (file)
@@ -907,9 +907,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
        lower_inode = ecryptfs_inode_to_lower(inode);
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        mutex_lock(&crypt_stat->cs_mutex);
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-       else if (S_ISREG(dentry->d_inode->i_mode)
+       else if (d_is_reg(dentry)
                 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
                     || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
                struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
index 917bd5c9776aabcff5b482f59bd8d316193ead35..6bd67e2011f083e4e184e4d2d336cb15176d40ee 100644 (file)
@@ -891,7 +891,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
        struct blkcipher_desc desc;
        char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
        char iv[ECRYPTFS_MAX_IV_BYTES];
-       char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
+       char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
 };
 
 /**
index 1895d60f4122c21d0e2ee267066c6308473ba642..c095d32642599f90cf68a9437b9a10674d5cd3d9 100644 (file)
@@ -407,7 +407,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
        if (!cipher_name_set) {
                int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
 
-               BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE);
+               BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE);
                strcpy(mount_crypt_stat->global_default_cipher_name,
                       ECRYPTFS_DEFAULT_CIPHER);
        }
index fdfd206c737a39d20853ebe069ac7dc706f61dd6..714cd37a6ba30fd970b8384a2c2b26fc5209351f 100644 (file)
@@ -429,7 +429,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
        if (IS_ERR(result))
                return result;
 
-       if (S_ISDIR(result->d_inode->i_mode)) {
+       if (d_is_dir(result)) {
                /*
                 * This request is for a directory.
                 *
index 982d934fd9ac98338377d3b1621b3d577531b6e6..f63c3d5805c4c156ad3ed412cbecf85e700cf9d2 100644 (file)
@@ -364,7 +364,8 @@ struct flex_groups {
 #define EXT4_DIRTY_FL                  0x00000100
 #define EXT4_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
 #define EXT4_NOCOMPR_FL                        0x00000400 /* Don't compress */
-#define EXT4_ECOMPR_FL                 0x00000800 /* Compression error */
+       /* nb: was previously EXT2_ECOMPR_FL */
+#define EXT4_ENCRYPT_FL                        0x00000800 /* encrypted file */
 /* End compression flags --- maybe not all used */
 #define EXT4_INDEX_FL                  0x00001000 /* hash-indexed directory */
 #define EXT4_IMAGIC_FL                 0x00002000 /* AFS directory */
@@ -421,7 +422,7 @@ enum {
        EXT4_INODE_DIRTY        = 8,
        EXT4_INODE_COMPRBLK     = 9,    /* One or more compressed clusters */
        EXT4_INODE_NOCOMPR      = 10,   /* Don't compress */
-       EXT4_INODE_ECOMPR       = 11,   /* Compression error */
+       EXT4_INODE_ENCRYPT      = 11,   /* Compression error */
 /* End compression flags --- maybe not all used */
        EXT4_INODE_INDEX        = 12,   /* hash-indexed directory */
        EXT4_INODE_IMAGIC       = 13,   /* AFS directory */
@@ -466,7 +467,7 @@ static inline void ext4_check_flag_values(void)
        CHECK_FLAG_VALUE(DIRTY);
        CHECK_FLAG_VALUE(COMPRBLK);
        CHECK_FLAG_VALUE(NOCOMPR);
-       CHECK_FLAG_VALUE(ECOMPR);
+       CHECK_FLAG_VALUE(ENCRYPT);
        CHECK_FLAG_VALUE(INDEX);
        CHECK_FLAG_VALUE(IMAGIC);
        CHECK_FLAG_VALUE(JOURNAL_DATA);
@@ -1048,6 +1049,12 @@ extern void ext4_set_bits(void *bm, int cur, int len);
 /* Metadata checksum algorithm codes */
 #define EXT4_CRC32C_CHKSUM             1
 
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID           0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS       1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM       2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC       3
+
 /*
  * Structure of the super block
  */
@@ -1161,7 +1168,8 @@ struct ext4_super_block {
        __le32  s_grp_quota_inum;       /* inode for tracking group quota */
        __le32  s_overhead_clusters;    /* overhead blocks/clusters in fs */
        __le32  s_backup_bgs[2];        /* groups with sparse_super2 SBs */
-       __le32  s_reserved[106];        /* Padding to the end of the block */
+       __u8    s_encrypt_algos[4];     /* Encryption algorithms in use  */
+       __le32  s_reserved[105];        /* Padding to the end of the block */
        __le32  s_checksum;             /* crc32c(superblock) */
 };
 
@@ -1527,6 +1535,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
  * GDT_CSUM bits are mutually exclusive.
  */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM   0x0400
+#define EXT4_FEATURE_RO_COMPAT_READONLY                0x1000
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE         0x0002
@@ -1542,6 +1551,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */
 #define EXT4_FEATURE_INCOMPAT_LARGEDIR         0x4000 /* >2GB or 3-lvl htree */
 #define EXT4_FEATURE_INCOMPAT_INLINE_DATA      0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_ENCRYPT          0x10000
 
 #define EXT2_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
index 6b9878a24182b06125cb496ef973ff0d9b739106..45fe924f82bce2ff76e3e74b45ec1833729433ea 100644 (file)
@@ -1401,10 +1401,7 @@ end_range:
                                 * to free. Everything was covered by the start
                                 * of the range.
                                 */
-                               return 0;
-                       } else {
-                               /* Shared branch grows from an indirect block */
-                               partial2--;
+                               goto do_indirects;
                        }
                } else {
                        /*
@@ -1435,56 +1432,96 @@ end_range:
        /* Punch happened within the same level (n == n2) */
        partial = ext4_find_shared(inode, n, offsets, chain, &nr);
        partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
-       /*
-        * ext4_find_shared returns Indirect structure which
-        * points to the last element which should not be
-        * removed by truncate. But this is end of the range
-        * in punch_hole so we need to point to the next element
-        */
-       partial2->p++;
-       while ((partial > chain) || (partial2 > chain2)) {
-               /* We're at the same block, so we're almost finished */
-               if ((partial->bh && partial2->bh) &&
-                   (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
-                       if ((partial > chain) && (partial2 > chain2)) {
+
+       /* Free top, but only if partial2 isn't its subtree. */
+       if (nr) {
+               int level = min(partial - chain, partial2 - chain2);
+               int i;
+               int subtree = 1;
+
+               for (i = 0; i <= level; i++) {
+                       if (offsets[i] != offsets2[i]) {
+                               subtree = 0;
+                               break;
+                       }
+               }
+
+               if (!subtree) {
+                       if (partial == chain) {
+                               /* Shared branch grows from the inode */
+                               ext4_free_branches(handle, inode, NULL,
+                                                  &nr, &nr+1,
+                                                  (chain+n-1) - partial);
+                               *partial->p = 0;
+                       } else {
+                               /* Shared branch grows from an indirect block */
+                               BUFFER_TRACE(partial->bh, "get_write_access");
                                ext4_free_branches(handle, inode, partial->bh,
-                                                  partial->p + 1,
-                                                  partial2->p,
+                                                  partial->p,
+                                                  partial->p+1,
                                                   (chain+n-1) - partial);
-                               BUFFER_TRACE(partial->bh, "call brelse");
-                               brelse(partial->bh);
-                               BUFFER_TRACE(partial2->bh, "call brelse");
-                               brelse(partial2->bh);
                        }
-                       return 0;
                }
+       }
+
+       if (!nr2) {
                /*
-                * Clear the ends of indirect blocks on the shared branch
-                * at the start of the range
+                * ext4_find_shared returns Indirect structure which
+                * points to the last element which should not be
+                * removed by truncate. But this is end of the range
+                * in punch_hole so we need to point to the next element
                 */
-               if (partial > chain) {
+               partial2->p++;
+       }
+
+       while (partial > chain || partial2 > chain2) {
+               int depth = (chain+n-1) - partial;
+               int depth2 = (chain2+n2-1) - partial2;
+
+               if (partial > chain && partial2 > chain2 &&
+                   partial->bh->b_blocknr == partial2->bh->b_blocknr) {
+                       /*
+                        * We've converged on the same block. Clear the range,
+                        * then we're done.
+                        */
                        ext4_free_branches(handle, inode, partial->bh,
-                                  partial->p + 1,
-                                  (__le32 *)partial->bh->b_data+addr_per_block,
-                                  (chain+n-1) - partial);
+                                          partial->p + 1,
+                                          partial2->p,
+                                          (chain+n-1) - partial);
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
-                       partial--;
+                       BUFFER_TRACE(partial2->bh, "call brelse");
+                       brelse(partial2->bh);
+                       return 0;
                }
+
                /*
-                * Clear the ends of indirect blocks on the shared branch
-                * at the end of the range
+                * The start and end partial branches may not be at the same
+                * level even though the punch happened within one level. So, we
+                * give them a chance to arrive at the same level, then walk
+                * them in step with each other until we converge on the same
+                * block.
                 */
-               if (partial2 > chain2) {
+               if (partial > chain && depth <= depth2) {
+                       ext4_free_branches(handle, inode, partial->bh,
+                                          partial->p + 1,
+                                          (__le32 *)partial->bh->b_data+addr_per_block,
+                                          (chain+n-1) - partial);
+                       BUFFER_TRACE(partial->bh, "call brelse");
+                       brelse(partial->bh);
+                       partial--;
+               }
+               if (partial2 > chain2 && depth2 <= depth) {
                        ext4_free_branches(handle, inode, partial2->bh,
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
-                                          (chain2+n-1) - partial2);
+                                          (chain2+n2-1) - partial2);
                        BUFFER_TRACE(partial2->bh, "call brelse");
                        brelse(partial2->bh);
                        partial2--;
                }
        }
+       return 0;
 
 do_indirects:
        /* Kill the remaining (whole) subtrees */
index 85404f15e53a28860ce5a7be08220106b76fc06f..5cb9a212b86f3efd69ca604df07dc20b901dabb1 100644 (file)
@@ -1024,6 +1024,7 @@ static int ext4_write_end(struct file *file,
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
 
@@ -1054,6 +1055,8 @@ static int ext4_write_end(struct file *file,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
         * makes the holding time of page lock longer. Second, it forces lock
@@ -1095,6 +1098,7 @@ static int ext4_journalled_write_end(struct file *file,
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
@@ -1127,6 +1131,9 @@ static int ext4_journalled_write_end(struct file *file,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
+
        if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
index 1adac6868e6fd0e97f91fa871ed45288fffc5cb6..e061e66c82800f700b7642e4c82fa2cc836be05f 100644 (file)
@@ -2779,6 +2779,12 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
        if (readonly)
                return 1;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) {
+               ext4_msg(sb, KERN_INFO, "filesystem is read-only");
+               sb->s_flags |= MS_RDONLY;
+               return 1;
+       }
+
        /* Check that feature set is OK for a read-write mount */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
                ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
@@ -3936,9 +3942,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
-       init_timer(&sbi->s_err_report);
-       sbi->s_err_report.function = print_daily_error_info;
-       sbi->s_err_report.data = (unsigned long) sb;
+       setup_timer(&sbi->s_err_report, print_daily_error_info,
+               (unsigned long) sb);
 
        /* Register extent status tree shrinker */
        if (ext4_es_register_shrinker(sbi))
@@ -4866,9 +4871,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_journal && sbi->s_journal->j_task->io_context)
                journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
                err = -EINVAL;
                goto restore_opts;
@@ -4877,17 +4879,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
            test_opt(sb, JOURNAL_CHECKSUM)) {
                ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-                        "during remount not supported");
-               err = -EINVAL;
-               goto restore_opts;
-       }
-
-       if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
-           test_opt(sb, JOURNAL_CHECKSUM)) {
-               ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-                        "during remount not supported");
-               err = -EINVAL;
-               goto restore_opts;
+                        "during remount not supported; ignoring");
+               sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
        }
 
        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
@@ -4963,7 +4956,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                ext4_mark_recovery_complete(sb, es);
                } else {
                        /* Make sure we can mount this feature set readwrite */
-                       if (!ext4_feature_set_ok(sb, 0)) {
+                       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_READONLY) ||
+                           !ext4_feature_set_ok(sb, 0)) {
                                err = -EROFS;
                                goto restore_opts;
                        }
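
A userspace sketch of the ro-compat semantics the ext4 hunks above add: a filesystem carrying the 0x1000 READONLY feature bit still mounts, but the mount is silently downgraded to read-only, and a remount to read-write is refused. Only the decision logic is modelled here, not the real mount path.

	#include <stdio.h>
	#include <stdbool.h>

	#define RO_COMPAT_READONLY 0x1000

	static bool feature_set_ok(unsigned ro_compat, bool readonly, bool *force_ro)
	{
		*force_ro = false;
		if (readonly)
			return true;			/* read-only is always fine */
		if (ro_compat & RO_COMPAT_READONLY) {
			*force_ro = true;		/* mount succeeds, read-only */
			return true;
		}
		return true;	/* remaining ro-compat checks omitted */
	}

	int main(void)
	{
		bool force_ro;

		feature_set_ok(RO_COMPAT_READONLY, false, &force_ro);
		printf("mounted %s\n", force_ro ? "read-only" : "read-write");
		return 0;
	}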
index 073657f755d4a5b9c0d08acca991fbd35a9f37f0..e907052eeadb69f683df3c7d8838106c6e36905b 100644 (file)
@@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;
 
-               if (!grab_super_passive(sb)) {
+               if (!trylock_super(sb)) {
                        /*
-                        * grab_super_passive() may fail consistently due to
+                        * trylock_super() may fail consistently due to
                         * s_umount being grabbed by someone else. Don't use
                         * requeue_io() to avoid busy retrying the inode/sb.
                         */
@@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
-               drop_super(sb);
+               up_read(&sb->s_umount);
 
                /* refer to the same tests at the end of writeback_sb_inodes */
                if (wrote) {
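
A userspace sketch of the renamed helper's contract in the hunk above: trylock_super() only takes s_umount for reading and never bumps a reference, so the caller releases exactly that read lock rather than calling a "drop" helper that also puts a reference. A pthread rwlock stands in for s_umount here.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdbool.h>

	static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER;

	static bool trylock_super_sketch(void)
	{
		return pthread_rwlock_tryrdlock(&s_umount) == 0;
	}

	int main(void)
	{
		if (trylock_super_sketch()) {
			printf("writing back this sb\n");
			pthread_rwlock_unlock(&s_umount); /* pairs with the trylock */
		}
		return 0;
	}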
index ed19a7d622fa35decaa08b10e83b8bdee8712419..39706c57ad3cb157d81594065a15f154f61d7bd8 100644 (file)
@@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 
        newpage = buf->page;
 
-       if (WARN_ON(!PageUptodate(newpage)))
-               return -EIO;
+       if (!PageUptodate(newpage))
+               SetPageUptodate(newpage);
 
        ClearPageMappedToDisk(newpage);
 
@@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
        return err;
 }
 
+static int fuse_dev_open(struct inode *inode, struct file *file)
+{
+       /*
+        * The fuse device's file's private_data is used to hold
+        * the fuse_conn(ection) when it is mounted, and is used to
+        * keep track of whether the file has been mounted already.
+        */
+       file->private_data = NULL;
+       return 0;
+}
+
 static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
 {
@@ -1797,6 +1808,9 @@ copy_finish:
 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
 {
+       /* Don't try to move pages (yet) */
+       cs->move_pages = 0;
+
        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);
@@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on)
 
 const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
+       .open           = fuse_dev_open,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_dev_read,
index 08e7b1a9d5d0edaca8b94ef386d9200078958df3..1545b711ddcfdc925410b3b553b6597b93c71a25 100644
@@ -971,7 +971,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
                        err = -EBUSY;
                        goto badentry;
                }
-               if (S_ISDIR(entry->d_inode->i_mode)) {
+               if (d_is_dir(entry)) {
                        shrink_dcache_parent(entry);
                        if (!simple_empty(entry)) {
                                err = -ENOTEMPTY;
index 6371192961e2260cbbb9976ad1c636d89b92cee6..487527b42d94a381d329d8be5ae4459cafabe15b 100644
@@ -1809,7 +1809,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
                gfs2_consist_inode(dip);
        dip->i_entries--;
        dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                drop_nlink(&dip->i_inode);
        mark_inode_dirty(&dip->i_inode);
 
index 435bea231cc6e83031976a3974bbf24d81a0b8ab..f0235c1640af7ec29edb92541f66dfc739c9b0db 100644
@@ -530,7 +530,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (d_is_dir(new_dentry))
                        res = hfsplus_rmdir(new_dir, new_dentry);
                else
                        res = hfsplus_unlink(new_dir, new_dentry);
index 5f2755117ce775dea45cb5c2bbc369568f991f29..043ac9d77262a858464adad185969109902c9479 100644
@@ -678,10 +678,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
                return NULL;
        }
 
-       if (S_ISDIR(dentry->d_inode->i_mode)) {
+       if (d_is_dir(dentry)) {
                inode->i_op = &hppfs_dir_iops;
                inode->i_fop = &hppfs_dir_fops;
-       } else if (S_ISLNK(dentry->d_inode->i_mode)) {
+       } else if (d_is_symlink(dentry)) {
                inode->i_op = &hppfs_link_iops;
                inode->i_fop = &hppfs_file_fops;
        } else {
index 30459dab409dd5f8b08f22d69b589f9c91dd2678..01dce1d1476b7bc93633f787e989c464c5f2ef58 100644
@@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void);
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
-extern bool grab_super_passive(struct super_block *sb);
+extern bool trylock_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
                               int, const char *, void *);
 extern struct super_block *user_get_super(dev_t);
index bcbef08a4d8fc8873994eb37d35881626f05af70..b5128c6e63ad6644d19bf861a062d63f48265a4d 100644
@@ -524,6 +524,9 @@ static int do_one_pass(journal_t *journal,
                        if (descr_csum_size > 0 &&
                            !jbd2_descr_block_csum_verify(journal,
                                                          bh->b_data)) {
+                               printk(KERN_ERR "JBD2: Invalid checksum "
+                                      "recovering block %lu in log\n",
+                                      next_log_block);
                                err = -EIO;
                                brelse(bh);
                                goto failed;
index 938556025d643349b5166e54b5cfc55ea4767dbe..f21b6fb5e4c42f219022edb51c84a0d0674bd4d2 100644
@@ -252,7 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
        if (!f->inocache)
                return -EIO;
 
-       if (S_ISDIR(old_dentry->d_inode->i_mode))
+       if (d_is_dir(old_dentry))
                return -EPERM;
 
        /* XXX: This is ugly */
@@ -772,7 +772,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
         */
        if (new_dentry->d_inode) {
                victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
-               if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+               if (d_is_dir(new_dentry)) {
                        struct jffs2_full_dirent *fd;
 
                        mutex_lock(&victim_f->sem);
@@ -807,7 +807,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
        if (victim_f) {
                /* There was a victim. Kill it off nicely */
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (d_is_dir(new_dentry))
                        clear_nlink(new_dentry->d_inode);
                else
                        drop_nlink(new_dentry->d_inode);
@@ -815,7 +815,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                   inode which didn't exist. */
                if (victim_f->inocache) {
                        mutex_lock(&victim_f->sem);
-                       if (S_ISDIR(new_dentry->d_inode->i_mode))
+                       if (d_is_dir(new_dentry))
                                victim_f->inocache->pino_nlink = 0;
                        else
                                victim_f->inocache->pino_nlink--;
@@ -825,7 +825,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
        /* If it was a directory we moved, and there was no victim,
           increase i_nlink on its new parent */
-       if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+       if (d_is_dir(old_dentry) && !victim_f)
                inc_nlink(new_dir_i);
 
        /* Unlink the original */
@@ -839,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
                mutex_lock(&f->sem);
                inc_nlink(old_dentry->d_inode);
-               if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode))
+               if (f->inocache && !d_is_dir(old_dentry))
                        f->inocache->pino_nlink++;
                mutex_unlock(&f->sem);
 
@@ -852,7 +852,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                return ret;
        }
 
-       if (S_ISDIR(old_dentry->d_inode->i_mode))
+       if (d_is_dir(old_dentry))
                drop_nlink(old_dir_i);
 
        new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
index 0918f0e2e26608467356235c4267c336cccee87c..3d76f28a2ba9dc1d9e4b1d5b9dfde57af1b983e6 100644
@@ -138,7 +138,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
        struct jffs2_inode_info *f;
        uint32_t pino;
 
-       BUG_ON(!S_ISDIR(child->d_inode->i_mode));
+       BUG_ON(!d_is_dir(child));
 
        f = JFFS2_INODE_INFO(child->d_inode);
 
index b2ffdb045be42c1c5c476d9298aacd98045f47f3..0ab65122ee45405bfcc96463a3de2b228b36e283 100644
@@ -329,7 +329,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
 {
        struct inode *inode = old_dentry->d_inode;
-       int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
+       int they_are_dirs = d_is_dir(old_dentry);
 
        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;
index 365c82e1b3a9a602057e65edc9b857c6adce6b76..528fedfda15e6432bd69b80c7bd8faceb7d1351d 100644
@@ -1665,7 +1665,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
        }
 
        if (my_fl != NULL) {
-               error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
+               lease = my_fl;
+               error = lease->fl_lmops->lm_change(lease, arg, &dispose);
                if (error)
                        goto out;
                goto out_setup;
@@ -1727,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
                        break;
                }
        }
-       trace_generic_delete_lease(inode, fl);
+       trace_generic_delete_lease(inode, victim);
        if (victim)
                error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
        spin_unlock(&ctx->flc_lock);
index 96ca11dea4a20c56b89ca126274b68089235346f..c83145af4bfc0ea9bb159002e3545e8a8cd65157 100644
@@ -2814,7 +2814,7 @@ no_open:
                        } else if (!dentry->d_inode) {
                                goto out;
                        } else if ((open_flag & O_TRUNC) &&
-                                  S_ISREG(dentry->d_inode->i_mode)) {
+                                  d_is_reg(dentry)) {
                                goto out;
                        }
                        /* will fail later, go on to get the right error */
index 72a286e0d33eb37a2ff3cc8a33f7ca5cefcca266..82ef1405260e1cfbe551ffba781fec25f881e918 100644
@@ -1907,8 +1907,8 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
        if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
                return -EINVAL;
 
-       if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
-             S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
+       if (d_is_dir(mp->m_dentry) !=
+             d_is_dir(mnt->mnt.mnt_root))
                return -ENOTDIR;
 
        return attach_recursive_mnt(mnt, p, mp, NULL);
@@ -2180,8 +2180,8 @@ static int do_move_mount(struct path *path, const char *old_name)
        if (!mnt_has_parent(old))
                goto out1;
 
-       if (S_ISDIR(path->dentry->d_inode->i_mode) !=
-             S_ISDIR(old_path.dentry->d_inode->i_mode))
+       if (d_is_dir(path->dentry) !=
+             d_is_dir(old_path.dentry))
                goto out1;
        /*
         * Don't move a mount residing in a shared parent.
@@ -2271,7 +2271,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
                goto unlock;
 
        err = -EINVAL;
-       if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
+       if (d_is_symlink(newmnt->mnt.mnt_root))
                goto unlock;
 
        newmnt->mnt.mnt_flags = mnt_flags;
index e36a9d78ea49adc63329253a0a94cb22d6be933a..197806fb87ffb459c19f3c4bbc8da50c58c870dc 100644
@@ -427,6 +427,8 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
        if (clp == NULL)
                goto out;
 
+       if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+               goto out;
        tbl = &clp->cl_session->bc_slot_table;
 
        spin_lock(&tbl->slot_tbl_lock);
index f4ccfe6521ec80f80fd4b9096bcc7ed49ab7d795..19ca95cdfd9b0f26aedbbc23f036babf2aeca67a 100644
@@ -313,7 +313,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
                goto out;
        }
 
-       args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+       args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
        if (!args->devs) {
                status = htonl(NFS4ERR_DELAY);
                goto out;
@@ -415,7 +415,7 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
                             rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
                if (unlikely(p == NULL))
                        goto out;
-               rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+               rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
                                                sizeof(*rc_list->rcl_refcalls),
                                                GFP_KERNEL);
                if (unlikely(rc_list->rcl_refcalls == NULL))
@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
 
                for (i = 0; i < args->csa_nrclists; i++) {
                        status = decode_rc_list(xdr, &args->csa_rclists[i]);
-                       if (status)
+                       if (status) {
+                               args->csa_nrclists = i;
                                goto out_free;
+                       }
                }
        }
        status = 0;
index f9f4845db989e58b7e09f3e46b185f5ff5d2d9e4..19874151e95c9908660132e6a3e2e6d7d2d3c8e4 100644
@@ -433,7 +433,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
 
 static bool nfs_client_init_is_complete(const struct nfs_client *clp)
 {
-       return clp->cl_cons_state != NFS_CS_INITING;
+       return clp->cl_cons_state <= NFS_CS_READY;
 }
 
 int nfs_wait_client_init_complete(const struct nfs_client *clp)
index da5433230bb1960bc78bafc17ced89f3bca76b65..a6ad688658803424d726afae85597ad2ace80044 100644
@@ -180,10 +180,9 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
-                       NFS_I(inode)->delegation_state = delegation->type;
                        spin_unlock(&delegation->lock);
-                       put_rpccred(oldcred);
                        rcu_read_unlock();
+                       put_rpccred(oldcred);
                        trace_nfs4_reclaim_delegation(inode, res->delegation_type);
                } else {
                        /* We appear to have raced with a delegation return. */
@@ -275,7 +274,6 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
        set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
-       nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
@@ -355,7 +353,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
                                        &delegation->stateid)) {
                        nfs_update_inplace_delegation(old_delegation,
                                        delegation);
-                       nfsi->delegation_state = old_delegation->type;
                        goto out;
                }
                /*
@@ -373,13 +370,15 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
                        delegation = NULL;
                        goto out;
                }
-               freeme = nfs_detach_delegation_locked(nfsi, 
+               if (test_and_set_bit(NFS_DELEGATION_RETURNING,
+                                       &old_delegation->flags))
+                       goto out;
+               freeme = nfs_detach_delegation_locked(nfsi,
                                old_delegation, clp);
                if (freeme == NULL)
                        goto out;
        }
        list_add_rcu(&delegation->super_list, &server->delegations);
-       nfsi->delegation_state = delegation->type;
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;
 
@@ -437,6 +436,8 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
 {
        bool ret = false;
 
+       if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+               goto out;
        if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
                ret = true;
        if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
@@ -448,6 +449,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
                        ret = true;
                spin_unlock(&delegation->lock);
        }
+out:
        return ret;
 }
 
@@ -475,14 +477,20 @@ restart:
                                                                super_list) {
                        if (!nfs_delegation_need_return(delegation))
                                continue;
-                       inode = nfs_delegation_grab_inode(delegation);
-                       if (inode == NULL)
+                       if (!nfs_sb_active(server->super))
                                continue;
+                       inode = nfs_delegation_grab_inode(delegation);
+                       if (inode == NULL) {
+                               rcu_read_unlock();
+                               nfs_sb_deactive(server->super);
+                               goto restart;
+                       }
                        delegation = nfs_start_delegation_return_locked(NFS_I(inode));
                        rcu_read_unlock();
 
                        err = nfs_end_delegation_return(inode, delegation, 0);
                        iput(inode);
+                       nfs_sb_deactive(server->super);
                        if (!err)
                                goto restart;
                        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
@@ -813,19 +821,30 @@ restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                                                super_list) {
+                       if (test_bit(NFS_DELEGATION_RETURNING,
+                                               &delegation->flags))
+                               continue;
                        if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
                                                &delegation->flags) == 0)
                                continue;
-                       inode = nfs_delegation_grab_inode(delegation);
-                       if (inode == NULL)
+                       if (!nfs_sb_active(server->super))
                                continue;
-                       delegation = nfs_detach_delegation(NFS_I(inode),
-                                       delegation, server);
+                       inode = nfs_delegation_grab_inode(delegation);
+                       if (inode == NULL) {
+                               rcu_read_unlock();
+                               nfs_sb_deactive(server->super);
+                               goto restart;
+                       }
+                       delegation = nfs_start_delegation_return_locked(NFS_I(inode));
                        rcu_read_unlock();
-
-                       if (delegation != NULL)
-                               nfs_free_delegation(delegation);
+                       if (delegation != NULL) {
+                               delegation = nfs_detach_delegation(NFS_I(inode),
+                                       delegation, server);
+                               if (delegation != NULL)
+                                       nfs_free_delegation(delegation);
+                       }
                        iput(inode);
+                       nfs_sb_deactive(server->super);
                        goto restart;
                }
        }
index 9b0c55cb2a2eaa9166670258ef7bf2b87824a0b7..c19e16f0b2d049f972acee413a287d9f73759ba3 100644
@@ -408,14 +408,22 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc,
        return 0;
 }
 
+/* Match file and dirent using either filehandle or fileid
+ * Note: caller is responsible for checking the fsid
+ */
 static
 int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
 {
+       struct nfs_inode *nfsi;
+
        if (dentry->d_inode == NULL)
                goto different;
-       if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0)
-               goto different;
-       return 1;
+
+       nfsi = NFS_I(dentry->d_inode);
+       if (entry->fattr->fileid == nfsi->fileid)
+               return 1;
+       if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
+               return 1;
 different:
        return 0;
 }
@@ -469,6 +477,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
        struct inode *inode;
        int status;
 
+       if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID))
+               return;
+       if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
+               return;
        if (filename.name[0] == '.') {
                if (filename.len == 1)
                        return;
@@ -479,6 +491,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 
        dentry = d_lookup(parent, &filename);
        if (dentry != NULL) {
+               /* Is there a mountpoint here? If so, just exit */
+               if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
+                                       &entry->fattr->fsid))
+                       goto out;
                if (nfs_same_file(dentry, entry)) {
                        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
                        status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
index 7077521acdf4609cc57f54da4ce11bb331493632..e907c8cf732e3cff6bc9711ccf0b20c9261cdca2 100644
@@ -283,7 +283,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
 {
-       cinfo->lock = &dreq->lock;
+       cinfo->lock = &dreq->inode->i_lock;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
index 94712fc781fa58fa162c8399c6b9427aa242f5ce..e679d24c39d3a57d5ef510a22d5ccbe2832c5335 100644
@@ -178,7 +178,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
                iocb->ki_filp,
                iov_iter_count(to), (unsigned long) iocb->ki_pos);
 
-       result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+       result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping);
        if (!result) {
                result = generic_file_read_iter(iocb, to);
                if (result > 0)
@@ -199,7 +199,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
        dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
                filp, (unsigned long) count, (unsigned long long) *ppos);
 
-       res = nfs_revalidate_mapping(inode, filp->f_mapping);
+       res = nfs_revalidate_mapping_protected(inode, filp->f_mapping);
        if (!res) {
                res = generic_file_splice_read(filp, ppos, pipe, count, flags);
                if (res > 0)
@@ -372,6 +372,10 @@ start:
                                 nfs_wait_bit_killable, TASK_KILLABLE);
        if (ret)
                return ret;
+       /*
+        * Wait for O_DIRECT to complete
+        */
+       nfs_inode_dio_wait(mapping->host);
 
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
@@ -619,6 +623,9 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* make sure the cache has finished storing the page */
        nfs_fscache_wait_on_page_write(NFS_I(inode), page);
 
+       wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
+                       nfs_wait_bit_killable, TASK_KILLABLE);
+
        lock_page(page);
        mapping = page_file_mapping(page);
        if (mapping != inode->i_mapping)
index 7ae1c263c5cf03b8d63f1fec359d794174564020..91e88a7ecef0c64354b0278fc01381e0970d2086 100644
@@ -960,52 +960,19 @@ filelayout_mark_request_commit(struct nfs_page *req,
 {
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        u32 i, j;
-       struct list_head *list;
-       struct pnfs_commit_bucket *buckets;
 
        if (fl->commit_through_mds) {
-               list = &cinfo->mds->list;
-               spin_lock(cinfo->lock);
-               goto mds_commit;
-       }
-
-       /* Note that we are calling nfs4_fl_calc_j_index on each page
-        * that ends up being committed to a data server.  An attractive
-        * alternative is to add a field to nfs_write_data and nfs_page
-        * to store the value calculated in filelayout_write_pagelist
-        * and just use that here.
-        */
-       j = nfs4_fl_calc_j_index(lseg, req_offset(req));
-       i = select_bucket_index(fl, j);
-       spin_lock(cinfo->lock);
-       buckets = cinfo->ds->buckets;
-       list = &buckets[i].written;
-       if (list_empty(list)) {
-               /* Non-empty buckets hold a reference on the lseg.  That ref
-                * is normally transferred to the COMMIT call and released
-                * there.  It could also be released if the last req is pulled
-                * off due to a rewrite, in which case it will be done in
-                * pnfs_generic_clear_request_commit
+               nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
+       } else {
+               /* Note that we are calling nfs4_fl_calc_j_index on each page
+                * that ends up being committed to a data server.  An attractive
+                * alternative is to add a field to nfs_write_data and nfs_page
+                * to store the value calculated in filelayout_write_pagelist
+                * and just use that here.
                 */
-               buckets[i].wlseg = pnfs_get_lseg(lseg);
-       }
-       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-       cinfo->ds->nwritten++;
-
-mds_commit:
-       /* nfs_request_add_commit_list(). We need to add req to list without
-        * dropping cinfo lock.
-        */
-       set_bit(PG_CLEAN, &(req)->wb_flags);
-       nfs_list_add_request(req, list);
-       cinfo->mds->ncommit++;
-       spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
+               j = nfs4_fl_calc_j_index(lseg, req_offset(req));
+               i = select_bucket_index(fl, j);
+               pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
        }
 }
 
index c22ecaa86c1c27cc2138f1853c27757b11104f17..315cc68945b9d1d3ad00b04e2597e38d7b7c4cab 100644
@@ -1332,47 +1332,6 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
        return PNFS_ATTEMPTED;
 }
 
-static void
-ff_layout_mark_request_commit(struct nfs_page *req,
-                             struct pnfs_layout_segment *lseg,
-                             struct nfs_commit_info *cinfo,
-                             u32 ds_commit_idx)
-{
-       struct list_head *list;
-       struct pnfs_commit_bucket *buckets;
-
-       spin_lock(cinfo->lock);
-       buckets = cinfo->ds->buckets;
-       list = &buckets[ds_commit_idx].written;
-       if (list_empty(list)) {
-               /* Non-empty buckets hold a reference on the lseg.  That ref
-                * is normally transferred to the COMMIT call and released
-                * there.  It could also be released if the last req is pulled
-                * off due to a rewrite, in which case it will be done in
-                * pnfs_common_clear_request_commit
-                */
-               WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
-               buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
-       }
-       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-       cinfo->ds->nwritten++;
-
-       /* nfs_request_add_commit_list(). We need to add req to list without
-        * dropping cinfo lock.
-        */
-       set_bit(PG_CLEAN, &(req)->wb_flags);
-       nfs_list_add_request(req, list);
-       cinfo->mds->ncommit++;
-       spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
-       }
-}
-
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
 {
        return i;
@@ -1540,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
        .pg_write_ops           = &ff_layout_pg_write_ops,
        .get_ds_info            = ff_layout_get_ds_info,
        .free_deviceid_node     = ff_layout_free_deveiceid_node,
-       .mark_request_commit    = ff_layout_mark_request_commit,
+       .mark_request_commit    = pnfs_layout_mark_request_commit,
        .clear_request_commit   = pnfs_generic_clear_request_commit,
        .scan_commit_lists      = pnfs_generic_scan_commit_lists,
        .recover_commit_reqs    = pnfs_generic_recover_commit_reqs,
index e4f0dcef8f5455e60676bf70d9d0489107ad7c79..d42dff6d5e983f65c2e2f299e12d5353f54236c7 100644
@@ -556,6 +556,7 @@ EXPORT_SYMBOL_GPL(nfs_setattr);
  * This is a copy of the common vmtruncate, but with the locking
  * corrected to take into account the fact that NFS requires
  * inode->i_size to be updated under the inode->i_lock.
+ * Note: must be called with inode->i_lock held!
  */
 static int nfs_vmtruncate(struct inode * inode, loff_t offset)
 {
@@ -565,14 +566,14 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
        if (err)
                goto out;
 
-       spin_lock(&inode->i_lock);
        i_size_write(inode, offset);
        /* Optimisation */
        if (offset == 0)
                NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
-       spin_unlock(&inode->i_lock);
 
+       spin_unlock(&inode->i_lock);
        truncate_pagecache(inode, offset);
+       spin_lock(&inode->i_lock);
 out:
        return err;
 }
@@ -585,10 +586,15 @@ out:
  * Note: we do this in the *proc.c in order to ensure that
  *       it works for things like exclusive creates too.
  */
-void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
+void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
+               struct nfs_fattr *fattr)
 {
+       /* Barrier: bump the attribute generation count. */
+       nfs_fattr_set_barrier(fattr);
+
+       spin_lock(&inode->i_lock);
+       NFS_I(inode)->attr_gencount = fattr->gencount;
        if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
-               spin_lock(&inode->i_lock);
                if ((attr->ia_valid & ATTR_MODE) != 0) {
                        int mode = attr->ia_mode & S_IALLUGO;
                        mode |= inode->i_mode & ~S_IALLUGO;
@@ -600,12 +606,13 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
                        inode->i_gid = attr->ia_gid;
                nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
                                | NFS_INO_INVALID_ACL);
-               spin_unlock(&inode->i_lock);
        }
        if ((attr->ia_valid & ATTR_SIZE) != 0) {
                nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
                nfs_vmtruncate(inode, attr->ia_size);
        }
+       nfs_update_inode(inode, fattr);
+       spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
 
@@ -1028,6 +1035,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
 
        if (mapping->nrpages != 0) {
                if (S_ISREG(inode->i_mode)) {
+                       unmap_mapping_range(mapping, 0, 0, 0);
                        ret = nfs_sync_mapping(mapping);
                        if (ret < 0)
                                return ret;
@@ -1060,11 +1068,14 @@ static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
 }
 
 /**
- * nfs_revalidate_mapping - Revalidate the pagecache
+ * __nfs_revalidate_mapping - Revalidate the pagecache
  * @inode - pointer to host inode
  * @mapping - pointer to mapping
+ * @may_lock - take inode->i_mutex?
  */
-int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+static int __nfs_revalidate_mapping(struct inode *inode,
+               struct address_space *mapping,
+               bool may_lock)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
        unsigned long *bitlock = &nfsi->flags;
@@ -1113,7 +1124,12 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
        nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
        spin_unlock(&inode->i_lock);
        trace_nfs_invalidate_mapping_enter(inode);
-       ret = nfs_invalidate_mapping(inode, mapping);
+       if (may_lock) {
+               mutex_lock(&inode->i_mutex);
+               ret = nfs_invalidate_mapping(inode, mapping);
+               mutex_unlock(&inode->i_mutex);
+       } else
+               ret = nfs_invalidate_mapping(inode, mapping);
        trace_nfs_invalidate_mapping_exit(inode, ret);
 
        clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
@@ -1123,6 +1139,29 @@ out:
        return ret;
 }
 
+/**
+ * nfs_revalidate_mapping - Revalidate the pagecache
+ * @inode - pointer to host inode
+ * @mapping - pointer to mapping
+ */
+int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+{
+       return __nfs_revalidate_mapping(inode, mapping, false);
+}
+
+/**
+ * nfs_revalidate_mapping_protected - Revalidate the pagecache
+ * @inode - pointer to host inode
+ * @mapping - pointer to mapping
+ *
+ * Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex
+ * while invalidating the mapping.
+ */
+int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping)
+{
+       return __nfs_revalidate_mapping(inode, mapping, true);
+}
+
 static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
@@ -1231,13 +1270,6 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
        return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
 }
 
-static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
-{
-       if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
-               return 0;
-       return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
-}
-
 static atomic_long_t nfs_attr_generation_counter;
 
 static unsigned long nfs_read_attr_generation_counter(void)
@@ -1249,6 +1281,7 @@ unsigned long nfs_inc_attr_generation_counter(void)
 {
        return atomic_long_inc_return(&nfs_attr_generation_counter);
 }
+EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
 
 void nfs_fattr_init(struct nfs_fattr *fattr)
 {
@@ -1260,6 +1293,22 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
 }
 EXPORT_SYMBOL_GPL(nfs_fattr_init);
 
+/**
+ * nfs_fattr_set_barrier
+ * @fattr: attributes
+ *
+ * Used to set a barrier after an attribute was updated. This
+ * barrier ensures that older attributes from RPC calls that may
+ * have raced with our update cannot clobber these new values.
+ * Note that you are still responsible for ensuring that other
+ * operations which change the attribute on the server do not
+ * collide.
+ */
+void nfs_fattr_set_barrier(struct nfs_fattr *fattr)
+{
+       fattr->gencount = nfs_inc_attr_generation_counter();
+}
+
 struct nfs_fattr *nfs_alloc_fattr(void)
 {
        struct nfs_fattr *fattr;
@@ -1370,7 +1419,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
 
        return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
                nfs_ctime_need_update(inode, fattr) ||
-               nfs_size_need_update(inode, fattr) ||
                ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
 }
 
@@ -1460,6 +1508,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
        int status;
 
        spin_lock(&inode->i_lock);
+       nfs_fattr_set_barrier(fattr);
        status = nfs_post_op_update_inode_locked(inode, fattr);
        spin_unlock(&inode->i_lock);
 
@@ -1468,7 +1517,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
 
 /**
- * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
+ * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
  * @inode - pointer to inode
  * @fattr - updated attributes
  *
@@ -1478,11 +1527,10 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
  *
  * This function is mainly designed to be used by the ->write_done() functions.
  */
-int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
+int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
 {
        int status;
 
-       spin_lock(&inode->i_lock);
        /* Don't do a WCC update if these attributes are already stale */
        if ((fattr->valid & NFS_ATTR_FATTR) == 0 ||
                        !nfs_inode_attrs_need_update(inode, fattr)) {
@@ -1514,6 +1562,27 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
        }
 out_noforce:
        status = nfs_post_op_update_inode_locked(inode, fattr);
+       return status;
+}
+
+/**
+ * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * After an operation that has changed the inode metadata, mark the
+ * attribute cache as being invalid, then try to update it. Fake up
+ * weak cache consistency data, if none exist.
+ *
+ * This function is mainly designed to be used by the ->write_done() functions.
+ */
+int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
+{
+       int status;
+
+       spin_lock(&inode->i_lock);
+       nfs_fattr_set_barrier(fattr);
+       status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
        spin_unlock(&inode->i_lock);
        return status;
 }
@@ -1715,6 +1784,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
                nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
                nfsi->attrtimeo_timestamp = now;
+               /* Set barrier to be more recent than all outstanding updates */
                nfsi->attr_gencount = nfs_inc_attr_generation_counter();
        } else {
                if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
@@ -1722,6 +1792,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                                nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
                        nfsi->attrtimeo_timestamp = now;
                }
+               /* Set the barrier to be more recent than this fattr */
+               if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
+                       nfsi->attr_gencount = fattr->gencount;
        }
        invalid &= ~NFS_INO_INVALID_ATTR;
        /* Don't invalidate the data if we were to blame */
@@ -1775,7 +1848,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
 #if IS_ENABLED(CONFIG_NFS_V4)
        INIT_LIST_HEAD(&nfsi->open_states);
        nfsi->delegation = NULL;
-       nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
 #endif
index 212b8c883d22881b4258c2c7f7e611d9d85f0427..9e6475bc5ba22be7293c687d78dda1a626d3024b 100644
@@ -459,6 +459,7 @@ void nfs_mark_request_commit(struct nfs_page *req,
                             struct nfs_commit_info *cinfo,
                             u32 ds_commit_idx);
 int nfs_write_need_commit(struct nfs_pgio_header *);
+void nfs_writeback_update_inode(struct nfs_pgio_header *hdr);
 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
                            int how, struct nfs_commit_info *cinfo);
 void nfs_retry_commit(struct list_head *page_list,
@@ -597,6 +598,19 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
 }
 
+/*
+ * Record the page as unstable and mark its inode as dirty.
+ */
+static inline
+void nfs_mark_page_unstable(struct page *page)
+{
+       struct inode *inode = page_file_mapping(page)->host;
+
+       inc_zone_page_state(page, NR_UNSTABLE_NFS);
+       inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
+       __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+}
+
 /*
  * Determine the number of bytes of data the page contains
  */
index 78e557c3ab87d014694e6e4098faefb20553f579..1f11d2533ee4107bd9ae3f8de460b38ecdbaae74 100644
@@ -138,7 +138,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
        if (status == 0)
-               nfs_setattr_update_inode(inode, sattr);
+               nfs_setattr_update_inode(inode, sattr, fattr);
        dprintk("NFS reply setattr: %d\n", status);
        return status;
 }
@@ -834,7 +834,7 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
        if (nfs3_async_handle_jukebox(task, inode))
                return -EAGAIN;
        if (task->tk_status >= 0)
-               nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr);
+               nfs_writeback_update_inode(hdr);
        return 0;
 }
 
index 2a932fdc57cb37b2676e08dcb33838c9aecd2a9a..53852a4bd88be68781bb9dd8a39760fd497d1d61 100644
@@ -1987,6 +1987,11 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
                if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
                        entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
 
+               if (entry->fattr->fileid != entry->ino) {
+                       entry->fattr->mounted_on_fileid = entry->ino;
+                       entry->fattr->valid |= NFS_ATTR_FATTR_MOUNTED_ON_FILEID;
+               }
+
                /* In fact, a post_op_fh3: */
                p = xdr_inline_decode(xdr, 4);
                if (unlikely(p == NULL))
index 8646af9b11d2e1b866f1c1e789a5b6001a36c882..86d6214ea022fee66f16f976a17451590edde7b8 100644
@@ -621,6 +621,9 @@ int nfs41_walk_client_list(struct nfs_client *new,
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
 
+               if (pos == new)
+                       goto found;
+
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
@@ -639,10 +642,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        prev = pos;
 
                        status = nfs_wait_client_init_complete(pos);
-                       if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
-                               nfs4_schedule_lease_recovery(pos);
-                               status = nfs4_wait_clnt_recover(pos);
-                       }
                        spin_lock(&nn->nfs_client_lock);
                        if (status < 0)
                                break;
@@ -668,7 +667,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                 */
                if (!nfs4_match_client_owner_id(pos, new))
                        continue;
-
+found:
                atomic_inc(&pos->cl_count);
                *result = pos;
                status = 0;
index 2e7c9f7a6f7cc8369bfcefda7a202d34f7a9dbe5..627f37c44456752d6bd90a8882ae373ec6d70718 100644
@@ -901,6 +901,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
        if (!cinfo->atomic || cinfo->before != dir->i_version)
                nfs_force_lookup_revalidate(dir);
        dir->i_version = cinfo->after;
+       nfsi->attr_gencount = nfs_inc_attr_generation_counter();
        nfs_fscache_invalidate(dir);
        spin_unlock(&dir->i_lock);
 }
@@ -1552,6 +1553,9 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
 
        opendata->o_arg.open_flags = 0;
        opendata->o_arg.fmode = fmode;
+       opendata->o_arg.share_access = nfs4_map_atomic_open_share(
+                       NFS_SB(opendata->dentry->d_sb),
+                       fmode, 0);
        memset(&opendata->o_res, 0, sizeof(opendata->o_res));
        memset(&opendata->c_res, 0, sizeof(opendata->c_res));
        nfs4_init_opendata_res(opendata);
@@ -2413,8 +2417,8 @@ static int _nfs4_do_open(struct inode *dir,
                                opendata->o_res.f_attr, sattr,
                                state, label, olabel);
                if (status == 0) {
-                       nfs_setattr_update_inode(state->inode, sattr);
-                       nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
+                       nfs_setattr_update_inode(state->inode, sattr,
+                                       opendata->o_res.f_attr);
                        nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
                }
        }
@@ -2651,7 +2655,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_EXPIRED:
                        if (!nfs4_stateid_match(&calldata->arg.stateid,
-                                               &state->stateid)) {
+                                               &state->open_stateid)) {
                                rpc_restart_call_prepare(task);
                                goto out_release;
                        }
@@ -2687,7 +2691,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
        is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
        is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
-       nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid);
+       nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
        /* Calculate the change in open mode */
        calldata->arg.fmode = 0;
        if (state->n_rdwr == 0) {
@@ -3288,7 +3292,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 
        status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
        if (status == 0) {
-               nfs_setattr_update_inode(inode, sattr);
+               nfs_setattr_update_inode(inode, sattr, fattr);
                nfs_setsecurity(inode, fattr, label);
        }
        nfs4_label_free(label);
@@ -4234,7 +4238,7 @@ static int nfs4_write_done_cb(struct rpc_task *task,
        }
        if (task->tk_status >= 0) {
                renew_lease(NFS_SERVER(inode), hdr->timestamp);
-               nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr);
+               nfs_writeback_update_inode(hdr);
        }
        return 0;
 }
@@ -6648,47 +6652,47 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
        int status;
+       struct nfs41_bind_conn_to_session_args args = {
+               .client = clp,
+               .dir = NFS4_CDFC4_FORE_OR_BOTH,
+       };
        struct nfs41_bind_conn_to_session_res res;
        struct rpc_message msg = {
                .rpc_proc =
                        &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
-               .rpc_argp = clp,
+               .rpc_argp = &args,
                .rpc_resp = &res,
                .rpc_cred = cred,
        };
 
        dprintk("--> %s\n", __func__);
 
-       res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
-       if (unlikely(res.session == NULL)) {
-               status = -ENOMEM;
-               goto out;
-       }
+       nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
+       if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+               args.dir = NFS4_CDFC4_FORE;
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        trace_nfs4_bind_conn_to_session(clp, status);
        if (status == 0) {
-               if (memcmp(res.session->sess_id.data,
+               if (memcmp(res.sessionid.data,
                    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
                        dprintk("NFS: %s: Session ID mismatch\n", __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
-               if (res.dir != NFS4_CDFS4_BOTH) {
+               if ((res.dir & args.dir) != res.dir || res.dir == 0) {
                        dprintk("NFS: %s: Unexpected direction from server\n",
                                __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
-               if (res.use_conn_in_rdma_mode) {
+               if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
                        dprintk("NFS: %s: Server returned RDMA mode = true\n",
                                __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
        }
-out_session:
-       kfree(res.session);
 out:
        dprintk("<-- %s status= %d\n", __func__, status);
        return status;
@@ -6893,9 +6897,13 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 
        if (status == 0) {
                clp->cl_clientid = res.clientid;
-               clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
-               if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
+               clp->cl_exchange_flags = res.flags;
+               /* Client ID is not confirmed */
+               if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
+                       clear_bit(NFS4_SESSION_ESTABLISHED,
+                                       &clp->cl_session->session_state);
                        clp->cl_seqid = res.seqid;
+               }
 
                kfree(clp->cl_serverowner);
                clp->cl_serverowner = res.server_owner;
@@ -7166,10 +7174,11 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
                args->bc_attrs.max_reqs);
 }
 
-static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
+               struct nfs41_create_session_res *res)
 {
        struct nfs4_channel_attrs *sent = &args->fc_attrs;
-       struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+       struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
 
        if (rcvd->max_resp_sz > sent->max_resp_sz)
                return -EINVAL;
@@ -7188,11 +7197,14 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
        return 0;
 }
 
-static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
+               struct nfs41_create_session_res *res)
 {
        struct nfs4_channel_attrs *sent = &args->bc_attrs;
-       struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
+       struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
 
+       if (!(res->flags & SESSION4_BACK_CHAN))
+               goto out;
        if (rcvd->max_rqst_sz > sent->max_rqst_sz)
                return -EINVAL;
        if (rcvd->max_resp_sz < sent->max_resp_sz)
@@ -7204,18 +7216,33 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
                return -EINVAL;
        if (rcvd->max_reqs != sent->max_reqs)
                return -EINVAL;
+out:
        return 0;
 }
 
 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
-                                    struct nfs4_session *session)
+                                    struct nfs41_create_session_res *res)
 {
        int ret;
 
-       ret = nfs4_verify_fore_channel_attrs(args, session);
+       ret = nfs4_verify_fore_channel_attrs(args, res);
        if (ret)
                return ret;
-       return nfs4_verify_back_channel_attrs(args, session);
+       return nfs4_verify_back_channel_attrs(args, res);
+}
+
+static void nfs4_update_session(struct nfs4_session *session,
+               struct nfs41_create_session_res *res)
+{
+       nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
+       /* Mark client id and session as being confirmed */
+       session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
+       set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
+       session->flags = res->flags;
+       memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
+       if (res->flags & SESSION4_BACK_CHAN)
+               memcpy(&session->bc_attrs, &res->bc_attrs,
+                               sizeof(session->bc_attrs));
 }
 
 static int _nfs4_proc_create_session(struct nfs_client *clp,
@@ -7224,11 +7251,12 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
        struct nfs4_session *session = clp->cl_session;
        struct nfs41_create_session_args args = {
                .client = clp,
+               .clientid = clp->cl_clientid,
+               .seqid = clp->cl_seqid,
                .cb_program = NFS4_CALLBACK,
        };
-       struct nfs41_create_session_res res = {
-               .client = clp,
-       };
+       struct nfs41_create_session_res res;
+
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
                .rpc_argp = &args,
@@ -7245,11 +7273,15 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 
        if (!status) {
                /* Verify the session's negotiated channel_attrs values */
-               status = nfs4_verify_channel_attrs(&args, session);
+               status = nfs4_verify_channel_attrs(&args, &res);
                /* Increment the clientid slot sequence id */
-               clp->cl_seqid++;
+               if (clp->cl_seqid == res.seqid)
+                       clp->cl_seqid++;
+               if (status)
+                       goto out;
+               nfs4_update_session(session, &res);
        }
-
+out:
        return status;
 }
 
@@ -7301,8 +7333,8 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
        dprintk("--> nfs4_proc_destroy_session\n");
 
        /* session is still being setup */
-       if (session->clp->cl_cons_state != NFS_CS_READY)
-               return status;
+       if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
+               return 0;
 
        status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        trace_nfs4_destroy_session(session->clp, status);
index e799dc3c3b1db9f7681199907bff5e7bffb3f853..e23366effcfb1e43bcb81983bcbaeacf2e512002 100644
@@ -450,7 +450,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
        tbl = &ses->fc_slot_table;
        tbl->session = ses;
        status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
-       if (status) /* -ENOMEM */
+       if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
                return status;
        /* Back channel */
        tbl = &ses->bc_slot_table;
index b34ada9bc6a2d03a677e46cc5ab8c4eb7c1fd838..e3ea2c5324d68e92591058e896bf478538302a5a 100644
@@ -70,6 +70,7 @@ struct nfs4_session {
 
 enum nfs4_session_state {
        NFS4_SESSION_INITING,
+       NFS4_SESSION_ESTABLISHED,
 };
 
 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
@@ -118,6 +119,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
        return 0;
 }
 
+static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
+               const struct nfs4_sessionid *src)
+{
+       memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
+}
+
 #ifdef CONFIG_CRC32
 /*
  * nfs_session_id_hash - calculate the crc32 hash for the session id
index 5ad908e9ce9c332696ed52a85a5c4324a45d8b02..f95e3b58bbc304ae381df2a5f1721d8074789c51 100644
@@ -346,9 +346,23 @@ int nfs41_discover_server_trunking(struct nfs_client *clp,
        status = nfs4_proc_exchange_id(clp, cred);
        if (status != NFS4_OK)
                return status;
-       set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 
-       return nfs41_walk_client_list(clp, result, cred);
+       status = nfs41_walk_client_list(clp, result, cred);
+       if (status < 0)
+               return status;
+       if (clp != *result)
+               return 0;
+
+       /* Purge state if the client id was established in a prior instance */
+       if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R)
+               set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+       else
+               set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+       nfs4_schedule_state_manager(clp);
+       status = nfs_wait_client_init_complete(clp);
+       if (status < 0)
+               nfs_put_client(clp);
+       return status;
 }
 
 #endif /* CONFIG_NFS_V4_1 */
index e23a0a664e12d5130162bc320e80a57a5264bc9b..5c399ec41079687791d2fb668d5f6543ce374a2f 100644 (file)
@@ -1715,17 +1715,17 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
 static void encode_bind_conn_to_session(struct xdr_stream *xdr,
-                                  struct nfs4_session *session,
+                                  struct nfs41_bind_conn_to_session_args *args,
                                   struct compound_hdr *hdr)
 {
        __be32 *p;
 
        encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
                decode_bind_conn_to_session_maxsz, hdr);
-       encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+       encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN);
        p = xdr_reserve_space(xdr, 8);
-       *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
-       *p = 0; /* use_conn_in_rdma_mode = False */
+       *p++ = cpu_to_be32(args->dir);
+       *p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
 }
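BIND_CONN_TO_SESSION no longer hard-codes NFS4_CDFC4_BACK_OR_BOTH with use_conn_in_rdma_mode forced to false; both values now come from the new argument structure, so the caller chooses them per connection. A hedged illustration of filling those arguments, using only the fields encoded above plus the nfs4_copy_sessionid() helper added in this series (the function below is illustrative, not part of the patch):

/* Illustration only: prepare arguments for binding the backchannel. */
static void example_fill_bind_conn_args(struct nfs4_session *session,
			struct nfs41_bind_conn_to_session_args *args)
{
	nfs4_copy_sessionid(&args->sessionid, &session->sess_id);
	args->dir = NFS4_CDFC4_BACK_OR_BOTH;	/* direction to bind */
	args->use_conn_in_rdma_mode = false;
}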
 
 static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
@@ -1806,8 +1806,8 @@ static void encode_create_session(struct xdr_stream *xdr,
 
        encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
        p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
-       p = xdr_encode_hyper(p, clp->cl_clientid);
-       *p++ = cpu_to_be32(clp->cl_seqid);                      /*Sequence id */
+       p = xdr_encode_hyper(p, args->clientid);
+       *p++ = cpu_to_be32(args->seqid);                        /*Sequence id */
        *p++ = cpu_to_be32(args->flags);                        /*flags */
 
        /* Fore Channel */
@@ -2734,14 +2734,14 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
  */
 static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
                                struct xdr_stream *xdr,
-                               struct nfs_client *clp)
+                               struct nfs41_bind_conn_to_session_args *args)
 {
        struct compound_hdr hdr = {
-               .minorversion = clp->cl_mvops->minor_version,
+               .minorversion = args->client->cl_mvops->minor_version,
        };
 
        encode_compound_hdr(xdr, req, &hdr);
-       encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+       encode_bind_conn_to_session(xdr, args, &hdr);
        encode_nops(&hdr);
 }
 
@@ -5613,7 +5613,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr,
 
        status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
        if (!status)
-               status = decode_sessionid(xdr, &res->session->sess_id);
+               status = decode_sessionid(xdr, &res->sessionid);
        if (unlikely(status))
                return status;
 
@@ -5641,12 +5641,10 @@ static int decode_create_session(struct xdr_stream *xdr,
 {
        __be32 *p;
        int status;
-       struct nfs_client *clp = res->client;
-       struct nfs4_session *session = clp->cl_session;
 
        status = decode_op_hdr(xdr, OP_CREATE_SESSION);
        if (!status)
-               status = decode_sessionid(xdr, &session->sess_id);
+               status = decode_sessionid(xdr, &res->sessionid);
        if (unlikely(status))
                return status;
 
@@ -5654,13 +5652,13 @@ static int decode_create_session(struct xdr_stream *xdr,
        p = xdr_inline_decode(xdr, 8);
        if (unlikely(!p))
                goto out_overflow;
-       clp->cl_seqid = be32_to_cpup(p++);
-       session->flags = be32_to_cpup(p);
+       res->seqid = be32_to_cpup(p++);
+       res->flags = be32_to_cpup(p);
 
        /* Channel attributes */
-       status = decode_chan_attrs(xdr, &session->fc_attrs);
+       status = decode_chan_attrs(xdr, &res->fc_attrs);
        if (!status)
-               status = decode_chan_attrs(xdr, &session->bc_attrs);
+               status = decode_chan_attrs(xdr, &res->bc_attrs);
        return status;
 out_overflow:
        print_overflow_msg(__func__, xdr);
index 797cd6253adf74d809510151080d1d06439f378d..635f0865671cf38b27eea4a49261c9405a4a4731 100644 (file)
@@ -344,6 +344,10 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
                                                 struct xdr_stream *xdr,
                                                 gfp_t gfp_flags);
+void pnfs_layout_mark_request_commit(struct nfs_page *req,
+                                    struct pnfs_layout_segment *lseg,
+                                    struct nfs_commit_info *cinfo,
+                                    u32 ds_commit_idx);
 
 static inline bool nfs_have_layout(struct inode *inode)
 {
index fdc4f6562bb7efc65179f970053dfec6a9c38705..54e36b38fb5f89310287635e0838601ad07cf34a 100644 (file)
@@ -838,3 +838,33 @@ out_err:
        return NULL;
 }
 EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
+
+void
+pnfs_layout_mark_request_commit(struct nfs_page *req,
+                               struct pnfs_layout_segment *lseg,
+                               struct nfs_commit_info *cinfo,
+                               u32 ds_commit_idx)
+{
+       struct list_head *list;
+       struct pnfs_commit_bucket *buckets;
+
+       spin_lock(cinfo->lock);
+       buckets = cinfo->ds->buckets;
+       list = &buckets[ds_commit_idx].written;
+       if (list_empty(list)) {
+               /* Non-empty buckets hold a reference on the lseg.  That ref
+                * is normally transferred to the COMMIT call and released
+                * there.  It could also be released if the last req is pulled
+                * off due to a rewrite, in which case it will be done in
+                * pnfs_common_clear_request_commit
+                */
+               WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+               buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+       }
+       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+       cinfo->ds->nwritten++;
+       spin_unlock(cinfo->lock);
+
+       nfs_request_add_commit_list(req, list, cinfo);
+}
+EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
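pnfs_layout_mark_request_commit() is exported so that layout drivers can share one implementation of bucket-based commit tracking rather than each open-coding the lseg reference and accounting logic described in the comment above. A sketch of how a driver would use it, under the assumption that it exposes a commit-marking hook with this signature (the hook itself is not shown in this diff):

/* Sketch under the stated assumption: a layout driver simply forwards its
 * commit-marking hook to the shared helper exported above.
 */
static void example_ld_mark_request_commit(struct nfs_page *req,
					   struct pnfs_layout_segment *lseg,
					   struct nfs_commit_info *cinfo,
					   u32 ds_commit_idx)
{
	pnfs_layout_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
}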
index b09cc23d6f433bc5ea8aff6cfe68c3910c4f8319..c63189acd0523cf733ab494dc85c1acf0cdb8f33 100644 (file)
@@ -139,7 +139,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        nfs_fattr_init(fattr);
        status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
        if (status == 0)
-               nfs_setattr_update_inode(inode, sattr);
+               nfs_setattr_update_inode(inode, sattr, fattr);
        dprintk("NFS reply setattr: %d\n", status);
        return status;
 }
@@ -609,10 +609,8 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-       struct inode *inode = hdr->inode;
-
        if (task->tk_status >= 0)
-               nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr);
+               nfs_writeback_update_inode(hdr);
        return 0;
 }
 
index 88a6d2196ece3bf5ce7a94027dd96e8f25bc9792..849ed784d6ac1fc6b923c32278e089f87575b8db 100644 (file)
@@ -789,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
        nfs_list_add_request(req, dst);
        cinfo->mds->ncommit++;
        spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
-       }
+       if (!cinfo->dreq)
+               nfs_mark_page_unstable(req->wb_page);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -1382,6 +1377,36 @@ static int nfs_should_remove_suid(const struct inode *inode)
        return 0;
 }
 
+static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
+               struct nfs_fattr *fattr)
+{
+       struct nfs_pgio_args *argp = &hdr->args;
+       struct nfs_pgio_res *resp = &hdr->res;
+
+       if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+               return;
+       if (argp->offset + resp->count != fattr->size)
+               return;
+       if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+               return;
+       /* Set attribute barrier */
+       nfs_fattr_set_barrier(fattr);
+}
+
+void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
+{
+       struct nfs_fattr *fattr = hdr->res.fattr;
+       struct inode *inode = hdr->inode;
+
+       if (fattr == NULL)
+               return;
+       spin_lock(&inode->i_lock);
+       nfs_writeback_check_extend(hdr, fattr);
+       nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
+       spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
+
 /*
  * This function is called when the WRITE call is complete.
  */
@@ -1605,11 +1630,8 @@ void nfs_retry_commit(struct list_head *page_list,
                req = nfs_list_entry(page_list->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
-               if (!cinfo->dreq) {
-                       dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-                       dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                                    BDI_RECLAIMABLE);
-               }
+               if (!cinfo->dreq)
+                       nfs_clear_page_commit(req->wb_page);
                nfs_unlock_and_release_request(req);
        }
 }
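The open-coded NR_UNSTABLE_NFS and BDI_RECLAIMABLE accounting removed in the two hunks above is folded into a pair of helpers whose definitions are not shown here. Judging purely from the blocks they replace, they would amount to roughly the following (a sketch, not the real header definitions):

/* Sketch inferred from the removed open-coded blocks above. */
static inline void nfs_mark_page_unstable_sketch(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;

	inc_zone_page_state(page, NR_UNSTABLE_NFS);
	inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static inline void nfs_clear_page_commit_sketch(struct page *page)
{
	dec_zone_page_state(page, NR_UNSTABLE_NFS);
	dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host),
		     BDI_RECLAIMABLE);
}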
index cc6a76072009262475c656307e454bb7ad6e44a1..1c307f02baa89e79f0a6d2889631d3ef50ed0a48 100644 (file)
@@ -583,7 +583,7 @@ nfs4_reset_recoverydir(char *recdir)
        if (status)
                return status;
        status = -ENOTDIR;
-       if (S_ISDIR(path.dentry->d_inode->i_mode)) {
+       if (d_is_dir(path.dentry)) {
                strcpy(user_recovery_dirname, recdir);
                status = 0;
        }
@@ -1426,7 +1426,7 @@ nfsd4_client_tracking_init(struct net *net)
        nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
        status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
        if (!status) {
-               status = S_ISDIR(path.dentry->d_inode->i_mode);
+               status = d_is_dir(path.dentry);
                path_put(&path);
                if (status)
                        goto do_init;
index f6b2a09f793f453e5b6c40e80ceb383a12af7616..d2f2c37dc2dbd2649399fe2ddad4032a5025d337 100644 (file)
@@ -1638,7 +1638,7 @@ __destroy_client(struct nfs4_client *clp)
                nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_revoked)) {
-               dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+               dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                nfs4_put_stid(&dp->dl_stid);
        }
index 965b478d50fc40a97a85243e478d9e47e3c38a51..e9fa966fc37fe5415f9fba50b61b37d459172303 100644 (file)
@@ -114,8 +114,8 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
         * We're exposing only the directories and symlinks that have to be
         * traversed on the way to real exports:
         */
-       if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
-                    !S_ISLNK(dentry->d_inode->i_mode)))
+       if (unlikely(!d_is_dir(dentry) &&
+                    !d_is_symlink(dentry)))
                return nfserr_stale;
        /*
         * A pseudoroot export gives permission to access only one
@@ -259,7 +259,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
                goto out;
        }
 
-       if (S_ISDIR(dentry->d_inode->i_mode) &&
+       if (d_is_dir(dentry) &&
                        (dentry->d_flags & DCACHE_DISCONNECTED)) {
                printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
                                dentry);
@@ -414,7 +414,7 @@ static inline void _fh_update_old(struct dentry *dentry,
 {
        fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
        fh->ofh_generation = dentry->d_inode->i_generation;
-       if (S_ISDIR(dentry->d_inode->i_mode) ||
+       if (d_is_dir(dentry) ||
            (exp->ex_flags & NFSEXP_NOSUBTREECHECK))
                fh->ofh_dirino = 0;
 }
index 5685c679dd93d4371626de7d6107a9ac66a98093..36852658242943051f1a1cac7d4b6a8c1945f84c 100644 (file)
@@ -615,9 +615,9 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
        export = fhp->fh_export;
        dentry = fhp->fh_dentry;
 
-       if (S_ISREG(dentry->d_inode->i_mode))
+       if (d_is_reg(dentry))
                map = nfs3_regaccess;
-       else if (S_ISDIR(dentry->d_inode->i_mode))
+       else if (d_is_dir(dentry))
                map = nfs3_diraccess;
        else
                map = nfs3_anyaccess;
@@ -1402,7 +1402,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
                switch (createmode) {
                case NFS3_CREATE_UNCHECKED:
-                       if (! S_ISREG(dchild->d_inode->i_mode))
+                       if (! d_is_reg(dchild))
                                goto out;
                        else if (truncp) {
                                /* in nfsv4, we need to treat this case a little
@@ -1615,7 +1615,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        if (err)
                goto out;
        err = nfserr_isdir;
-       if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
+       if (d_is_dir(tfhp->fh_dentry))
                goto out;
        err = nfserr_perm;
        if (!len)
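Throughout these nfsd paths, S_ISDIR/S_ISREG/S_ISLNK tests on dentry->d_inode->i_mode give way to the d_is_dir()/d_is_reg()/d_is_symlink() dcache helpers, which consult the dentry's cached type flags instead of dereferencing d_inode. For a positive dentry the answer is the same; the helpers are simply safe to call even when d_inode is NULL. A small illustration of the same dispatch pattern (not taken from the patch):

/* Illustration only: type dispatch via the dcache helpers, mirroring the
 * nfsd_access() change above.
 */
static const char *example_dentry_kind(const struct dentry *dentry)
{
	if (d_is_reg(dentry))
		return "regular file";
	if (d_is_dir(dentry))
		return "directory";
	if (d_is_symlink(dentry))
		return "symlink";
	return "other";
}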
index b2e3ff34762070a4b37085c051223809b59e4a6c..ecdbae19a766d914e7d68c005c14356455dc83af 100644 (file)
@@ -31,6 +31,8 @@
 #include "alloc.h"
 #include "dat.h"
 
+static void __nilfs_btree_init(struct nilfs_bmap *bmap);
+
 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
 {
        struct nilfs_btree_path *path;
@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
        return ret;
 }
 
+/**
+ * nilfs_btree_root_broken - verify consistency of btree root node
+ * @node: btree root node to be examined
+ * @ino: inode number
+ *
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ */
+static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+                                  unsigned long ino)
+{
+       int level, flags, nchildren;
+       int ret = 0;
+
+       level = nilfs_btree_node_get_level(node);
+       flags = nilfs_btree_node_get_flags(node);
+       nchildren = nilfs_btree_node_get_nchildren(node);
+
+       if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+                    level > NILFS_BTREE_LEVEL_MAX ||
+                    nchildren < 0 ||
+                    nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+               pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+                       ino, level, flags, nchildren);
+               ret = 1;
+       }
+       return ret;
+}
+
 int nilfs_btree_broken_node_block(struct buffer_head *bh)
 {
        int ret;
@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
 
        /* convert and insert */
        dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
-       nilfs_btree_init(btree);
+       __nilfs_btree_init(btree);
        if (nreq != NULL) {
                nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
                nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
        .bop_gather_data        =       NULL,
 };
 
-int nilfs_btree_init(struct nilfs_bmap *bmap)
+static void __nilfs_btree_init(struct nilfs_bmap *bmap)
 {
        bmap->b_ops = &nilfs_btree_ops;
        bmap->b_nchildren_per_block =
                NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
-       return 0;
+}
+
+int nilfs_btree_init(struct nilfs_bmap *bmap)
+{
+       int ret = 0;
+
+       __nilfs_btree_init(bmap);
+
+       if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
+                                   bmap->b_inode->i_ino))
+               ret = -EIO;
+       return ret;
 }
 
 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
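nilfs_btree_init() now validates the on-disk root node via nilfs_btree_root_broken() and returns -EIO instead of always succeeding, while __nilfs_btree_init() keeps the old unconditional setup for the convert-and-insert path, which rebuilds the root itself. A sketch of how a caller would propagate the new error (the caller name is illustrative, not from this diff):

/* Sketch only: callers can no longer assume the btree setup succeeds. */
static int example_attach_btree(struct nilfs_bmap *bmap)
{
	int err = nilfs_btree_init(bmap);	/* validates the root node */

	if (err)
		return err;			/* -EIO on a corrupted root */
	return 0;
}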
index 469086b9f99bc8e20053e492237d48a3373bdb37..0c3f303baf32f196af9360f8f78773f29a31e139 100644 (file)
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
        struct nilfs_inode_info *ii, *n;
+       int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
        int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                brelse(ii->i_bh);
                ii->i_bh = NULL;
                list_del_init(&ii->i_dirty);
-               if (!ii->vfs_inode.i_nlink) {
+               if (!ii->vfs_inode.i_nlink || during_mount) {
                        /*
-                        * Defer calling iput() to avoid a deadlock
-                        * over I_SYNC flag for inodes with i_nlink == 0
+                        * Defer calling iput() to avoid deadlocks if
+                        * i_nlink == 0 or mount is not yet finished.
                         */
                        list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
                        defer_iput = true;
index 51ceb81072847441135303dd752f57e6a97e2646..d2f97ecca6a5dfe6091d56da09871449574524bf 100644 (file)
@@ -115,8 +115,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                return false;
 
        /* sorry, fanotify only gives a damn about files and dirs */
-       if (!S_ISREG(path->dentry->d_inode->i_mode) &&
-           !S_ISDIR(path->dentry->d_inode->i_mode))
+       if (!d_is_reg(path->dentry) &&
+           !d_can_lookup(path->dentry))
                return false;
 
        if (inode_mark && vfsmnt_mark) {
@@ -139,11 +139,12 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                BUG();
        }
 
-       if (S_ISDIR(path->dentry->d_inode->i_mode) &&
+       if (d_is_dir(path->dentry) &&
            !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
                return false;
 
-       if (event_mask & marks_mask & ~marks_ignored_mask)
+       if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
+                                ~marks_ignored_mask)
                return true;
 
        return false;
index 8490c64d34fef4fd0421c4bb7e7c7ea9f6d87bca..460c6c37e683f844bd9612d51e6b89224cabb22b 100644 (file)
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
 
 static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
 {
-       if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+       if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
                return 1;
        return 0;
 }
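The append-direct-IO feature bit moves from the ro-compat set to the incompat set (see the header hunk below). The practical effect is that a kernel without support now refuses to mount such a volume at all, instead of falling back to a read-only mount as it would for an unknown ro-compat bit. A hedged illustration of the usual incompat check (not code from this patch):

/* Illustration only, following the standard feature-bit convention: any
 * incompat bit this kernel does not know about makes the mount fail.
 */
static int example_check_incompat(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & ~OCFS2_FEATURE_INCOMPAT_SUPP)
		return -EINVAL;		/* refuse the mount outright */
	return 0;
}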
index 20e37a3ed26f3eb721ad45c2699a6332a098cee8..db64ce2d4667be86ca27aecc304c1125667906f6 100644 (file)
                                         | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \
                                         | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \
                                         | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG  \
-                                        | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO)
+                                        | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \
+                                        | OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
 #define OCFS2_FEATURE_RO_COMPAT_SUPP   (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
                                         | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
-                                        | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \
-                                        | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+                                        | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
 
 /*
  * Heartbeat-only devices are missing journals and other files.  The
  */
 #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO     0x4000
 
+/*
+ * Append Direct IO support
+ */
+#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO      0x8000
+
 /*
  * backup superblock flag is used to indicate that this volume
  * has backup superblocks.
 #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA       0x0002
 #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA       0x0004
 
-/*
- * Append Direct IO support
- */
-#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO     0x0008
 
 /* The byte offset of the first backup block will be 1G.
  * The following will be 4G, 16G, 64G, 256G and 1T.
index ea10a87191072339b1352c01f0261da054701a9b..24f640441bd90977a079aac782768025c68f3712 100644 (file)
@@ -191,7 +191,6 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
                ovl_set_timestamps(upperdentry, stat);
 
        return err;
-
 }
 
 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
@@ -385,7 +384,7 @@ int ovl_copy_up(struct dentry *dentry)
                struct kstat stat;
                enum ovl_path_type type = ovl_path_type(dentry);
 
-               if (type != OVL_PATH_LOWER)
+               if (OVL_TYPE_UPPER(type))
                        break;
 
                next = dget(dentry);
@@ -394,7 +393,7 @@ int ovl_copy_up(struct dentry *dentry)
                        parent = dget_parent(next);
 
                        type = ovl_path_type(parent);
-                       if (type != OVL_PATH_LOWER)
+                       if (OVL_TYPE_UPPER(type))
                                break;
 
                        dput(next);
index 8ffc4b980f1b68641c17a7bfb67979f205c658b5..d139405d2bfad7cfd94c735913ecebf221def5b5 100644 (file)
@@ -19,7 +19,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        int err;
 
        dget(wdentry);
-       if (S_ISDIR(wdentry->d_inode->i_mode))
+       if (d_is_dir(wdentry))
                err = ovl_do_rmdir(wdir, wdentry);
        else
                err = ovl_do_unlink(wdir, wdentry);
@@ -118,14 +118,14 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 
 static int ovl_set_opaque(struct dentry *upperdentry)
 {
-       return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
+       return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
 }
 
 static void ovl_remove_opaque(struct dentry *upperdentry)
 {
        int err;
 
-       err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr);
+       err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
        if (err) {
                pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
                        upperdentry->d_name.name, err);
@@ -152,7 +152,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
         * correct link count.  nlink=1 seems to pacify 'find' and
         * other utilities.
         */
-       if (type == OVL_PATH_MERGE)
+       if (OVL_TYPE_MERGE(type))
                stat->nlink = 1;
 
        return 0;
@@ -506,7 +506,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *opaquedir = NULL;
        int err;
 
-       if (is_dir) {
+       if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
                opaquedir = ovl_check_empty_and_clear(dentry);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir))
@@ -630,7 +630,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                goto out_drop_write;
 
        type = ovl_path_type(dentry);
-       if (type == OVL_PATH_PURE_UPPER) {
+       if (OVL_TYPE_PURE_UPPER(type)) {
                err = ovl_remove_upper(dentry, is_dir);
        } else {
                const struct cred *old_cred;
@@ -693,7 +693,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        bool new_create = false;
        bool cleanup_whiteout = false;
        bool overwrite = !(flags & RENAME_EXCHANGE);
-       bool is_dir = S_ISDIR(old->d_inode->i_mode);
+       bool is_dir = d_is_dir(old);
        bool new_is_dir = false;
        struct dentry *opaquedir = NULL;
        const struct cred *old_cred = NULL;
@@ -712,7 +712,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        /* Don't copy up directory trees */
        old_type = ovl_path_type(old);
        err = -EXDEV;
-       if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir)
+       if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
                goto out;
 
        if (new->d_inode) {
@@ -720,30 +720,30 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                if (err)
                        goto out;
 
-               if (S_ISDIR(new->d_inode->i_mode))
+               if (d_is_dir(new))
                        new_is_dir = true;
 
                new_type = ovl_path_type(new);
                err = -EXDEV;
-               if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir)
+               if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
                        goto out;
 
                err = 0;
-               if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
+               if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
                        if (ovl_dentry_lower(old)->d_inode ==
                            ovl_dentry_lower(new)->d_inode)
                                goto out;
                }
-               if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
+               if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
                        if (ovl_dentry_upper(old)->d_inode ==
                            ovl_dentry_upper(new)->d_inode)
                                goto out;
                }
        } else {
                if (ovl_dentry_is_opaque(new))
-                       new_type = OVL_PATH_UPPER;
+                       new_type = __OVL_PATH_UPPER;
                else
-                       new_type = OVL_PATH_PURE_UPPER;
+                       new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
        }
 
        err = ovl_want_write(old);
@@ -763,8 +763,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                        goto out_drop_write;
        }
 
-       old_opaque = old_type != OVL_PATH_PURE_UPPER;
-       new_opaque = new_type != OVL_PATH_PURE_UPPER;
+       old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
+       new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
 
        if (old_opaque || new_opaque) {
                err = -ENOMEM;
@@ -787,7 +787,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                old_cred = override_creds(override_cred);
        }
 
-       if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
+       if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
                opaquedir = ovl_check_empty_and_clear(new);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir)) {
index 07d74b24913bdee757377d103427883fb7d12e70..04f1248846877d019625c861b474702177b38ae5 100644 (file)
@@ -205,7 +205,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
 
 static bool ovl_is_private_xattr(const char *name)
 {
-       return strncmp(name, "trusted.overlay.", 14) == 0;
+       return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
 }
 
 int ovl_setxattr(struct dentry *dentry, const char *name,
@@ -238,7 +238,10 @@ out:
 static bool ovl_need_xattr_filter(struct dentry *dentry,
                                  enum ovl_path_type type)
 {
-       return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+       if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
+               return S_ISDIR(dentry->d_inode->i_mode);
+       else
+               return false;
 }
 
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
@@ -299,7 +302,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
        if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                goto out_drop_write;
 
-       if (type == OVL_PATH_LOWER) {
+       if (!OVL_TYPE_UPPER(type)) {
                err = vfs_getxattr(realpath.dentry, name, NULL, 0);
                if (err < 0)
                        goto out_drop_write;
@@ -321,7 +324,7 @@ out:
 static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
                                  struct dentry *realdentry)
 {
-       if (type != OVL_PATH_LOWER)
+       if (OVL_TYPE_UPPER(type))
                return false;
 
        if (special_file(realdentry->d_inode->i_mode))
@@ -430,5 +433,4 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
        }
 
        return inode;
-
 }
index 814bed33dd078c00ade8ed534ae19e969530ccdb..17ac5afc9ffbce150d03e352fd7aaa99f101aa0c 100644 (file)
 struct ovl_entry;
 
 enum ovl_path_type {
-       OVL_PATH_PURE_UPPER,
-       OVL_PATH_UPPER,
-       OVL_PATH_MERGE,
-       OVL_PATH_LOWER,
+       __OVL_PATH_PURE         = (1 << 0),
+       __OVL_PATH_UPPER        = (1 << 1),
+       __OVL_PATH_MERGE        = (1 << 2),
 };
 
-extern const char *ovl_opaque_xattr;
+#define OVL_TYPE_UPPER(type)   ((type) & __OVL_PATH_UPPER)
+#define OVL_TYPE_MERGE(type)   ((type) & __OVL_PATH_MERGE)
+#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
+#define OVL_TYPE_MERGE_OR_LOWER(type) \
+       (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
+
+#define OVL_XATTR_PRE_NAME "trusted.overlay."
+#define OVL_XATTR_PRE_LEN  16
+#define OVL_XATTR_OPAQUE   OVL_XATTR_PRE_NAME"opaque"
 
 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -130,6 +137,7 @@ void ovl_dentry_version_inc(struct dentry *dentry);
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
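enum ovl_path_type changes from four mutually exclusive values to a bitmask, so callers must use the OVL_TYPE_*() macros rather than equality tests; a single dentry can now be upper and merged at the same time. A small illustration of the new semantics (not part of the patch):

/* Illustration only: a directory with an upper copy and at least one lower
 * copy is both UPPER and MERGE, and is not PURE_UPPER.
 */
static inline void ovl_type_example(void)
{
	enum ovl_path_type t = __OVL_PATH_UPPER | __OVL_PATH_MERGE;

	WARN_ON(!OVL_TYPE_UPPER(t));		/* has an upper dentry    */
	WARN_ON(!OVL_TYPE_MERGE(t));		/* readdir must merge     */
	WARN_ON(OVL_TYPE_PURE_UPPER(t));	/* not purely upper       */
	WARN_ON(!OVL_TYPE_MERGE_OR_LOWER(t));	/* whiteout rules apply   */
}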
index c0205990a9f54d03f77909555e368efc5a90bd6f..907870e81a72e36f4c5abb29fd34e23b4307efc5 100644 (file)
@@ -24,7 +24,6 @@ struct ovl_cache_entry {
        struct list_head l_node;
        struct rb_node node;
        bool is_whiteout;
-       bool is_cursor;
        char name[];
 };
 
@@ -40,6 +39,7 @@ struct ovl_readdir_data {
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
+       struct dentry *dir;
        int count;
        int err;
 };
@@ -48,7 +48,7 @@ struct ovl_dir_file {
        bool is_real;
        bool is_upper;
        struct ovl_dir_cache *cache;
-       struct ovl_cache_entry cursor;
+       struct list_head *cursor;
        struct file *realfile;
        struct file *upperfile;
 };
@@ -79,23 +79,49 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
        return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+                                                  const char *name, int len,
                                                   u64 ino, unsigned int d_type)
 {
        struct ovl_cache_entry *p;
        size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
 
        p = kmalloc(size, GFP_KERNEL);
-       if (p) {
-               memcpy(p->name, name, len);
-               p->name[len] = '\0';
-               p->len = len;
-               p->type = d_type;
-               p->ino = ino;
-               p->is_whiteout = false;
-               p->is_cursor = false;
-       }
+       if (!p)
+               return NULL;
+
+       memcpy(p->name, name, len);
+       p->name[len] = '\0';
+       p->len = len;
+       p->type = d_type;
+       p->ino = ino;
+       p->is_whiteout = false;
+
+       if (d_type == DT_CHR) {
+               struct dentry *dentry;
+               const struct cred *old_cred;
+               struct cred *override_cred;
+
+               override_cred = prepare_creds();
+               if (!override_cred) {
+                       kfree(p);
+                       return NULL;
+               }
+
+               /*
+                * CAP_DAC_OVERRIDE for lookup
+                */
+               cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+               old_cred = override_creds(override_cred);
 
+               dentry = lookup_one_len(name, dir, len);
+               if (!IS_ERR(dentry)) {
+                       p->is_whiteout = ovl_is_whiteout(dentry);
+                       dput(dentry);
+               }
+               revert_creds(old_cred);
+               put_cred(override_cred);
+       }
        return p;
 }
 
@@ -122,7 +148,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                        return 0;
        }
 
-       p = ovl_cache_entry_new(name, len, ino, d_type);
+       p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
        if (p == NULL)
                return -ENOMEM;
 
@@ -143,7 +169,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
-               p = ovl_cache_entry_new(name, namelen, ino, d_type);
+               p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
@@ -168,7 +194,6 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
 {
        struct ovl_dir_cache *cache = od->cache;
 
-       list_del_init(&od->cursor.l_node);
        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
@@ -204,6 +229,7 @@ static inline int ovl_dir_read(struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
+       rdd->dir = realpath->dentry;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
@@ -227,108 +253,58 @@ static void ovl_dir_reset(struct file *file)
        if (cache && ovl_dentry_version_get(dentry) != cache->version) {
                ovl_cache_put(od, dentry);
                od->cache = NULL;
+               od->cursor = NULL;
        }
-       WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
-       if (od->is_real && type == OVL_PATH_MERGE)
+       WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
+       if (od->is_real && OVL_TYPE_MERGE(type))
                od->is_real = false;
 }
 
-static int ovl_dir_mark_whiteouts(struct dentry *dir,
-                                 struct ovl_readdir_data *rdd)
-{
-       struct ovl_cache_entry *p;
-       struct dentry *dentry;
-       const struct cred *old_cred;
-       struct cred *override_cred;
-
-       override_cred = prepare_creds();
-       if (!override_cred) {
-               ovl_cache_free(rdd->list);
-               return -ENOMEM;
-       }
-
-       /*
-        * CAP_DAC_OVERRIDE for lookup
-        */
-       cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-       old_cred = override_creds(override_cred);
-
-       mutex_lock(&dir->d_inode->i_mutex);
-       list_for_each_entry(p, rdd->list, l_node) {
-               if (p->is_cursor)
-                       continue;
-
-               if (p->type != DT_CHR)
-                       continue;
-
-               dentry = lookup_one_len(p->name, dir, p->len);
-               if (IS_ERR(dentry))
-                       continue;
-
-               p->is_whiteout = ovl_is_whiteout(dentry);
-               dput(dentry);
-       }
-       mutex_unlock(&dir->d_inode->i_mutex);
-
-       revert_creds(old_cred);
-       put_cred(override_cred);
-
-       return 0;
-}
-
 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
        int err;
-       struct path lowerpath;
-       struct path upperpath;
+       struct path realpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .list = list,
                .root = RB_ROOT,
                .is_merge = false,
        };
+       int idx, next;
 
-       ovl_path_lower(dentry, &lowerpath);
-       ovl_path_upper(dentry, &upperpath);
+       for (idx = 0; idx != -1; idx = next) {
+               next = ovl_path_next(idx, dentry, &realpath);
 
-       if (upperpath.dentry) {
-               err = ovl_dir_read(&upperpath, &rdd);
-               if (err)
-                       goto out;
-
-               if (lowerpath.dentry) {
-                       err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
+               if (next != -1) {
+                       err = ovl_dir_read(&realpath, &rdd);
                        if (err)
-                               goto out;
+                               break;
+               } else {
+                       /*
+                        * Insert lowest layer entries before upper ones, this
+                        * allows offsets to be reasonably constant
+                        */
+                       list_add(&rdd.middle, rdd.list);
+                       rdd.is_merge = true;
+                       err = ovl_dir_read(&realpath, &rdd);
+                       list_del(&rdd.middle);
                }
        }
-       if (lowerpath.dentry) {
-               /*
-                * Insert lowerpath entries before upperpath ones, this allows
-                * offsets to be reasonably constant
-                */
-               list_add(&rdd.middle, rdd.list);
-               rdd.is_merge = true;
-               err = ovl_dir_read(&lowerpath, &rdd);
-               list_del(&rdd.middle);
-       }
-out:
        return err;
 }
 
 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 {
-       struct ovl_cache_entry *p;
+       struct list_head *p;
        loff_t off = 0;
 
-       list_for_each_entry(p, &od->cache->entries, l_node) {
-               if (p->is_cursor)
-                       continue;
+       list_for_each(p, &od->cache->entries) {
                if (off >= pos)
                        break;
                off++;
        }
-       list_move_tail(&od->cursor.l_node, &p->l_node);
+       /* Cursor is safe since the cache is stable */
+       od->cursor = p;
 }
 
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
@@ -367,6 +343,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 {
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
+       struct ovl_cache_entry *p;
 
        if (!ctx->pos)
                ovl_dir_reset(file);
@@ -385,19 +362,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                ovl_seek_cursor(od, ctx->pos);
        }
 
-       while (od->cursor.l_node.next != &od->cache->entries) {
-               struct ovl_cache_entry *p;
-
-               p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
-               /* Skip cursors */
-               if (!p->is_cursor) {
-                       if (!p->is_whiteout) {
-                               if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
-                                       break;
-                       }
-                       ctx->pos++;
-               }
-               list_move(&od->cursor.l_node, &p->l_node);
+       while (od->cursor != &od->cache->entries) {
+               p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
+               if (!p->is_whiteout)
+                       if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
+                               break;
+               od->cursor = p->l_node.next;
+               ctx->pos++;
        }
        return 0;
 }
@@ -452,7 +423,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
-       if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
+       if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);
 
                realfile = lockless_dereference(od->upperfile);
@@ -516,11 +487,9 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
                kfree(od);
                return PTR_ERR(realfile);
        }
-       INIT_LIST_HEAD(&od->cursor.l_node);
        od->realfile = realfile;
-       od->is_real = (type != OVL_PATH_MERGE);
-       od->is_upper = (type != OVL_PATH_LOWER);
-       od->cursor.is_cursor = true;
+       od->is_real = !OVL_TYPE_MERGE(type);
+       od->is_upper = OVL_TYPE_UPPER(type);
        file->private_data = od;
 
        return 0;
index f16d318b71f8bbe4e77f8a3214e616101848e49c..5f0d1993e6e3952bda9352d231e8fce7dee838e8 100644 (file)
@@ -35,7 +35,8 @@ struct ovl_config {
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
        struct vfsmount *upper_mnt;
-       struct vfsmount *lower_mnt;
+       unsigned numlower;
+       struct vfsmount **lower_mnt;
        struct dentry *workdir;
        long lower_namelen;
        /* pathnames of lower and upper dirs, for show_options */
@@ -47,7 +48,6 @@ struct ovl_dir_cache;
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
        struct dentry *__upperdentry;
-       struct dentry *lowerdentry;
        struct ovl_dir_cache *cache;
        union {
                struct {
@@ -56,30 +56,36 @@ struct ovl_entry {
                };
                struct rcu_head rcu;
        };
+       unsigned numlower;
+       struct path lowerstack[];
 };
 
-const char *ovl_opaque_xattr = "trusted.overlay.opaque";
+#define OVL_MAX_STACK 500
 
+static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
+{
+       return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+}
 
 enum ovl_path_type ovl_path_type(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
+       enum ovl_path_type type = 0;
 
        if (oe->__upperdentry) {
-               if (oe->lowerdentry) {
+               type = __OVL_PATH_UPPER;
+
+               if (oe->numlower) {
                        if (S_ISDIR(dentry->d_inode->i_mode))
-                               return OVL_PATH_MERGE;
-                       else
-                               return OVL_PATH_UPPER;
-               } else {
-                       if (oe->opaque)
-                               return OVL_PATH_UPPER;
-                       else
-                               return OVL_PATH_PURE_UPPER;
+                               type |= __OVL_PATH_MERGE;
+               } else if (!oe->opaque) {
+                       type |= __OVL_PATH_PURE;
                }
        } else {
-               return OVL_PATH_LOWER;
+               if (oe->numlower > 1)
+                       type |= __OVL_PATH_MERGE;
        }
+       return type;
 }
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
@@ -98,10 +104,9 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
 
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
 {
-
        enum ovl_path_type type = ovl_path_type(dentry);
 
-       if (type == OVL_PATH_LOWER)
+       if (!OVL_TYPE_UPPER(type))
                ovl_path_lower(dentry, path);
        else
                ovl_path_upper(dentry, path);
@@ -120,7 +125,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
 
-       return oe->lowerdentry;
+       return __ovl_dentry_lower(oe);
 }
 
 struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -130,7 +135,7 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
 
        realdentry = ovl_upperdentry_dereference(oe);
        if (!realdentry)
-               realdentry = oe->lowerdentry;
+               realdentry = __ovl_dentry_lower(oe);
 
        return realdentry;
 }
@@ -143,7 +148,7 @@ struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
        if (realdentry) {
                *is_upper = true;
        } else {
-               realdentry = oe->lowerdentry;
+               realdentry = __ovl_dentry_lower(oe);
                *is_upper = false;
        }
        return realdentry;
@@ -165,11 +170,9 @@ void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
 
 void ovl_path_lower(struct dentry *dentry, struct path *path)
 {
-       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
        struct ovl_entry *oe = dentry->d_fsdata;
 
-       path->mnt = ofs->lower_mnt;
-       path->dentry = oe->lowerdentry;
+       *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
 }
 
 int ovl_want_write(struct dentry *dentry)
@@ -249,7 +252,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
        if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
                return false;
 
-       res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1);
+       res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
        if (res == 1 && val == 'y')
                return true;
 
@@ -261,8 +264,11 @@ static void ovl_dentry_release(struct dentry *dentry)
        struct ovl_entry *oe = dentry->d_fsdata;
 
        if (oe) {
+               unsigned int i;
+
                dput(oe->__upperdentry);
-               dput(oe->lowerdentry);
+               for (i = 0; i < oe->numlower; i++)
+                       dput(oe->lowerstack[i].dentry);
                kfree_rcu(oe, rcu);
        }
 }
@@ -271,9 +277,15 @@ static const struct dentry_operations ovl_dentry_operations = {
        .d_release = ovl_dentry_release,
 };
 
-static struct ovl_entry *ovl_alloc_entry(void)
+static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
 {
-       return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
+       size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+       struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+
+       if (oe)
+               oe->numlower = numlower;
+
+       return oe;
 }
 
 static inline struct dentry *ovl_lookup_real(struct dentry *dir,
@@ -295,82 +307,154 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
        return dentry;
 }
 
+/*
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       BUG_ON(idx < 0);
+       if (idx == 0) {
+               ovl_path_upper(dentry, path);
+               if (path->dentry)
+                       return oe->numlower ? 1 : -1;
+               idx++;
+       }
+       BUG_ON(idx > oe->numlower);
+       *path = oe->lowerstack[idx - 1];
+
+       return (idx < oe->numlower) ? idx + 1 : -1;
+}
+
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags)
 {
        struct ovl_entry *oe;
-       struct dentry *upperdir;
-       struct dentry *lowerdir;
-       struct dentry *upperdentry = NULL;
-       struct dentry *lowerdentry = NULL;
+       struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+       struct path *stack = NULL;
+       struct dentry *upperdir, *upperdentry = NULL;
+       unsigned int ctr = 0;
        struct inode *inode = NULL;
+       bool upperopaque = false;
+       struct dentry *this, *prev = NULL;
+       unsigned int i;
        int err;
 
-       err = -ENOMEM;
-       oe = ovl_alloc_entry();
-       if (!oe)
-               goto out;
-
-       upperdir = ovl_dentry_upper(dentry->d_parent);
-       lowerdir = ovl_dentry_lower(dentry->d_parent);
-
+       upperdir = ovl_upperdentry_dereference(poe);
        if (upperdir) {
-               upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
-               err = PTR_ERR(upperdentry);
-               if (IS_ERR(upperdentry))
-                       goto out_put_dir;
-
-               if (lowerdir && upperdentry) {
-                       if (ovl_is_whiteout(upperdentry)) {
-                               dput(upperdentry);
-                               upperdentry = NULL;
-                               oe->opaque = true;
-                       } else if (ovl_is_opaquedir(upperdentry)) {
-                               oe->opaque = true;
+               this = ovl_lookup_real(upperdir, &dentry->d_name);
+               err = PTR_ERR(this);
+               if (IS_ERR(this))
+                       goto out;
+
+               if (this) {
+                       if (ovl_is_whiteout(this)) {
+                               dput(this);
+                               this = NULL;
+                               upperopaque = true;
+                       } else if (poe->numlower && ovl_is_opaquedir(this)) {
+                               upperopaque = true;
                        }
                }
+               upperdentry = prev = this;
        }
-       if (lowerdir && !oe->opaque) {
-               lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
-               err = PTR_ERR(lowerdentry);
-               if (IS_ERR(lowerdentry))
-                       goto out_dput_upper;
+
+       if (!upperopaque && poe->numlower) {
+               err = -ENOMEM;
+               stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL);
+               if (!stack)
+                       goto out_put_upper;
        }
 
-       if (lowerdentry && upperdentry &&
-           (!S_ISDIR(upperdentry->d_inode->i_mode) ||
-            !S_ISDIR(lowerdentry->d_inode->i_mode))) {
-               dput(lowerdentry);
-               lowerdentry = NULL;
-               oe->opaque = true;
+       for (i = 0; !upperopaque && i < poe->numlower; i++) {
+               bool opaque = false;
+               struct path lowerpath = poe->lowerstack[i];
+
+               this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
+               err = PTR_ERR(this);
+               if (IS_ERR(this)) {
+                       /*
+                        * If it's positive, then treat ENAMETOOLONG as ENOENT.
+                        */
+                       if (err == -ENAMETOOLONG && (upperdentry || ctr))
+                               continue;
+                       goto out_put;
+               }
+               if (!this)
+                       continue;
+               if (ovl_is_whiteout(this)) {
+                       dput(this);
+                       break;
+               }
+               /*
+                * Only makes sense to check opaque dir if this is not the
+                * lowermost layer.
+                */
+               if (i < poe->numlower - 1 && ovl_is_opaquedir(this))
+                       opaque = true;
+
+               if (prev && (!S_ISDIR(prev->d_inode->i_mode) ||
+                            !S_ISDIR(this->d_inode->i_mode))) {
+                       /*
+                        * FIXME: check for upper-opaqueness maybe better done
+                        * in remove code.
+                        */
+                       if (prev == upperdentry)
+                               upperopaque = true;
+                       dput(this);
+                       break;
+               }
+               /*
+                * If this is a non-directory then stop here.
+                */
+               if (!S_ISDIR(this->d_inode->i_mode))
+                       opaque = true;
+
+               stack[ctr].dentry = this;
+               stack[ctr].mnt = lowerpath.mnt;
+               ctr++;
+               prev = this;
+               if (opaque)
+                       break;
        }
 
-       if (lowerdentry || upperdentry) {
+       oe = ovl_alloc_entry(ctr);
+       err = -ENOMEM;
+       if (!oe)
+               goto out_put;
+
+       if (upperdentry || ctr) {
                struct dentry *realdentry;
 
-               realdentry = upperdentry ? upperdentry : lowerdentry;
+               realdentry = upperdentry ? upperdentry : stack[0].dentry;
+
                err = -ENOMEM;
                inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
                                      oe);
                if (!inode)
-                       goto out_dput;
+                       goto out_free_oe;
                ovl_copyattr(realdentry->d_inode, inode);
        }
 
+       oe->opaque = upperopaque;
        oe->__upperdentry = upperdentry;
-       oe->lowerdentry = lowerdentry;
-
+       memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+       kfree(stack);
        dentry->d_fsdata = oe;
        d_add(dentry, inode);
 
        return NULL;
 
-out_dput:
-       dput(lowerdentry);
-out_dput_upper:
-       dput(upperdentry);
-out_put_dir:
+out_free_oe:
        kfree(oe);
+out_put:
+       for (i = 0; i < ctr; i++)
+               dput(stack[i].dentry);
+       kfree(stack);
+out_put_upper:
+       dput(upperdentry);
 out:
        return ERR_PTR(err);
 }
@@ -383,10 +467,12 @@ struct file *ovl_path_open(struct path *path, int flags)
 static void ovl_put_super(struct super_block *sb)
 {
        struct ovl_fs *ufs = sb->s_fs_info;
+       unsigned i;
 
        dput(ufs->workdir);
        mntput(ufs->upper_mnt);
-       mntput(ufs->lower_mnt);
+       for (i = 0; i < ufs->numlower; i++)
+               mntput(ufs->lower_mnt[i]);
 
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
@@ -400,7 +486,7 @@ static void ovl_put_super(struct super_block *sb)
  * @buf: The struct kstatfs to fill in with stats
  *
  * Get the filesystem statistics.  As writes always target the upper layer
- * filesystem pass the statfs to the same filesystem.
+ * filesystem pass the statfs to the upper filesystem (if it exists)
  */
 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -409,7 +495,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct path path;
        int err;
 
-       ovl_path_upper(root_dentry, &path);
+       ovl_path_real(root_dentry, &path);
 
        err = vfs_statfs(&path, buf);
        if (!err) {
@@ -432,8 +518,20 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        struct ovl_fs *ufs = sb->s_fs_info;
 
        seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
-       seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-       seq_printf(m, ",workdir=%s", ufs->config.workdir);
+       if (ufs->config.upperdir) {
+               seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
+               seq_printf(m, ",workdir=%s", ufs->config.workdir);
+       }
+       return 0;
+}
+
+static int ovl_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct ovl_fs *ufs = sb->s_fs_info;
+
+       if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
+               return -EROFS;
+
        return 0;
 }
 
@@ -441,6 +539,7 @@ static const struct super_operations ovl_super_operations = {
        .put_super      = ovl_put_super,
        .statfs         = ovl_statfs,
        .show_options   = ovl_show_options,
+       .remount_fs     = ovl_remount,
 };
 
 enum {
@@ -515,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                        break;
 
                default:
+                       pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
                        return -EINVAL;
                }
        }
+
+       /* Workdir is useless in non-upper mount */
+       if (!config->upperdir && config->workdir) {
+               pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+                       config->workdir);
+               kfree(config->workdir);
+               config->workdir = NULL;
+       }
+
        return 0;
 }
 
@@ -585,24 +694,6 @@ static void ovl_unescape(char *s)
        }
 }
 
-static int ovl_mount_dir(const char *name, struct path *path)
-{
-       int err;
-       char *tmp = kstrdup(name, GFP_KERNEL);
-
-       if (!tmp)
-               return -ENOMEM;
-
-       ovl_unescape(tmp);
-       err = kern_path(tmp, LOOKUP_FOLLOW, path);
-       if (err) {
-               pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
-               err = -EINVAL;
-       }
-       kfree(tmp);
-       return err;
-}
-
 static bool ovl_is_allowed_fs_type(struct dentry *root)
 {
        const struct dentry_operations *dop = root->d_op;
@@ -622,6 +713,75 @@ static bool ovl_is_allowed_fs_type(struct dentry *root)
        return true;
 }
 
+static int ovl_mount_dir_noesc(const char *name, struct path *path)
+{
+       int err = -EINVAL;
+
+       if (!*name) {
+               pr_err("overlayfs: empty lowerdir\n");
+               goto out;
+       }
+       err = kern_path(name, LOOKUP_FOLLOW, path);
+       if (err) {
+               pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+               goto out;
+       }
+       err = -EINVAL;
+       if (!ovl_is_allowed_fs_type(path->dentry)) {
+               pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+               goto out_put;
+       }
+       if (!S_ISDIR(path->dentry->d_inode->i_mode)) {
+               pr_err("overlayfs: '%s' not a directory\n", name);
+               goto out_put;
+       }
+       return 0;
+
+out_put:
+       path_put(path);
+out:
+       return err;
+}
+
+static int ovl_mount_dir(const char *name, struct path *path)
+{
+       int err = -ENOMEM;
+       char *tmp = kstrdup(name, GFP_KERNEL);
+
+       if (tmp) {
+               ovl_unescape(tmp);
+               err = ovl_mount_dir_noesc(tmp, path);
+               kfree(tmp);
+       }
+       return err;
+}
+
+static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
+                        int *stack_depth)
+{
+       int err;
+       struct kstatfs statfs;
+
+       err = ovl_mount_dir_noesc(name, path);
+       if (err)
+               goto out;
+
+       err = vfs_statfs(path, &statfs);
+       if (err) {
+               pr_err("overlayfs: statfs failed on '%s'\n", name);
+               goto out_put;
+       }
+       *namelen = max(*namelen, statfs.f_namelen);
+       *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
+
+       return 0;
+
+out_put:
+       path_put(path);
+out:
+       return err;
+}
+
 /* Workdir should not be subdir of upperdir and vice versa */
 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
 {
@@ -634,16 +794,39 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
        return ok;
 }
 
+static unsigned int ovl_split_lowerdirs(char *str)
+{
+       unsigned int ctr = 1;
+       char *s, *d;
+
+       for (s = d = str;; s++, d++) {
+               if (*s == '\\') {
+                       s++;
+               } else if (*s == ':') {
+                       *d = '\0';
+                       ctr++;
+                       continue;
+               }
+               *d = *s;
+               if (!*s)
+                       break;
+       }
+       return ctr;
+}
+
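
ovl_split_lowerdirs() above splits the lowerdir option string on ':' in place, honouring '\' escapes; the resulting components are later walked with strchr(lower, '\0') + 1. A self-contained userspace sketch of the same splitting logic (illustration only, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned int split_lowerdirs(char *str)
    {
            unsigned int ctr = 1;
            char *s, *d;

            for (s = d = str;; s++, d++) {
                    if (*s == '\\') {
                            s++;            /* keep the escaped character */
                    } else if (*s == ':') {
                            *d = '\0';      /* terminate this component */
                            ctr++;
                            continue;       /* do not copy the separator */
                    }
                    *d = *s;
                    if (!*s)
                            break;
            }
            return ctr;
    }

    int main(void)
    {
            char *tmp = strdup("/lower1:/dir\\:with\\:colons:/lower2");
            unsigned int i, n = split_lowerdirs(tmp);
            char *p = tmp;

            for (i = 0; i < n; i++) {
                    printf("lower[%u] = %s\n", i, p);
                    p = strchr(p, '\0') + 1;        /* next component follows the NUL */
            }
            free(tmp);
            return 0;
    }
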
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
-       struct path lowerpath;
-       struct path upperpath;
-       struct path workpath;
-       struct inode *root_inode;
+       struct path upperpath = { NULL, NULL };
+       struct path workpath = { NULL, NULL };
        struct dentry *root_dentry;
        struct ovl_entry *oe;
        struct ovl_fs *ufs;
-       struct kstatfs statfs;
+       struct path *stack = NULL;
+       char *lowertmp;
+       char *lower;
+       unsigned int numlower;
+       unsigned int stacklen = 0;
+       unsigned int i;
        int err;
 
        err = -ENOMEM;
@@ -655,123 +838,147 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (err)
                goto out_free_config;
 
-       /* FIXME: workdir is not needed for a R/O mount */
        err = -EINVAL;
-       if (!ufs->config.upperdir || !ufs->config.lowerdir ||
-           !ufs->config.workdir) {
-               pr_err("overlayfs: missing upperdir or lowerdir or workdir\n");
+       if (!ufs->config.lowerdir) {
+               pr_err("overlayfs: missing 'lowerdir'\n");
                goto out_free_config;
        }
 
-       err = -ENOMEM;
-       oe = ovl_alloc_entry();
-       if (oe == NULL)
-               goto out_free_config;
-
-       err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
-       if (err)
-               goto out_free_oe;
+       sb->s_stack_depth = 0;
+       if (ufs->config.upperdir) {
+               if (!ufs->config.workdir) {
+                       pr_err("overlayfs: missing 'workdir'\n");
+                       goto out_free_config;
+               }
 
-       err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath);
-       if (err)
-               goto out_put_upperpath;
+               err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
+               if (err)
+                       goto out_free_config;
 
-       err = ovl_mount_dir(ufs->config.workdir, &workpath);
-       if (err)
-               goto out_put_lowerpath;
+               /* Upper fs should not be r/o */
+               if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) {
+                       pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
+                       err = -EINVAL;
+                       goto out_put_upperpath;
+               }
 
-       err = -EINVAL;
-       if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
-           !S_ISDIR(lowerpath.dentry->d_inode->i_mode) ||
-           !S_ISDIR(workpath.dentry->d_inode->i_mode)) {
-               pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n");
-               goto out_put_workpath;
-       }
+               err = ovl_mount_dir(ufs->config.workdir, &workpath);
+               if (err)
+                       goto out_put_upperpath;
 
-       if (upperpath.mnt != workpath.mnt) {
-               pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
-               goto out_put_workpath;
+               err = -EINVAL;
+               if (upperpath.mnt != workpath.mnt) {
+                       pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+                       goto out_put_workpath;
+               }
+               if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
+                       pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+                       goto out_put_workpath;
+               }
+               sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth;
        }
-       if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
-               pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+       err = -ENOMEM;
+       lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL);
+       if (!lowertmp)
                goto out_put_workpath;
-       }
 
-       if (!ovl_is_allowed_fs_type(upperpath.dentry)) {
-               pr_err("overlayfs: filesystem of upperdir is not supported\n");
-               goto out_put_workpath;
+       err = -EINVAL;
+       stacklen = ovl_split_lowerdirs(lowertmp);
+       if (stacklen > OVL_MAX_STACK) {
+               pr_err("overlayfs: too many lower directries, limit is %d\n",
+                      OVL_MAX_STACK);
+               goto out_free_lowertmp;
+       } else if (!ufs->config.upperdir && stacklen == 1) {
+               pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n");
+               goto out_free_lowertmp;
        }
 
-       if (!ovl_is_allowed_fs_type(lowerpath.dentry)) {
-               pr_err("overlayfs: filesystem of lowerdir is not supported\n");
-               goto out_put_workpath;
-       }
+       stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+       if (!stack)
+               goto out_free_lowertmp;
 
-       err = vfs_statfs(&lowerpath, &statfs);
-       if (err) {
-               pr_err("overlayfs: statfs failed on lowerpath\n");
-               goto out_put_workpath;
-       }
-       ufs->lower_namelen = statfs.f_namelen;
+       lower = lowertmp;
+       for (numlower = 0; numlower < stacklen; numlower++) {
+               err = ovl_lower_dir(lower, &stack[numlower],
+                                   &ufs->lower_namelen, &sb->s_stack_depth);
+               if (err)
+                       goto out_put_lowerpath;
 
-       sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
-                               lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
+               lower = strchr(lower, '\0') + 1;
+       }
 
        err = -EINVAL;
+       sb->s_stack_depth++;
        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
                pr_err("overlayfs: maximum fs stacking depth exceeded\n");
-               goto out_put_workpath;
+               goto out_put_lowerpath;
        }
 
-       ufs->upper_mnt = clone_private_mount(&upperpath);
-       err = PTR_ERR(ufs->upper_mnt);
-       if (IS_ERR(ufs->upper_mnt)) {
-               pr_err("overlayfs: failed to clone upperpath\n");
-               goto out_put_workpath;
-       }
+       if (ufs->config.upperdir) {
+               ufs->upper_mnt = clone_private_mount(&upperpath);
+               err = PTR_ERR(ufs->upper_mnt);
+               if (IS_ERR(ufs->upper_mnt)) {
+                       pr_err("overlayfs: failed to clone upperpath\n");
+                       goto out_put_lowerpath;
+               }
 
-       ufs->lower_mnt = clone_private_mount(&lowerpath);
-       err = PTR_ERR(ufs->lower_mnt);
-       if (IS_ERR(ufs->lower_mnt)) {
-               pr_err("overlayfs: failed to clone lowerpath\n");
-               goto out_put_upper_mnt;
+               ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
+               err = PTR_ERR(ufs->workdir);
+               if (IS_ERR(ufs->workdir)) {
+                       pr_err("overlayfs: failed to create directory %s/%s\n",
+                              ufs->config.workdir, OVL_WORKDIR_NAME);
+                       goto out_put_upper_mnt;
+               }
        }
 
-       ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
-       err = PTR_ERR(ufs->workdir);
-       if (IS_ERR(ufs->workdir)) {
-               pr_err("overlayfs: failed to create directory %s/%s\n",
-                      ufs->config.workdir, OVL_WORKDIR_NAME);
-               goto out_put_lower_mnt;
-       }
+       err = -ENOMEM;
+       ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL);
+       if (ufs->lower_mnt == NULL)
+               goto out_put_workdir;
+       for (i = 0; i < numlower; i++) {
+               struct vfsmount *mnt = clone_private_mount(&stack[i]);
 
-       /*
-        * Make lower_mnt R/O.  That way fchmod/fchown on lower file
-        * will fail instead of modifying lower fs.
-        */
-       ufs->lower_mnt->mnt_flags |= MNT_READONLY;
+               err = PTR_ERR(mnt);
+               if (IS_ERR(mnt)) {
+                       pr_err("overlayfs: failed to clone lowerpath\n");
+                       goto out_put_lower_mnt;
+               }
+               /*
+                * Make lower_mnt R/O.  That way fchmod/fchown on lower file
+                * will fail instead of modifying lower fs.
+                */
+               mnt->mnt_flags |= MNT_READONLY;
+
+               ufs->lower_mnt[ufs->numlower] = mnt;
+               ufs->numlower++;
+       }
 
-       /* If the upper fs is r/o, we mark overlayfs r/o too */
-       if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
+       /* If the upper fs is nonexistent, we mark overlayfs r/o too */
+       if (!ufs->upper_mnt)
                sb->s_flags |= MS_RDONLY;
 
        sb->s_d_op = &ovl_dentry_operations;
 
        err = -ENOMEM;
-       root_inode = ovl_new_inode(sb, S_IFDIR, oe);
-       if (!root_inode)
-               goto out_put_workdir;
+       oe = ovl_alloc_entry(numlower);
+       if (!oe)
+               goto out_put_lower_mnt;
 
-       root_dentry = d_make_root(root_inode);
+       root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
        if (!root_dentry)
-               goto out_put_workdir;
+               goto out_free_oe;
 
        mntput(upperpath.mnt);
-       mntput(lowerpath.mnt);
+       for (i = 0; i < numlower; i++)
+               mntput(stack[i].mnt);
        path_put(&workpath);
+       kfree(lowertmp);
 
        oe->__upperdentry = upperpath.dentry;
-       oe->lowerdentry = lowerpath.dentry;
+       for (i = 0; i < numlower; i++) {
+               oe->lowerstack[i].dentry = stack[i].dentry;
+               oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+       }
 
        root_dentry->d_fsdata = oe;
 
@@ -782,20 +989,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        return 0;
 
+out_free_oe:
+       kfree(oe);
+out_put_lower_mnt:
+       for (i = 0; i < ufs->numlower; i++)
+               mntput(ufs->lower_mnt[i]);
+       kfree(ufs->lower_mnt);
 out_put_workdir:
        dput(ufs->workdir);
-out_put_lower_mnt:
-       mntput(ufs->lower_mnt);
 out_put_upper_mnt:
        mntput(ufs->upper_mnt);
+out_put_lowerpath:
+       for (i = 0; i < numlower; i++)
+               path_put(&stack[i]);
+       kfree(stack);
+out_free_lowertmp:
+       kfree(lowertmp);
 out_put_workpath:
        path_put(&workpath);
-out_put_lowerpath:
-       path_put(&lowerpath);
 out_put_upperpath:
        path_put(&upperpath);
-out_free_oe:
-       kfree(oe);
 out_free_config:
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
index 0855f772cd41599d6c1d1091e7da616d32cccf53..3a48bb789c9f3e4eb227214f72798b90c52af5dc 100644 (file)
@@ -564,13 +564,11 @@ posix_acl_create(struct inode *dir, umode_t *mode,
 
        *acl = posix_acl_clone(p, GFP_NOFS);
        if (!*acl)
-               return -ENOMEM;
+               goto no_mem;
 
        ret = posix_acl_create_masq(*acl, mode);
-       if (ret < 0) {
-               posix_acl_release(*acl);
-               return -ENOMEM;
-       }
+       if (ret < 0)
+               goto no_mem_clone;
 
        if (ret == 0) {
                posix_acl_release(*acl);
@@ -591,6 +589,12 @@ no_acl:
        *default_acl = NULL;
        *acl = NULL;
        return 0;
+
+no_mem_clone:
+       posix_acl_release(*acl);
+no_mem:
+       posix_acl_release(p);
+       return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(posix_acl_create);
 
@@ -772,7 +776,7 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
 
        if (!IS_POSIXACL(dentry->d_inode))
                return -EOPNOTSUPP;
-       if (S_ISLNK(dentry->d_inode->i_mode))
+       if (d_is_symlink(dentry))
                return -EOPNOTSUPP;
 
        acl = get_acl(dentry->d_inode, type);
@@ -832,7 +836,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size,
 
        if (!IS_POSIXACL(dentry->d_inode))
                return -EOPNOTSUPP;
-       if (S_ISLNK(dentry->d_inode->i_mode))
+       if (d_is_symlink(dentry))
                return -EOPNOTSUPP;
 
        if (type == ACL_TYPE_ACCESS)
index 3309f59d421ba6e5b806651dcc1d926143470953..be65b208213518f267d9fe16b5652b5021700cf5 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
-#include <linux/namei.h>
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum)
        spin_unlock_irqrestore(&proc_inum_lock, flags);
 }
 
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, __PDE_DATA(dentry->d_inode));
-       return NULL;
-}
-
-static const struct inode_operations proc_link_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = proc_follow_link,
-};
-
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
index 13a50a32652dc868ab94084a1e07fb4a809e43da..7697b6621cfd5b13051318ba15920646942ed528 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 };
 #endif
 
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+       struct proc_dir_entry *pde = PDE(dentry->d_inode);
+       if (unlikely(!use_pde(pde)))
+               return ERR_PTR(-EINVAL);
+       nd_set_link(nd, pde->data);
+       return pde;
+}
+
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+       unuse_pde(p);
+}
+
+const struct inode_operations proc_link_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = proc_follow_link,
+       .put_link       = proc_put_link,
+};
+
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
        struct inode *inode = new_inode_pseudo(sb);
index 6fcdba573e0fa2471e668e96217f366bed749050..c835b94c0cd3afec0bea4017ca8bacd63b32ff8e 100644 (file)
@@ -200,6 +200,7 @@ struct pde_opener {
        int closing;
        struct completion *c;
 };
+extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
 
index 956b75d61809f06cdf433411456bb08278726eb5..6dee68d013ffa69f1f6c9d49873f17f19eb3874d 100644 (file)
@@ -1325,6 +1325,9 @@ out:
 
 static int pagemap_open(struct inode *inode, struct file *file)
 {
+       /* do not disclose physical addresses: attack vector */
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
        pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
                        "to stop being page-shift some time soon. See the "
                        "linux/Documentation/vm/pagemap.txt for details.\n");
index 04b06146bae224f0f9177da58fee0c6fd6d3e747..4e781e697c90bce3b42f0e0097fe46965bbb5258 100644 (file)
@@ -266,7 +266,7 @@ static int reiserfs_for_each_xattr(struct inode *inode,
                for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
                        struct dentry *dentry = buf.dentries[i];
 
-                       if (!S_ISDIR(dentry->d_inode->i_mode))
+                       if (!d_is_dir(dentry))
                                err = action(dentry, data);
 
                        dput(dentry);
@@ -322,7 +322,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
        struct inode *dir = dentry->d_parent->d_inode;
 
        /* This is the xattr dir, handle specially. */
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                return xattr_rmdir(dir, dentry);
 
        return xattr_unlink(dir, dentry);
index 65a53efc1cf4a5d5ce5cb7c6eca39fa28b0d2f7f..2b7dc90ccdbb4ae1ceac7725967cec2a643e21c7 100644 (file)
@@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
 
-       if (!grab_super_passive(sb))
+       if (!trylock_super(sb))
                return SHRINK_STOP;
 
        if (sb->s_op->nr_cached_objects)
@@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
                freed += sb->s_op->free_cached_objects(sb, sc);
        }
 
-       drop_super(sb);
+       up_read(&sb->s_umount);
        return freed;
 }
 
@@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
        sb = container_of(shrink, struct super_block, s_shrink);
 
        /*
-        * Don't call grab_super_passive as it is a potential
+        * Don't call trylock_super as it is a potential
         * scalability bottleneck. The counts could get updated
         * between super_cache_count and super_cache_scan anyway.
         * Call to super_cache_count with shrinker_rwsem held
@@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 }
 
 /*
- *     grab_super_passive - acquire a passive reference
+ *     trylock_super - try to grab ->s_umount shared
  *     @sb: reference we are trying to grab
  *
- *     Tries to acquire a passive reference. This is used in places where we
+ *     Try to prevent fs shutdown.  This is used in places where we
  *     cannot take an active reference but we need to ensure that the
- *     superblock does not go away while we are working on it. It returns
- *     false if a reference was not gained, and returns true with the s_umount
- *     lock held in read mode if a reference is gained. On successful return,
- *     the caller must drop the s_umount lock and the passive reference when
- *     done.
+ *     filesystem is not shut down while we are working on it. It returns
+ *     false if we cannot acquire s_umount or if we lose the race and
+ *     filesystem already got into shutdown, and returns true with the s_umount
+ *     lock held in read mode in case of success. On successful return,
+ *     the caller must drop the s_umount lock when done.
+ *
+ *     Note that unlike get_super() et al. this one does *not* bump ->s_count.
+ *     The reason why it's safe is that we are OK with doing trylock instead
+ *     of down_read().  There's a couple of places that are OK with that, but
+ *     it's very much not a general-purpose interface.
  */
-bool grab_super_passive(struct super_block *sb)
+bool trylock_super(struct super_block *sb)
 {
-       spin_lock(&sb_lock);
-       if (hlist_unhashed(&sb->s_instances)) {
-               spin_unlock(&sb_lock);
-               return false;
-       }
-
-       sb->s_count++;
-       spin_unlock(&sb_lock);
-
        if (down_read_trylock(&sb->s_umount)) {
-               if (sb->s_root && (sb->s_flags & MS_BORN))
+               if (!hlist_unhashed(&sb->s_instances) &&
+                   sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }
 
-       put_super(sb);
        return false;
 }
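
The contract is the one super_cache_scan() above relies on: on success only ->s_umount is held shared and no ->s_count reference is taken, so the caller drops just the lock. A condensed sketch of a hypothetical caller (not new kernel code):

    if (!trylock_super(sb))
            return SHRINK_STOP;     /* lock unavailable or fs already shutting down */

    /* ... work on sb with ->s_umount held shared ... */

    up_read(&sb->s_umount);         /* no refcount to drop, unlike grab_super_passive() */
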
 
index d61799949580a497ccae45883cb2f4c3f8e34495..df6828570e874ae423c48309f51a91d16ce949fd 100644 (file)
@@ -121,3 +121,4 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
 xfs-$(CONFIG_PROC_FS)          += xfs_stats.o
 xfs-$(CONFIG_SYSCTL)           += xfs_sysctl.o
 xfs-$(CONFIG_COMPAT)           += xfs_ioctl32.o
+xfs-$(CONFIG_NFSD_PNFS)                += xfs_pnfs.o
index 5eb4a14e0a0fdc53d45652a3f84b323f040aa2f1..b97359ba2648f12fca1bfa98e25be6efc719c8e3 100644 (file)
@@ -30,6 +30,7 @@
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_log.h"
+#include "xfs_pnfs.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
@@ -245,4 +246,9 @@ const struct export_operations xfs_export_operations = {
        .fh_to_parent           = xfs_fs_fh_to_parent,
        .get_parent             = xfs_fs_get_parent,
        .commit_metadata        = xfs_fs_nfs_commit_metadata,
+#ifdef CONFIG_NFSD_PNFS
+       .get_uuid               = xfs_fs_get_uuid,
+       .map_blocks             = xfs_fs_map_blocks,
+       .commit_blocks          = xfs_fs_commit_blocks,
+#endif
 };
index 1cdba95c78cb3e2475de29e0b6d88df3604e4cdf..a2e1cb8a568bf9d45e32c43539a2e6f8b56d83f4 100644 (file)
@@ -36,6 +36,7 @@
 #include "xfs_trace.h"
 #include "xfs_log.h"
 #include "xfs_icache.h"
+#include "xfs_pnfs.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -396,7 +397,8 @@ STATIC int                          /* error (positive) */
 xfs_zero_last_block(
        struct xfs_inode        *ip,
        xfs_fsize_t             offset,
-       xfs_fsize_t             isize)
+       xfs_fsize_t             isize,
+       bool                    *did_zeroing)
 {
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           last_fsb = XFS_B_TO_FSBT(mp, isize);
@@ -424,6 +426,7 @@ xfs_zero_last_block(
        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
+       *did_zeroing = true;
        return xfs_iozero(ip, isize, zero_len);
 }
 
@@ -442,7 +445,8 @@ int                                 /* error (positive) */
 xfs_zero_eof(
        struct xfs_inode        *ip,
        xfs_off_t               offset,         /* starting I/O offset */
-       xfs_fsize_t             isize)          /* current inode size */
+       xfs_fsize_t             isize,          /* current inode size */
+       bool                    *did_zeroing)
 {
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_zero_fsb;
@@ -464,7 +468,7 @@ xfs_zero_eof(
         * We only zero a part of that block so it is handled specially.
         */
        if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
-               error = xfs_zero_last_block(ip, offset, isize);
+               error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
                if (error)
                        return error;
        }
@@ -524,6 +528,7 @@ xfs_zero_eof(
                if (error)
                        return error;
 
+               *did_zeroing = true;
                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
        }
@@ -554,6 +559,10 @@ restart:
        if (error)
                return error;
 
+       error = xfs_break_layouts(inode, iolock);
+       if (error)
+               return error;
+
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
@@ -562,13 +571,15 @@ restart:
         * having to redo all checks before.
         */
        if (*pos > i_size_read(inode)) {
+               bool    zero = false;
+
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, *iolock);
                        goto restart;
                }
-               error = xfs_zero_eof(ip, *pos, i_size_read(inode));
+               error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
                if (error)
                        return error;
        }
@@ -822,6 +833,7 @@ xfs_file_fallocate(
        struct xfs_inode        *ip = XFS_I(inode);
        long                    error;
        enum xfs_prealloc_flags flags = 0;
+       uint                    iolock = XFS_IOLOCK_EXCL;
        loff_t                  new_size = 0;
 
        if (!S_ISREG(inode->i_mode))
@@ -830,7 +842,11 @@ xfs_file_fallocate(
                     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
                return -EOPNOTSUPP;
 
-       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       xfs_ilock(ip, iolock);
+       error = xfs_break_layouts(inode, &iolock);
+       if (error)
+               goto out_unlock;
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                error = xfs_free_file_space(ip, offset, len);
                if (error)
@@ -894,7 +910,7 @@ xfs_file_fallocate(
        }
 
 out_unlock:
-       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       xfs_iunlock(ip, iolock);
        return error;
 }
 
index fba6532efba44d0d7114baa24e358716aeb3c922..74efe5b760dcc2907280db8e374afe6f5914d7b3 100644 (file)
@@ -602,6 +602,12 @@ xfs_growfs_data(
        if (!mutex_trylock(&mp->m_growlock))
                return -EWOULDBLOCK;
        error = xfs_growfs_data_private(mp, in);
+       /*
+        * Increment the generation unconditionally, the error could be from
+        * updating the secondary superblocks, in which case the new size
+        * is live already.
+        */
+       mp->m_generation++;
        mutex_unlock(&mp->m_growlock);
        return error;
 }
index daafa1f6d2607722b338c8b5da458a979558d9b3..6163767aa8562f6d611a1442ed4f299aa85bceea 100644 (file)
@@ -2867,6 +2867,10 @@ xfs_rename(
         * Handle RENAME_EXCHANGE flags
         */
        if (flags & RENAME_EXCHANGE) {
+               if (target_ip == NULL) {
+                       error = -EINVAL;
+                       goto error_return;
+               }
                error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
                                         target_dp, target_name, target_ip,
                                         &free_list, &first_block, spaceres);
index 86cd6b39bed7be1dc4bd72be82e9a40b9c8b9825..a1cd55f3f351e1361e2a3ea790f88f5f5070e7e3 100644 (file)
@@ -384,10 +384,11 @@ enum xfs_prealloc_flags {
        XFS_PREALLOC_INVISIBLE  = (1 << 4),
 };
 
-int            xfs_update_prealloc_flags(struct xfs_inode *,
-                       enum xfs_prealloc_flags);
-int            xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
-int            xfs_iozero(struct xfs_inode *, loff_t, size_t);
+int    xfs_update_prealloc_flags(struct xfs_inode *ip,
+                                 enum xfs_prealloc_flags flags);
+int    xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
+                    xfs_fsize_t isize, bool *did_zeroing);
+int    xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
 
 
 #define IHOLD(ip) \
index f7afb86c91487fc0a89c98578a28b3a1dfda83e0..ac4feae45eb308c39629f177c0b6620fae77fb69 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_trans.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -286,7 +287,7 @@ xfs_readlink_by_handle(
                return PTR_ERR(dentry);
 
        /* Restrict this handle operation to symlinks only. */
-       if (!S_ISLNK(dentry->d_inode->i_mode)) {
+       if (!d_is_symlink(dentry)) {
                error = -EINVAL;
                goto out_dput;
        }
@@ -608,6 +609,7 @@ xfs_ioc_space(
 {
        struct iattr            iattr;
        enum xfs_prealloc_flags flags = 0;
+       uint                    iolock = XFS_IOLOCK_EXCL;
        int                     error;
 
        /*
@@ -636,7 +638,10 @@ xfs_ioc_space(
        if (error)
                return error;
 
-       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       xfs_ilock(ip, iolock);
+       error = xfs_break_layouts(inode, &iolock);
+       if (error)
+               goto out_unlock;
 
        switch (bf->l_whence) {
        case 0: /*SEEK_SET*/
@@ -725,7 +730,7 @@ xfs_ioc_space(
        error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
-       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       xfs_iunlock(ip, iolock);
        mnt_drop_write_file(filp);
        return error;
 }
index ce80eeb8faa472fd225571de4df196dd6d689693..e53a903314225c030c45f694b4ffdaa509fa1ce8 100644 (file)
@@ -37,6 +37,7 @@
 #include "xfs_da_btree.h"
 #include "xfs_dir2.h"
 #include "xfs_trans_space.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -505,7 +506,7 @@ xfs_setattr_mode(
        inode->i_mode |= mode & ~S_IFMT;
 }
 
-static void
+void
 xfs_setattr_time(
        struct xfs_inode        *ip,
        struct iattr            *iattr)
@@ -750,6 +751,7 @@ xfs_setattr_size(
        int                     error;
        uint                    lock_flags = 0;
        uint                    commit_flags = 0;
+       bool                    did_zeroing = false;
 
        trace_xfs_setattr(ip);
 
@@ -793,20 +795,16 @@ xfs_setattr_size(
                return error;
 
        /*
-        * Now we can make the changes.  Before we join the inode to the
-        * transaction, take care of the part of the truncation that must be
-        * done without the inode lock.  This needs to be done before joining
-        * the inode to the transaction, because the inode cannot be unlocked
-        * once it is a part of the transaction.
+        * File data changes must be complete before we start the transaction to
+        * modify the inode.  This needs to be done before joining the inode to
+        * the transaction because the inode cannot be unlocked once it is a
+        * part of the transaction.
+        *
+        * Start with zeroing any data block beyond EOF that we may expose on
+        * file extension.
         */
        if (newsize > oldsize) {
-               /*
-                * Do the first part of growing a file: zero any data in the
-                * last block that is beyond the old EOF.  We need to do this
-                * before the inode is joined to the transaction to modify
-                * i_size.
-                */
-               error = xfs_zero_eof(ip, newsize, oldsize);
+               error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
                if (error)
                        return error;
        }
@@ -816,23 +814,18 @@ xfs_setattr_size(
         * any previous writes that are beyond the on disk EOF and the new
         * EOF that have not been written out need to be written here.  If we
         * do not write the data out, we expose ourselves to the null files
-        * problem.
-        *
-        * Only flush from the on disk size to the smaller of the in memory
-        * file size or the new size as that's the range we really care about
-        * here and prevents waiting for other data not within the range we
-        * care about here.
+        * problem. Note that this includes any block zeroing we did above;
+        * otherwise those blocks may not be zeroed after a crash.
         */
-       if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
+       if (newsize > ip->i_d.di_size &&
+           (oldsize != ip->i_d.di_size || did_zeroing)) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                                      ip->i_d.di_size, newsize);
                if (error)
                        return error;
        }
 
-       /*
-        * Wait for all direct I/O to complete.
-        */
+       /* Now wait for all direct I/O to complete. */
        inode_dio_wait(inode);
 
        /*
@@ -979,9 +972,13 @@ xfs_vn_setattr(
        int                     error;
 
        if (iattr->ia_valid & ATTR_SIZE) {
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               error = xfs_setattr_size(ip, iattr);
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+               uint            iolock = XFS_IOLOCK_EXCL;
+
+               xfs_ilock(ip, iolock);
+               error = xfs_break_layouts(dentry->d_inode, &iolock);
+               if (!error)
+                       error = xfs_setattr_size(ip, iattr);
+               xfs_iunlock(ip, iolock);
        } else {
                error = xfs_setattr_nonsize(ip, iattr, 0);
        }
index 1c34e4335920021d5829c5507be2f2c6c6bf60e1..ea7a98e9cb7048820bfa69c5bc294ccbe4102709 100644 (file)
@@ -32,6 +32,7 @@ extern void xfs_setup_inode(struct xfs_inode *);
  */
 #define XFS_ATTR_NOACL         0x01    /* Don't call posix_acl_chmod */
 
+extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
                               int flags);
 extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
index a5b2ff8226535d44443ce08369162f01e1d0cce2..0d8abd6364d97e613d98d4f827814c7eb7beb207 100644 (file)
@@ -174,6 +174,17 @@ typedef struct xfs_mount {
        struct workqueue_struct *m_reclaim_workqueue;
        struct workqueue_struct *m_log_workqueue;
        struct workqueue_struct *m_eofblocks_workqueue;
+
+       /*
+        * Generation of the filesystem layout.  This is incremented by each
+        * growfs, and used by the pNFS server to ensure the client updates
+        * its view of the block device once it gets a layout that might
+        * reference the newly added blocks.  Does not need to be persistent
+        * as long as we only allow file system size increments, but if we
+        * ever support shrinks it would have to be persisted in addition
+        * to various other kinds of pain inflicted on the pNFS server.
+        */
+       __uint32_t              m_generation;
 } xfs_mount_t;
 
 /*
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
new file mode 100644 (file)
index 0000000..365dd57
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include "xfs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_error.h"
+#include "xfs_iomap.h"
+#include "xfs_shared.h"
+#include "xfs_bit.h"
+#include "xfs_pnfs.h"
+
+/*
+ * Ensure that we do not have any outstanding pNFS layouts that can be used by
+ * clients to directly read from or write to this inode.  This must be called
+ * before every operation that can remove blocks from the extent map.
+ * Additionally we call it during the write operation, where we aren't concerned
+ * about exposing unallocated blocks but just want to provide basic
+ * synchronization between a local writer and pNFS clients.  mmap writes would
+ * also benefit from this sort of synchronization, but due to the tricky locking
+ * rules in the page fault path we don't bother.
+ */
+int
+xfs_break_layouts(
+       struct inode            *inode,
+       uint                    *iolock)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       int                     error;
+
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
+
+       while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
+               xfs_iunlock(ip, *iolock);
+               error = break_layout(inode, true);
+               *iolock = XFS_IOLOCK_EXCL;
+               xfs_ilock(ip, *iolock);
+       }
+
+       return error;
+}
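
Because xfs_break_layouts() may drop and retake the iolock, callers pass the lock mode by reference and unlock whatever mode they end up holding; condensed from the fallocate and ioctl hunks earlier in this diff (not new code):

    uint iolock = XFS_IOLOCK_EXCL;

    xfs_ilock(ip, iolock);
    error = xfs_break_layouts(inode, &iolock);      /* may cycle the lock */
    if (error)
            goto out_unlock;
    /* ... modify the extent map ... */
    out_unlock:
    xfs_iunlock(ip, iolock);
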
+
+/*
+ * Get a unique ID including its location so that the client can identify
+ * the exported device.
+ */
+int
+xfs_fs_get_uuid(
+       struct super_block      *sb,
+       u8                      *buf,
+       u32                     *len,
+       u64                     *offset)
+{
+       struct xfs_mount        *mp = XFS_M(sb);
+
+       printk_once(KERN_NOTICE
+"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
+               mp->m_fsname);
+
+       if (*len < sizeof(uuid_t))
+               return -EINVAL;
+
+       memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
+       *len = sizeof(uuid_t);
+       *offset = offsetof(struct xfs_dsb, sb_uuid);
+       return 0;
+}
+
+static void
+xfs_bmbt_to_iomap(
+       struct xfs_inode        *ip,
+       struct iomap            *iomap,
+       struct xfs_bmbt_irec    *imap)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       if (imap->br_startblock == HOLESTARTBLOCK) {
+               iomap->blkno = IOMAP_NULL_BLOCK;
+               iomap->type = IOMAP_HOLE;
+       } else if (imap->br_startblock == DELAYSTARTBLOCK) {
+               iomap->blkno = IOMAP_NULL_BLOCK;
+               iomap->type = IOMAP_DELALLOC;
+       } else {
+               iomap->blkno =
+                       XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock);
+               if (imap->br_state == XFS_EXT_UNWRITTEN)
+                       iomap->type = IOMAP_UNWRITTEN;
+               else
+                       iomap->type = IOMAP_MAPPED;
+       }
+       iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+       iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
+}
+
+/*
+ * Get a layout for the pNFS client.
+ */
+int
+xfs_fs_map_blocks(
+       struct inode            *inode,
+       loff_t                  offset,
+       u64                     length,
+       struct iomap            *iomap,
+       bool                    write,
+       u32                     *device_generation)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_bmbt_irec    imap;
+       xfs_fileoff_t           offset_fsb, end_fsb;
+       loff_t                  limit;
+       int                     bmapi_flags = XFS_BMAPI_ENTIRE;
+       int                     nimaps = 1;
+       uint                    lock_flags;
+       int                     error = 0;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       /*
+        * We can't export inodes residing on the realtime device.  The realtime
+        * device doesn't have a UUID to identify it, so the client has no way
+        * to find it.
+        */
+       if (XFS_IS_REALTIME_INODE(ip))
+               return -ENXIO;
+
+       /*
+        * Lock out any other I/O before we flush and invalidate the pagecache,
+        * and then hand out a layout to the remote system.  This is very
+        * similar to direct I/O, except that the synchronization is much more
+        * complicated.  See the comment near xfs_break_layouts for a detailed
+        * explanation.
+        */
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       error = -EINVAL;
+       limit = mp->m_super->s_maxbytes;
+       if (!write)
+               limit = max(limit, round_up(i_size_read(inode),
+                                    inode->i_sb->s_blocksize));
+       if (offset > limit)
+               goto out_unlock;
+       if (offset > limit - length)
+               length = limit - offset;
+
+       error = filemap_write_and_wait(inode->i_mapping);
+       if (error)
+               goto out_unlock;
+       error = invalidate_inode_pages2(inode->i_mapping);
+       if (WARN_ON_ONCE(error))
+               goto out_unlock;
+
+       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+       offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+       lock_flags = xfs_ilock_data_map_shared(ip);
+       error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+                               &imap, &nimaps, bmapi_flags);
+       xfs_iunlock(ip, lock_flags);
+
+       if (error)
+               goto out_unlock;
+
+       if (write) {
+               enum xfs_prealloc_flags flags = 0;
+
+               ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+
+               if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
+                       error = xfs_iomap_write_direct(ip, offset, length,
+                                                      &imap, nimaps);
+                       if (error)
+                               goto out_unlock;
+
+                       /*
+                        * Ensure the next transaction is committed
+                        * synchronously so that the blocks allocated and
+                        * handed out to the client are guaranteed to be
+                        * present even after a server crash.
+                        */
+                       flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
+               }
+
+               error = xfs_update_prealloc_flags(ip, flags);
+               if (error)
+                       goto out_unlock;
+       }
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+
+       xfs_bmbt_to_iomap(ip, iomap, &imap);
+       *device_generation = mp->m_generation;
+       return error;
+out_unlock:
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       return error;
+}
+
+/*
+ * Ensure the size update falls into a valid allocated block.
+ */
+static int
+xfs_pnfs_validate_isize(
+       struct xfs_inode        *ip,
+       xfs_off_t               isize)
+{
+       struct xfs_bmbt_irec    imap;
+       int                     nimaps = 1;
+       int                     error = 0;
+
+       xfs_ilock(ip, XFS_ILOCK_SHARED);
+       error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
+                               &imap, &nimaps, 0);
+       xfs_iunlock(ip, XFS_ILOCK_SHARED);
+       if (error)
+               return error;
+
+       if (imap.br_startblock == HOLESTARTBLOCK ||
+           imap.br_startblock == DELAYSTARTBLOCK ||
+           imap.br_state == XFS_EXT_UNWRITTEN)
+               return -EIO;
+       return 0;
+}
+
+/*
+ * Make sure the blocks described by maps are stable on disk.  This includes
+ * converting any unwritten extents, flushing the disk cache and updating the
+ * time stamps.
+ *
+ * Note that we rely on the caller to always send us a timestamp update so that
+ * we always commit a transaction here.  If that stops being true we will have
+ * to manually flush the cache here similar to what the fsync code path does
+ * for datasyncs on files that have no dirty metadata.
+ */
+int
+xfs_fs_commit_blocks(
+       struct inode            *inode,
+       struct iomap            *maps,
+       int                     nr_maps,
+       struct iattr            *iattr)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       bool                    update_isize = false;
+       int                     error, i;
+       loff_t                  size;
+
+       ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));
+
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       size = i_size_read(inode);
+       if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
+               update_isize = true;
+               size = iattr->ia_size;
+       }
+
+       for (i = 0; i < nr_maps; i++) {
+               u64 start, length, end;
+
+               start = maps[i].offset;
+               if (start > size)
+                       continue;
+
+               end = start + maps[i].length;
+               if (end > size)
+                       end = size;
+
+               length = end - start;
+               if (!length)
+                       continue;
+
+               /*
+                * Make sure reads through the pagecache see the new data.
+                */
+               error = invalidate_inode_pages2_range(inode->i_mapping,
+                                       start >> PAGE_CACHE_SHIFT,
+                                       (end - 1) >> PAGE_CACHE_SHIFT);
+               WARN_ON_ONCE(error);
+
+               error = xfs_iomap_write_unwritten(ip, start, length);
+               if (error)
+                       goto out_drop_iolock;
+       }
+
+       if (update_isize) {
+               error = xfs_pnfs_validate_isize(ip, size);
+               if (error)
+                       goto out_drop_iolock;
+       }
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_drop_iolock;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       xfs_setattr_time(ip, iattr);
+       if (update_isize) {
+               i_size_write(inode, iattr->ia_size);
+               ip->i_d.di_size = iattr->ia_size;
+       }
+
+       xfs_trans_set_sync(tp);
+       error = xfs_trans_commit(tp, 0);
+
+out_drop_iolock:
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       return error;
+}
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
new file mode 100644 (file)
index 0000000..b7fbfce
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _XFS_PNFS_H
+#define _XFS_PNFS_H 1
+
+#ifdef CONFIG_NFSD_PNFS
+int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
+               struct iomap *iomap, bool write, u32 *device_generation);
+int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
+               struct iattr *iattr);
+
+int xfs_break_layouts(struct inode *inode, uint *iolock);
+#else
+static inline int xfs_break_layouts(struct inode *inode, uint *iolock)
+{
+       return 0;
+}
+#endif /* CONFIG_NFSD_PNFS */
+#endif /* _XFS_PNFS_H */
index 53cc2aaf8d2bdfedc12247ae5055feaa95ae31a6..fbbb9e62e274b525a03e7cb1373cae35b49ace1b 100644 (file)
@@ -836,6 +836,11 @@ xfs_qm_reset_dqcounts(
                 */
                xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                            "xfs_quotacheck");
+               /*
+                * Reset type in case we are reusing group quota file for
+                * project quotas or vice versa
+                */
+               ddq->d_flags = type;
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
index a24addfdfcec5568015e4e358402d6afa002e37a..0de6290df4da65f1511658c0ae0bc60bf24679fa 100644 (file)
@@ -68,8 +68,8 @@ struct drm_mm_node {
        unsigned scanned_preceeds_hole : 1;
        unsigned allocated : 1;
        unsigned long color;
-       unsigned long start;
-       unsigned long size;
+       u64 start;
+       u64 size;
        struct drm_mm *mm;
 };
 
@@ -82,16 +82,16 @@ struct drm_mm {
        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
-       unsigned long scan_size;
-       unsigned long scan_hit_start;
-       unsigned long scan_hit_end;
+       u64 scan_size;
+       u64 scan_hit_start;
+       u64 scan_hit_end;
        unsigned scanned_blocks;
-       unsigned long scan_start;
-       unsigned long scan_end;
+       u64 scan_start;
+       u64 scan_end;
        struct drm_mm_node *prev_scanned_node;
 
        void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
-                            unsigned long *start, unsigned long *end);
+                            u64 *start, u64 *end);
 };
 
 /**
@@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
        return mm->hole_stack.next;
 }
 
-static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
        return hole_node->start + hole_node->size;
 }
@@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
  * Returns:
  * Start of the subsequent hole.
  */
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
        BUG_ON(!hole_node->hole_follows);
        return __drm_mm_hole_node_start(hole_node);
 }
 
-static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
        return list_entry(hole_node->node_list.next,
                          struct drm_mm_node, node_list)->start;
@@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
  * Returns:
  * End of the subsequent hole.
  */
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
        return __drm_mm_hole_node_end(hole_node);
 }
@@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 
 int drm_mm_insert_node_generic(struct drm_mm *mm,
                               struct drm_mm_node *node,
-                              unsigned long size,
+                              u64 size,
                               unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
@@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
                                     struct drm_mm_node *node,
-                                    unsigned long size,
+                                    u64 size,
                                     unsigned alignment,
                                     enum drm_mm_search_flags flags)
 {
@@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
 
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                        struct drm_mm_node *node,
-                                       unsigned long size,
+                                       u64 size,
                                        unsigned alignment,
                                        unsigned long color,
-                                       unsigned long start,
-                                       unsigned long end,
+                                       u64 start,
+                                       u64 end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags);
 /**
@@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
  */
 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                              struct drm_mm_node *node,
-                                             unsigned long size,
+                                             u64 size,
                                              unsigned alignment,
-                                             unsigned long start,
-                                             unsigned long end,
+                                             u64 start,
+                                             u64 end,
                                              enum drm_mm_search_flags flags)
 {
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
@@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 void drm_mm_remove_node(struct drm_mm_node *node);
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 void drm_mm_init(struct drm_mm *mm,
-                unsigned long start,
-                unsigned long size);
+                u64 start,
+                u64 size);
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);
 
 void drm_mm_init_scan(struct drm_mm *mm,
-                     unsigned long size,
+                     u64 size,
                      unsigned alignment,
                      unsigned long color);
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
-                                unsigned long size,
+                                u64 size,
                                 unsigned alignment,
                                 unsigned long color,
-                                unsigned long start,
-                                unsigned long end);
+                                u64 start,
+                                u64 end);
 bool drm_mm_scan_add_block(struct drm_mm_node *node);
 bool drm_mm_scan_remove_block(struct drm_mm_node *node);
 
index 180ad0e6de21dd3f0332335f90c122692a553838..d016dc57f0073eede1a5467b798fa35dfa779da5 100644 (file)
        INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
 
 #define _INTEL_BDW_M_IDS(gt, info) \
-       _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
+       _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
        _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
-       _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
+       _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
        _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
 
 #define _INTEL_BDW_D_IDS(gt, info) \
index 0ccf7f267ff94408387d71da446cc89ab1caf6fa..c768ddfbe53ccbbfeedd92809a9d4036adfc1fe2 100644 (file)
@@ -249,7 +249,7 @@ struct ttm_buffer_object {
         * either of these locks held.
         */
 
-       unsigned long offset;
+       uint64_t offset; /* GPU address space is independent of CPU word size */
        uint32_t cur_placement;
 
        struct sg_table *sg;
index 142d752fc450b74b60e964dd6a5c3d12ba38b163..813042cede5728a1bc3e8d463006217d49f35483 100644 (file)
@@ -277,7 +277,7 @@ struct ttm_mem_type_manager {
        bool has_type;
        bool use_type;
        uint32_t flags;
-       unsigned long gpu_offset;
+       uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
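
The widening from unsigned long to u64/uint64_t matters on 32-bit kernels, where unsigned long is only 32 bits but a GPU can expose well over 4 GiB of address space. A trivial userspace illustration of the difference (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gpu_offset = 6ULL << 30;       /* an offset 6 GiB into GPU memory */

            printf("sizeof(unsigned long) = %zu bytes\n", sizeof(unsigned long));
            printf("sizeof(uint64_t)      = %zu bytes\n", sizeof(uint64_t));
            printf("offset fits in unsigned long: %s\n",
                   gpu_offset <= (uint64_t)(unsigned long)-1 ? "yes" : "no");
            return 0;
    }
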
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
new file mode 100644 (file)
index 0000000..04e8db2
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ASM9260_H
+#define _DT_BINDINGS_CLK_ASM9260_H
+
+/* ahb gate */
+#define CLKID_AHB_ROM          0
+#define CLKID_AHB_RAM          1
+#define CLKID_AHB_GPIO         2
+#define CLKID_AHB_MAC          3
+#define CLKID_AHB_EMI          4
+#define CLKID_AHB_USB0         5
+#define CLKID_AHB_USB1         6
+#define CLKID_AHB_DMA0         7
+#define CLKID_AHB_DMA1         8
+#define CLKID_AHB_UART0                9
+#define CLKID_AHB_UART1                10
+#define CLKID_AHB_UART2                11
+#define CLKID_AHB_UART3                12
+#define CLKID_AHB_UART4                13
+#define CLKID_AHB_UART5                14
+#define CLKID_AHB_UART6                15
+#define CLKID_AHB_UART7                16
+#define CLKID_AHB_UART8                17
+#define CLKID_AHB_UART9                18
+#define CLKID_AHB_I2S0         19
+#define CLKID_AHB_I2C0         20
+#define CLKID_AHB_I2C1         21
+#define CLKID_AHB_SSP0         22
+#define CLKID_AHB_IOCONFIG     23
+#define CLKID_AHB_WDT          24
+#define CLKID_AHB_CAN0         25
+#define CLKID_AHB_CAN1         26
+#define CLKID_AHB_MPWM         27
+#define CLKID_AHB_SPI0         28
+#define CLKID_AHB_SPI1         29
+#define CLKID_AHB_QEI          30
+#define CLKID_AHB_QUADSPI0     31
+#define CLKID_AHB_CAMIF                32
+#define CLKID_AHB_LCDIF                33
+#define CLKID_AHB_TIMER0       34
+#define CLKID_AHB_TIMER1       35
+#define CLKID_AHB_TIMER2       36
+#define CLKID_AHB_TIMER3       37
+#define CLKID_AHB_IRQ          38
+#define CLKID_AHB_RTC          39
+#define CLKID_AHB_NAND         40
+#define CLKID_AHB_ADC0         41
+#define CLKID_AHB_LED          42
+#define CLKID_AHB_DAC0         43
+#define CLKID_AHB_LCD          44
+#define CLKID_AHB_I2S1         45
+#define CLKID_AHB_MAC1         46
+
+/* divider */
+#define CLKID_SYS_CPU          47
+#define CLKID_SYS_AHB          48
+#define CLKID_SYS_I2S0M                49
+#define CLKID_SYS_I2S0S                50
+#define CLKID_SYS_I2S1M                51
+#define CLKID_SYS_I2S1S                52
+#define CLKID_SYS_UART0                53
+#define CLKID_SYS_UART1                54
+#define CLKID_SYS_UART2                55
+#define CLKID_SYS_UART3                56
+#define CLKID_SYS_UART4                56
+#define CLKID_SYS_UART5                57
+#define CLKID_SYS_UART6                58
+#define CLKID_SYS_UART7                59
+#define CLKID_SYS_UART8                60
+#define CLKID_SYS_UART9                61
+#define CLKID_SYS_SPI0         62
+#define CLKID_SYS_SPI1         63
+#define CLKID_SYS_QUADSPI      64
+#define CLKID_SYS_SSP0         65
+#define CLKID_SYS_NAND         66
+#define CLKID_SYS_TRACE                67
+#define CLKID_SYS_CAMM         68
+#define CLKID_SYS_WDT          69
+#define CLKID_SYS_CLKOUT       70
+#define CLKID_SYS_MAC          71
+#define CLKID_SYS_LCD          72
+#define CLKID_SYS_ADCANA       73
+
+#define MAX_CLKS               74
+#endif
index 34fe28c622d0a6c2cbfbef348be34dfefa2b59f9..c4b1676ea674abb5c6f598525b3f0ca09e39e1e7 100644 (file)
 #define CLK_DIV_MCUISP1                453 /* Exynos4x12 only */
 #define CLK_DIV_ACLK200                454 /* Exynos4x12 only */
 #define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
+#define CLK_DIV_ACP            456
+#define CLK_DIV_DMC            457
+#define CLK_DIV_C2C            458 /* Exynos4x12 only */
+#define CLK_DIV_GDL            459
+#define CLK_DIV_GDR            460
 
 /* must be greater than maximal clock id */
-#define CLK_NR_CLKS            456
+#define CLK_NR_CLKS            461
 
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
index 8e4681b07ae79fcaaf6b9e563ac360da6ac198d0..e33c75a3c09dc7caf26575a7e3469788aa6cc234 100644 (file)
 #define DOUT_SCLK_CC_PLL               4
 #define DOUT_SCLK_MFC_PLL              5
 #define DOUT_ACLK_CCORE_133            6
-#define TOPC_NR_CLK                    7
+#define DOUT_ACLK_MSCL_532             7
+#define ACLK_MSCL_532                  8
+#define DOUT_SCLK_AUD_PLL              9
+#define FOUT_AUD_PLL                   10
+#define TOPC_NR_CLK                    11
 
 /* TOP0 */
 #define DOUT_ACLK_PERIC1               1
 #define CLK_SCLK_UART1                 4
 #define CLK_SCLK_UART2                 5
 #define CLK_SCLK_UART3                 6
-#define TOP0_NR_CLK                    7
+#define CLK_SCLK_SPI0                  7
+#define CLK_SCLK_SPI1                  8
+#define CLK_SCLK_SPI2                  9
+#define CLK_SCLK_SPI3                  10
+#define CLK_SCLK_SPI4                  11
+#define CLK_SCLK_SPDIF                 12
+#define CLK_SCLK_PCM1                  13
+#define CLK_SCLK_I2S1                  14
+#define TOP0_NR_CLK                    15
 
 /* TOP1 */
 #define DOUT_ACLK_FSYS1_200            1
 #define PCLK_HSI2C6                    9
 #define PCLK_HSI2C7                    10
 #define PCLK_HSI2C8                    11
-#define PERIC1_NR_CLK                  12
+#define PCLK_SPI0                      12
+#define PCLK_SPI1                      13
+#define PCLK_SPI2                      14
+#define PCLK_SPI3                      15
+#define PCLK_SPI4                      16
+#define SCLK_SPI0                      17
+#define SCLK_SPI1                      18
+#define SCLK_SPI2                      19
+#define SCLK_SPI3                      20
+#define SCLK_SPI4                      21
+#define PCLK_I2S1                      22
+#define PCLK_PCM1                      23
+#define PCLK_SPDIF                     24
+#define SCLK_I2S1                      25
+#define SCLK_PCM1                      26
+#define SCLK_SPDIF                     27
+#define PERIC1_NR_CLK                  28
 
 /* PERIS */
 #define PCLK_CHIPID                    1
 
 /* FSYS0 */
 #define ACLK_MMC2                      1
-#define FSYS0_NR_CLK                   2
+#define ACLK_AXIUS_USBDRD30X_FSYS0X    2
+#define ACLK_USBDRD300                 3
+#define SCLK_USBDRD300_SUSPENDCLK      4
+#define SCLK_USBDRD300_REFCLK          5
+#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER         6
+#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER            7
+#define OSCCLK_PHY_CLKOUT_USB30_PHY            8
+#define ACLK_PDMA0                     9
+#define ACLK_PDMA1                     10
+#define FSYS0_NR_CLK                   11
 
 /* FSYS1 */
 #define ACLK_MMC1                      1
 #define ACLK_MMC0                      2
 #define FSYS1_NR_CLK                   3
 
+/* MSCL */
+#define USERMUX_ACLK_MSCL_532          1
+#define DOUT_PCLK_MSCL                 2
+#define ACLK_MSCL_0                    3
+#define ACLK_MSCL_1                    4
+#define ACLK_JPEG                      5
+#define ACLK_G2D                       6
+#define ACLK_LH_ASYNC_SI_MSCL_0                7
+#define ACLK_LH_ASYNC_SI_MSCL_1                8
+#define ACLK_AXI2ACEL_BRIDGE           9
+#define ACLK_XIU_MSCLX_0               10
+#define ACLK_XIU_MSCLX_1               11
+#define ACLK_QE_MSCL_0                 12
+#define ACLK_QE_MSCL_1                 13
+#define ACLK_QE_JPEG                   14
+#define ACLK_QE_G2D                    15
+#define ACLK_PPMU_MSCL_0               16
+#define ACLK_PPMU_MSCL_1               17
+#define ACLK_MSCLNP_133                        18
+#define ACLK_AHB2APB_MSCL0P            19
+#define ACLK_AHB2APB_MSCL1P            20
+
+#define PCLK_MSCL_0                    21
+#define PCLK_MSCL_1                    22
+#define PCLK_JPEG                      23
+#define PCLK_G2D                       24
+#define PCLK_QE_MSCL_0                 25
+#define PCLK_QE_MSCL_1                 26
+#define PCLK_QE_JPEG                   27
+#define PCLK_QE_G2D                    28
+#define PCLK_PPMU_MSCL_0               29
+#define PCLK_PPMU_MSCL_1               30
+#define PCLK_AXI2ACEL_BRIDGE           31
+#define PCLK_PMU_MSCL                  32
+#define MSCL_NR_CLK                    33
+
+/* AUD */
+#define SCLK_I2S                       1
+#define SCLK_PCM                       2
+#define PCLK_I2S                       3
+#define PCLK_PCM                       4
+#define ACLK_ADMA                      5
+#define AUD_NR_CLK                     6
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
index b857cadb0bd40ef8c2843693b6fa8c9f7bb11528..04fb29ae30e69801f37713f1284d165396f8525a 100644 (file)
 #define PLL0_VOTE                              221
 #define PLL3                                   222
 #define PLL3_VOTE                              223
-#define PLL4                                   224
 #define PLL4_VOTE                              225
 #define PLL8                                   226
 #define PLL8_VOTE                              227
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644 (file)
index 0000000..4e944b8
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
+
+#define PLL4                           0
+#define MI2S_OSR_SRC                   1
+#define MI2S_OSR_CLK                   2
+#define MI2S_DIV_CLK                   3
+#define MI2S_BIT_DIV_CLK               4
+#define MI2S_BIT_CLK                   5
+#define PCM_SRC                                6
+#define PCM_CLK_OUT                    7
+#define PCM_CLK                                8
+#define SPDIF_SRC                      9
+#define SPDIF_CLK                      10
+#define AHBIX_CLK                      11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644 (file)
index 0000000..4fb2aa6
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
+#define _DT_BINDINGS_CLK_LCC_MSM8960_H
+
+#define PLL4                           0
+#define MI2S_OSR_SRC                   1
+#define MI2S_OSR_CLK                   2
+#define MI2S_DIV_CLK                   3
+#define MI2S_BIT_DIV_CLK               4
+#define MI2S_BIT_CLK                   5
+#define PCM_SRC                                6
+#define PCM_CLK_OUT                    7
+#define PCM_CLK                                8
+#define SLIMBUS_SRC                    9
+#define AUDIO_SLIMBUS_CLK              10
+#define SPS_SLIMBUS_CLK                        11
+#define CODEC_I2S_MIC_OSR_SRC          12
+#define CODEC_I2S_MIC_OSR_CLK          13
+#define CODEC_I2S_MIC_DIV_CLK          14
+#define CODEC_I2S_MIC_BIT_DIV_CLK      15
+#define CODEC_I2S_MIC_BIT_CLK          16
+#define SPARE_I2S_MIC_OSR_SRC          17
+#define SPARE_I2S_MIC_OSR_CLK          18
+#define SPARE_I2S_MIC_DIV_CLK          19
+#define SPARE_I2S_MIC_BIT_DIV_CLK      20
+#define SPARE_I2S_MIC_BIT_CLK          21
+#define CODEC_I2S_SPKR_OSR_SRC         22
+#define CODEC_I2S_SPKR_OSR_CLK         23
+#define CODEC_I2S_SPKR_DIV_CLK         24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK     25
+#define CODEC_I2S_SPKR_BIT_CLK         26
+#define SPARE_I2S_SPKR_OSR_SRC         27
+#define SPARE_I2S_SPKR_OSR_CLK         28
+#define SPARE_I2S_SPKR_DIV_CLK         29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK     30
+#define SPARE_I2S_SPKR_BIT_CLK         31
+
+#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
new file mode 100644 (file)
index 0000000..ae2eb17
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-car or
+ * nvidia,tegra132-car.
+ *
+ * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
+ * registers. These IDs often match those in the CAR's RST_DEVICES registers,
+ * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
+ * this case, those clocks are assigned IDs above 185 in order to highlight
+ * this issue. Implementations that interpret these clock IDs as bit values
+ * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
+ * explicitly handle these special cases.
+ *
+ * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
+ * above.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+
+/* 0 */
+/* 1 */
+/* 2 */
+#define TEGRA124_CLK_ISPB 3
+#define TEGRA124_CLK_RTC 4
+#define TEGRA124_CLK_TIMER 5
+#define TEGRA124_CLK_UARTA 6
+/* 7 (register bit affects uartb and vfir) */
+/* 8 */
+#define TEGRA124_CLK_SDMMC2 9
+/* 10 (register bit affects spdif_in and spdif_out) */
+#define TEGRA124_CLK_I2S1 11
+#define TEGRA124_CLK_I2C1 12
+/* 13 */
+#define TEGRA124_CLK_SDMMC1 14
+#define TEGRA124_CLK_SDMMC4 15
+/* 16 */
+#define TEGRA124_CLK_PWM 17
+#define TEGRA124_CLK_I2S2 18
+/* 20 (register bit affects vi and vi_sensor) */
+/* 21 */
+#define TEGRA124_CLK_USBD 22
+#define TEGRA124_CLK_ISP 23
+/* 24 */
+/* 25 */
+#define TEGRA124_CLK_DISP2 26
+#define TEGRA124_CLK_DISP1 27
+#define TEGRA124_CLK_HOST1X 28
+#define TEGRA124_CLK_VCP 29
+#define TEGRA124_CLK_I2S0 30
+/* 31 */
+
+#define TEGRA124_CLK_MC 32
+/* 33 */
+#define TEGRA124_CLK_APBDMA 34
+/* 35 */
+#define TEGRA124_CLK_KBC 36
+/* 37 */
+/* 38 */
+/* 39 (register bit affects fuse and fuse_burn) */
+#define TEGRA124_CLK_KFUSE 40
+#define TEGRA124_CLK_SBC1 41
+#define TEGRA124_CLK_NOR 42
+/* 43 */
+#define TEGRA124_CLK_SBC2 44
+/* 45 */
+#define TEGRA124_CLK_SBC3 46
+#define TEGRA124_CLK_I2C5 47
+#define TEGRA124_CLK_DSIA 48
+/* 49 */
+#define TEGRA124_CLK_MIPI 50
+#define TEGRA124_CLK_HDMI 51
+#define TEGRA124_CLK_CSI 52
+/* 53 */
+#define TEGRA124_CLK_I2C2 54
+#define TEGRA124_CLK_UARTC 55
+#define TEGRA124_CLK_MIPI_CAL 56
+#define TEGRA124_CLK_EMC 57
+#define TEGRA124_CLK_USB2 58
+#define TEGRA124_CLK_USB3 59
+/* 60 */
+#define TEGRA124_CLK_VDE 61
+#define TEGRA124_CLK_BSEA 62
+#define TEGRA124_CLK_BSEV 63
+
+/* 64 */
+#define TEGRA124_CLK_UARTD 65
+/* 66 */
+#define TEGRA124_CLK_I2C3 67
+#define TEGRA124_CLK_SBC4 68
+#define TEGRA124_CLK_SDMMC3 69
+#define TEGRA124_CLK_PCIE 70
+#define TEGRA124_CLK_OWR 71
+#define TEGRA124_CLK_AFI 72
+#define TEGRA124_CLK_CSITE 73
+/* 74 */
+/* 75 */
+#define TEGRA124_CLK_LA 76
+#define TEGRA124_CLK_TRACE 77
+#define TEGRA124_CLK_SOC_THERM 78
+#define TEGRA124_CLK_DTV 79
+/* 80 */
+#define TEGRA124_CLK_I2CSLOW 81
+#define TEGRA124_CLK_DSIB 82
+#define TEGRA124_CLK_TSEC 83
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA124_CLK_XUSB_HOST 89
+/* 90 */
+#define TEGRA124_CLK_MSENC 91
+#define TEGRA124_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA124_CLK_MSELECT 99
+#define TEGRA124_CLK_TSENSOR 100
+#define TEGRA124_CLK_I2S3 101
+#define TEGRA124_CLK_I2S4 102
+#define TEGRA124_CLK_I2C4 103
+#define TEGRA124_CLK_SBC5 104
+#define TEGRA124_CLK_SBC6 105
+#define TEGRA124_CLK_D_AUDIO 106
+#define TEGRA124_CLK_APBIF 107
+#define TEGRA124_CLK_DAM0 108
+#define TEGRA124_CLK_DAM1 109
+#define TEGRA124_CLK_DAM2 110
+#define TEGRA124_CLK_HDA2CODEC_2X 111
+/* 112 */
+#define TEGRA124_CLK_AUDIO0_2X 113
+#define TEGRA124_CLK_AUDIO1_2X 114
+#define TEGRA124_CLK_AUDIO2_2X 115
+#define TEGRA124_CLK_AUDIO3_2X 116
+#define TEGRA124_CLK_AUDIO4_2X 117
+#define TEGRA124_CLK_SPDIF_2X 118
+#define TEGRA124_CLK_ACTMON 119
+#define TEGRA124_CLK_EXTERN1 120
+#define TEGRA124_CLK_EXTERN2 121
+#define TEGRA124_CLK_EXTERN3 122
+#define TEGRA124_CLK_SATA_OOB 123
+#define TEGRA124_CLK_SATA 124
+#define TEGRA124_CLK_HDA 125
+/* 126 */
+#define TEGRA124_CLK_SE 127
+
+#define TEGRA124_CLK_HDA2HDMI 128
+#define TEGRA124_CLK_SATA_COLD 129
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+/* 136 */
+/* 137 */
+/* 138 */
+/* 139 */
+/* 140 */
+/* 141 */
+/* 142 */
+/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
+/*      xusb_host_src and xusb_ss_src) */
+#define TEGRA124_CLK_CILAB 144
+#define TEGRA124_CLK_CILCD 145
+#define TEGRA124_CLK_CILE 146
+#define TEGRA124_CLK_DSIALP 147
+#define TEGRA124_CLK_DSIBLP 148
+#define TEGRA124_CLK_ENTROPY 149
+#define TEGRA124_CLK_DDS 150
+/* 151 */
+#define TEGRA124_CLK_DP2 152
+#define TEGRA124_CLK_AMX 153
+#define TEGRA124_CLK_ADX 154
+/* 155 (bit affects dfll_ref and dfll_soc) */
+#define TEGRA124_CLK_XUSB_SS 156
+/* 157 */
+/* 158 */
+/* 159 */
+
+/* 160 */
+/* 161 */
+/* 162 */
+/* 163 */
+/* 164 */
+/* 165 */
+#define TEGRA124_CLK_I2C6 166
+/* 167 */
+/* 168 */
+/* 169 */
+/* 170 */
+#define TEGRA124_CLK_VIM2_CLK 171
+/* 172 */
+/* 173 */
+/* 174 */
+/* 175 */
+#define TEGRA124_CLK_HDMI_AUDIO 176
+#define TEGRA124_CLK_CLK72MHZ 177
+#define TEGRA124_CLK_VIC03 178
+/* 179 */
+#define TEGRA124_CLK_ADX1 180
+#define TEGRA124_CLK_DPAUX 181
+#define TEGRA124_CLK_SOR0 182
+/* 183 */
+#define TEGRA124_CLK_GPU 184
+#define TEGRA124_CLK_AMX1 185
+/* 186 */
+/* 187 */
+/* 188 */
+/* 189 */
+/* 190 */
+/* 191 */
+#define TEGRA124_CLK_UARTB 192
+#define TEGRA124_CLK_VFIR 193
+#define TEGRA124_CLK_SPDIF_IN 194
+#define TEGRA124_CLK_SPDIF_OUT 195
+#define TEGRA124_CLK_VI 196
+#define TEGRA124_CLK_VI_SENSOR 197
+#define TEGRA124_CLK_FUSE 198
+#define TEGRA124_CLK_FUSE_BURN 199
+#define TEGRA124_CLK_CLK_32K 200
+#define TEGRA124_CLK_CLK_M 201
+#define TEGRA124_CLK_CLK_M_DIV2 202
+#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_PLL_REF 204
+#define TEGRA124_CLK_PLL_C 205
+#define TEGRA124_CLK_PLL_C_OUT1 206
+#define TEGRA124_CLK_PLL_C2 207
+#define TEGRA124_CLK_PLL_C3 208
+#define TEGRA124_CLK_PLL_M 209
+#define TEGRA124_CLK_PLL_M_OUT1 210
+#define TEGRA124_CLK_PLL_P 211
+#define TEGRA124_CLK_PLL_P_OUT1 212
+#define TEGRA124_CLK_PLL_P_OUT2 213
+#define TEGRA124_CLK_PLL_P_OUT3 214
+#define TEGRA124_CLK_PLL_P_OUT4 215
+#define TEGRA124_CLK_PLL_A 216
+#define TEGRA124_CLK_PLL_A_OUT0 217
+#define TEGRA124_CLK_PLL_D 218
+#define TEGRA124_CLK_PLL_D_OUT0 219
+#define TEGRA124_CLK_PLL_D2 220
+#define TEGRA124_CLK_PLL_D2_OUT0 221
+#define TEGRA124_CLK_PLL_U 222
+#define TEGRA124_CLK_PLL_U_480M 223
+
+#define TEGRA124_CLK_PLL_U_60M 224
+#define TEGRA124_CLK_PLL_U_48M 225
+#define TEGRA124_CLK_PLL_U_12M 226
+/* 227 */
+/* 228 */
+#define TEGRA124_CLK_PLL_RE_VCO 229
+#define TEGRA124_CLK_PLL_RE_OUT 230
+#define TEGRA124_CLK_PLL_E 231
+#define TEGRA124_CLK_SPDIF_IN_SYNC 232
+#define TEGRA124_CLK_I2S0_SYNC 233
+#define TEGRA124_CLK_I2S1_SYNC 234
+#define TEGRA124_CLK_I2S2_SYNC 235
+#define TEGRA124_CLK_I2S3_SYNC 236
+#define TEGRA124_CLK_I2S4_SYNC 237
+#define TEGRA124_CLK_VIMCLK_SYNC 238
+#define TEGRA124_CLK_AUDIO0 239
+#define TEGRA124_CLK_AUDIO1 240
+#define TEGRA124_CLK_AUDIO2 241
+#define TEGRA124_CLK_AUDIO3 242
+#define TEGRA124_CLK_AUDIO4 243
+#define TEGRA124_CLK_SPDIF 244
+#define TEGRA124_CLK_CLK_OUT_1 245
+#define TEGRA124_CLK_CLK_OUT_2 246
+#define TEGRA124_CLK_CLK_OUT_3 247
+#define TEGRA124_CLK_BLINK 248
+/* 249 */
+/* 250 */
+/* 251 */
+#define TEGRA124_CLK_XUSB_HOST_SRC 252
+#define TEGRA124_CLK_XUSB_FALCON_SRC 253
+#define TEGRA124_CLK_XUSB_FS_SRC 254
+#define TEGRA124_CLK_XUSB_SS_SRC 255
+
+#define TEGRA124_CLK_XUSB_DEV_SRC 256
+#define TEGRA124_CLK_XUSB_DEV 257
+#define TEGRA124_CLK_XUSB_HS_SRC 258
+#define TEGRA124_CLK_SCLK 259
+#define TEGRA124_CLK_HCLK 260
+#define TEGRA124_CLK_PCLK 261
+/* 262 */
+/* 263 */
+#define TEGRA124_CLK_DFLL_REF 264
+#define TEGRA124_CLK_DFLL_SOC 265
+#define TEGRA124_CLK_VI_SENSOR2 266
+#define TEGRA124_CLK_PLL_P_OUT5 267
+#define TEGRA124_CLK_CML0 268
+#define TEGRA124_CLK_CML1 269
+#define TEGRA124_CLK_PLL_C4 270
+#define TEGRA124_CLK_PLL_DP 271
+#define TEGRA124_CLK_PLL_E_MUX 272
+#define TEGRA124_CLK_PLLD_DSI 273
+/* 274 */
+/* 275 */
+/* 276 */
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
+/* 281 */
+/* 282 */
+/* 283 */
+/* 284 */
+/* 285 */
+/* 286 */
+/* 287 */
+
+/* 288 */
+/* 289 */
+/* 290 */
+/* 291 */
+/* 292 */
+/* 293 */
+/* 294 */
+/* 295 */
+/* 296 */
+/* 297 */
+/* 298 */
+/* 299 */
+#define TEGRA124_CLK_AUDIO0_MUX 300
+#define TEGRA124_CLK_AUDIO1_MUX 301
+#define TEGRA124_CLK_AUDIO2_MUX 302
+#define TEGRA124_CLK_AUDIO3_MUX 303
+#define TEGRA124_CLK_AUDIO4_MUX 304
+#define TEGRA124_CLK_SPDIF_MUX 305
+#define TEGRA124_CLK_CLK_OUT_1_MUX 306
+#define TEGRA124_CLK_CLK_OUT_2_MUX 307
+#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 309 */
+/* 310 */
+#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_XUSB_SS_DIV2 312
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
index af9bc9a3ddbc561840b60f6f5416f19f846e8567..2860737f04436ba0515895b7b04b85e4bfe7885c 100644 (file)
 /*
- * This header provides constants for binding nvidia,tegra124-car.
- *
- * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
- * registers. These IDs often match those in the CAR's RST_DEVICES registers,
- * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
- * this case, those clocks are assigned IDs above 185 in order to highlight
- * this issue. Implementations that interpret these clock IDs as bit values
- * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
- * explicitly handle these special cases.
- *
- * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
- * above.
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
  */
 
+#include <dt-bindings/clock/tegra124-car-common.h>
+
 #ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 #define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 
-/* 0 */
-/* 1 */
-/* 2 */
-#define TEGRA124_CLK_ISPB 3
-#define TEGRA124_CLK_RTC 4
-#define TEGRA124_CLK_TIMER 5
-#define TEGRA124_CLK_UARTA 6
-/* 7 (register bit affects uartb and vfir) */
-/* 8 */
-#define TEGRA124_CLK_SDMMC2 9
-/* 10 (register bit affects spdif_in and spdif_out) */
-#define TEGRA124_CLK_I2S1 11
-#define TEGRA124_CLK_I2C1 12
-/* 13 */
-#define TEGRA124_CLK_SDMMC1 14
-#define TEGRA124_CLK_SDMMC4 15
-/* 16 */
-#define TEGRA124_CLK_PWM 17
-#define TEGRA124_CLK_I2S2 18
-/* 20 (register bit affects vi and vi_sensor) */
-/* 21 */
-#define TEGRA124_CLK_USBD 22
-#define TEGRA124_CLK_ISP 23
-/* 26 */
-/* 25 */
-#define TEGRA124_CLK_DISP2 26
-#define TEGRA124_CLK_DISP1 27
-#define TEGRA124_CLK_HOST1X 28
-#define TEGRA124_CLK_VCP 29
-#define TEGRA124_CLK_I2S0 30
-/* 31 */
-
-#define TEGRA124_CLK_MC 32
-/* 33 */
-#define TEGRA124_CLK_APBDMA 34
-/* 35 */
-#define TEGRA124_CLK_KBC 36
-/* 37 */
-/* 38 */
-/* 39 (register bit affects fuse and fuse_burn) */
-#define TEGRA124_CLK_KFUSE 40
-#define TEGRA124_CLK_SBC1 41
-#define TEGRA124_CLK_NOR 42
-/* 43 */
-#define TEGRA124_CLK_SBC2 44
-/* 45 */
-#define TEGRA124_CLK_SBC3 46
-#define TEGRA124_CLK_I2C5 47
-#define TEGRA124_CLK_DSIA 48
-/* 49 */
-#define TEGRA124_CLK_MIPI 50
-#define TEGRA124_CLK_HDMI 51
-#define TEGRA124_CLK_CSI 52
-/* 53 */
-#define TEGRA124_CLK_I2C2 54
-#define TEGRA124_CLK_UARTC 55
-#define TEGRA124_CLK_MIPI_CAL 56
-#define TEGRA124_CLK_EMC 57
-#define TEGRA124_CLK_USB2 58
-#define TEGRA124_CLK_USB3 59
-/* 60 */
-#define TEGRA124_CLK_VDE 61
-#define TEGRA124_CLK_BSEA 62
-#define TEGRA124_CLK_BSEV 63
-
-/* 64 */
-#define TEGRA124_CLK_UARTD 65
-/* 66 */
-#define TEGRA124_CLK_I2C3 67
-#define TEGRA124_CLK_SBC4 68
-#define TEGRA124_CLK_SDMMC3 69
-#define TEGRA124_CLK_PCIE 70
-#define TEGRA124_CLK_OWR 71
-#define TEGRA124_CLK_AFI 72
-#define TEGRA124_CLK_CSITE 73
-/* 74 */
-/* 75 */
-#define TEGRA124_CLK_LA 76
-#define TEGRA124_CLK_TRACE 77
-#define TEGRA124_CLK_SOC_THERM 78
-#define TEGRA124_CLK_DTV 79
-/* 80 */
-#define TEGRA124_CLK_I2CSLOW 81
-#define TEGRA124_CLK_DSIB 82
-#define TEGRA124_CLK_TSEC 83
-/* 84 */
-/* 85 */
-/* 86 */
-/* 87 */
-/* 88 */
-#define TEGRA124_CLK_XUSB_HOST 89
-/* 90 */
-#define TEGRA124_CLK_MSENC 91
-#define TEGRA124_CLK_CSUS 92
-/* 93 */
-/* 94 */
-/* 95 (bit affects xusb_dev and xusb_dev_src) */
-
-/* 96 */
-/* 97 */
-/* 98 */
-#define TEGRA124_CLK_MSELECT 99
-#define TEGRA124_CLK_TSENSOR 100
-#define TEGRA124_CLK_I2S3 101
-#define TEGRA124_CLK_I2S4 102
-#define TEGRA124_CLK_I2C4 103
-#define TEGRA124_CLK_SBC5 104
-#define TEGRA124_CLK_SBC6 105
-#define TEGRA124_CLK_D_AUDIO 106
-#define TEGRA124_CLK_APBIF 107
-#define TEGRA124_CLK_DAM0 108
-#define TEGRA124_CLK_DAM1 109
-#define TEGRA124_CLK_DAM2 110
-#define TEGRA124_CLK_HDA2CODEC_2X 111
-/* 112 */
-#define TEGRA124_CLK_AUDIO0_2X 113
-#define TEGRA124_CLK_AUDIO1_2X 114
-#define TEGRA124_CLK_AUDIO2_2X 115
-#define TEGRA124_CLK_AUDIO3_2X 116
-#define TEGRA124_CLK_AUDIO4_2X 117
-#define TEGRA124_CLK_SPDIF_2X 118
-#define TEGRA124_CLK_ACTMON 119
-#define TEGRA124_CLK_EXTERN1 120
-#define TEGRA124_CLK_EXTERN2 121
-#define TEGRA124_CLK_EXTERN3 122
-#define TEGRA124_CLK_SATA_OOB 123
-#define TEGRA124_CLK_SATA 124
-#define TEGRA124_CLK_HDA 125
-/* 126 */
-#define TEGRA124_CLK_SE 127
-
-#define TEGRA124_CLK_HDA2HDMI 128
-#define TEGRA124_CLK_SATA_COLD 129
-/* 130 */
-/* 131 */
-/* 132 */
-/* 133 */
-/* 134 */
-/* 135 */
-/* 136 */
-/* 137 */
-/* 138 */
-/* 139 */
-/* 140 */
-/* 141 */
-/* 142 */
-/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
-/*      xusb_host_src and xusb_ss_src) */
-#define TEGRA124_CLK_CILAB 144
-#define TEGRA124_CLK_CILCD 145
-#define TEGRA124_CLK_CILE 146
-#define TEGRA124_CLK_DSIALP 147
-#define TEGRA124_CLK_DSIBLP 148
-#define TEGRA124_CLK_ENTROPY 149
-#define TEGRA124_CLK_DDS 150
-/* 151 */
-#define TEGRA124_CLK_DP2 152
-#define TEGRA124_CLK_AMX 153
-#define TEGRA124_CLK_ADX 154
-/* 155 (bit affects dfll_ref and dfll_soc) */
-#define TEGRA124_CLK_XUSB_SS 156
-/* 157 */
-/* 158 */
-/* 159 */
-
-/* 160 */
-/* 161 */
-/* 162 */
-/* 163 */
-/* 164 */
-/* 165 */
-#define TEGRA124_CLK_I2C6 166
-/* 167 */
-/* 168 */
-/* 169 */
-/* 170 */
-#define TEGRA124_CLK_VIM2_CLK 171
-/* 172 */
-/* 173 */
-/* 174 */
-/* 175 */
-#define TEGRA124_CLK_HDMI_AUDIO 176
-#define TEGRA124_CLK_CLK72MHZ 177
-#define TEGRA124_CLK_VIC03 178
-/* 179 */
-#define TEGRA124_CLK_ADX1 180
-#define TEGRA124_CLK_DPAUX 181
-#define TEGRA124_CLK_SOR0 182
-/* 183 */
-#define TEGRA124_CLK_GPU 184
-#define TEGRA124_CLK_AMX1 185
-/* 186 */
-/* 187 */
-/* 188 */
-/* 189 */
-/* 190 */
-/* 191 */
-#define TEGRA124_CLK_UARTB 192
-#define TEGRA124_CLK_VFIR 193
-#define TEGRA124_CLK_SPDIF_IN 194
-#define TEGRA124_CLK_SPDIF_OUT 195
-#define TEGRA124_CLK_VI 196
-#define TEGRA124_CLK_VI_SENSOR 197
-#define TEGRA124_CLK_FUSE 198
-#define TEGRA124_CLK_FUSE_BURN 199
-#define TEGRA124_CLK_CLK_32K 200
-#define TEGRA124_CLK_CLK_M 201
-#define TEGRA124_CLK_CLK_M_DIV2 202
-#define TEGRA124_CLK_CLK_M_DIV4 203
-#define TEGRA124_CLK_PLL_REF 204
-#define TEGRA124_CLK_PLL_C 205
-#define TEGRA124_CLK_PLL_C_OUT1 206
-#define TEGRA124_CLK_PLL_C2 207
-#define TEGRA124_CLK_PLL_C3 208
-#define TEGRA124_CLK_PLL_M 209
-#define TEGRA124_CLK_PLL_M_OUT1 210
-#define TEGRA124_CLK_PLL_P 211
-#define TEGRA124_CLK_PLL_P_OUT1 212
-#define TEGRA124_CLK_PLL_P_OUT2 213
-#define TEGRA124_CLK_PLL_P_OUT3 214
-#define TEGRA124_CLK_PLL_P_OUT4 215
-#define TEGRA124_CLK_PLL_A 216
-#define TEGRA124_CLK_PLL_A_OUT0 217
-#define TEGRA124_CLK_PLL_D 218
-#define TEGRA124_CLK_PLL_D_OUT0 219
-#define TEGRA124_CLK_PLL_D2 220
-#define TEGRA124_CLK_PLL_D2_OUT0 221
-#define TEGRA124_CLK_PLL_U 222
-#define TEGRA124_CLK_PLL_U_480M 223
-
-#define TEGRA124_CLK_PLL_U_60M 224
-#define TEGRA124_CLK_PLL_U_48M 225
-#define TEGRA124_CLK_PLL_U_12M 226
-#define TEGRA124_CLK_PLL_X 227
-#define TEGRA124_CLK_PLL_X_OUT0 228
-#define TEGRA124_CLK_PLL_RE_VCO 229
-#define TEGRA124_CLK_PLL_RE_OUT 230
-#define TEGRA124_CLK_PLL_E 231
-#define TEGRA124_CLK_SPDIF_IN_SYNC 232
-#define TEGRA124_CLK_I2S0_SYNC 233
-#define TEGRA124_CLK_I2S1_SYNC 234
-#define TEGRA124_CLK_I2S2_SYNC 235
-#define TEGRA124_CLK_I2S3_SYNC 236
-#define TEGRA124_CLK_I2S4_SYNC 237
-#define TEGRA124_CLK_VIMCLK_SYNC 238
-#define TEGRA124_CLK_AUDIO0 239
-#define TEGRA124_CLK_AUDIO1 240
-#define TEGRA124_CLK_AUDIO2 241
-#define TEGRA124_CLK_AUDIO3 242
-#define TEGRA124_CLK_AUDIO4 243
-#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
-/* 249 */
-/* 250 */
-/* 251 */
-#define TEGRA124_CLK_XUSB_HOST_SRC 252
-#define TEGRA124_CLK_XUSB_FALCON_SRC 253
-#define TEGRA124_CLK_XUSB_FS_SRC 254
-#define TEGRA124_CLK_XUSB_SS_SRC 255
-
-#define TEGRA124_CLK_XUSB_DEV_SRC 256
-#define TEGRA124_CLK_XUSB_DEV 257
-#define TEGRA124_CLK_XUSB_HS_SRC 258
-#define TEGRA124_CLK_SCLK 259
-#define TEGRA124_CLK_HCLK 260
-#define TEGRA124_CLK_PCLK 261
-#define TEGRA124_CLK_CCLK_G 262
-#define TEGRA124_CLK_CCLK_LP 263
-#define TEGRA124_CLK_DFLL_REF 264
-#define TEGRA124_CLK_DFLL_SOC 265
-#define TEGRA124_CLK_VI_SENSOR2 266
-#define TEGRA124_CLK_PLL_P_OUT5 267
-#define TEGRA124_CLK_CML0 268
-#define TEGRA124_CLK_CML1 269
-#define TEGRA124_CLK_PLL_C4 270
-#define TEGRA124_CLK_PLL_DP 271
-#define TEGRA124_CLK_PLL_E_MUX 272
-/* 273 */
-/* 274 */
-/* 275 */
-/* 276 */
-/* 277 */
-/* 278 */
-/* 279 */
-/* 280 */
-/* 281 */
-/* 282 */
-/* 283 */
-/* 284 */
-/* 285 */
-/* 286 */
-/* 287 */
-
-/* 288 */
-/* 289 */
-/* 290 */
-/* 291 */
-/* 292 */
-/* 293 */
-/* 294 */
-/* 295 */
-/* 296 */
-/* 297 */
-/* 298 */
-/* 299 */
-#define TEGRA124_CLK_AUDIO0_MUX 300
-#define TEGRA124_CLK_AUDIO1_MUX 301
-#define TEGRA124_CLK_AUDIO2_MUX 302
-#define TEGRA124_CLK_AUDIO3_MUX 303
-#define TEGRA124_CLK_AUDIO4_MUX 304
-#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
-#define TEGRA124_CLK_DSIA_MUX 309
-#define TEGRA124_CLK_DSIB_MUX 310
-#define TEGRA124_CLK_SOR0_LVDS 311
-#define TEGRA124_CLK_XUSB_SS_DIV2 312
+#define TEGRA124_CLK_PLL_X             227
+#define TEGRA124_CLK_PLL_X_OUT0                228
 
-#define TEGRA124_CLK_PLL_M_UD 313
-#define TEGRA124_CLK_PLL_C_UD 314
+#define TEGRA124_CLK_CCLK_G            262
+#define TEGRA124_CLK_CCLK_LP           263
 
-#define TEGRA124_CLK_CLK_MAX 315
+#define TEGRA124_CLK_CLK_MAX           315
 
 #endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
index 2fbc804e1a45cea943c9241309601f941abd5d14..226f77246a70c689f34c6720f1796cf59b4b8a3b 100644 (file)
@@ -13,7 +13,8 @@
 
 #define PULL_DISABLE           (1 << 3)
 #define INPUT_EN               (1 << 5)
-#define SLEWCTRL_FAST          (1 << 6)
+#define SLEWCTRL_SLOW          (1 << 6)
+#define SLEWCTRL_FAST          0
 
 /* update macro depending on INPUT_EN and PULL_ENA */
 #undef PIN_OUTPUT
index 9c2e4f82381e8abc7b21ac59447fefb84d5a1ea3..5f4d01898c9c153ff73feebd46930524a2d50a4d 100644 (file)
@@ -18,7 +18,8 @@
 #define PULL_DISABLE           (1 << 16)
 #define PULL_UP                        (1 << 17)
 #define INPUT_EN               (1 << 18)
-#define SLEWCTRL_FAST          (1 << 19)
+#define SLEWCTRL_SLOW          (1 << 19)
+#define SLEWCTRL_FAST          0
 #define DS0_PULL_UP_DOWN_EN    (1 << 27)
 
 #define PIN_OUTPUT             (PULL_DISABLE)
index 7c55dd5dd2c9faf2202692ca5f669f064c173ed4..66203b268984ebedd72d5bd1b2f54440e56011bc 100644 (file)
@@ -114,6 +114,7 @@ struct vgic_ops {
        void    (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
        u64     (*get_elrsr)(const struct kvm_vcpu *vcpu);
        u64     (*get_eisr)(const struct kvm_vcpu *vcpu);
+       void    (*clear_eisr)(struct kvm_vcpu *vcpu);
        u32     (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
        void    (*enable_underflow)(struct kvm_vcpu *vcpu);
        void    (*disable_underflow)(struct kvm_vcpu *vcpu);
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
deleted file mode 100644 (file)
index 0ca5f60..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- *  linux/include/linux/clk-private.h
- *
- *  Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
- *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_CLK_PRIVATE_H
-#define __LINUX_CLK_PRIVATE_H
-
-#include <linux/clk-provider.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-
-/*
- * WARNING: Do not include clk-private.h from any file that implements struct
- * clk_ops.  Doing so is a layering violation!
- *
- * This header exists only to allow for statically initialized clock data.  Any
- * static clock data must be defined in a separate file from the logic that
- * implements the clock operations for that same data.
- */
-
-#ifdef CONFIG_COMMON_CLK
-
-struct module;
-
-struct clk {
-       const char              *name;
-       const struct clk_ops    *ops;
-       struct clk_hw           *hw;
-       struct module           *owner;
-       struct clk              *parent;
-       const char              **parent_names;
-       struct clk              **parents;
-       u8                      num_parents;
-       u8                      new_parent_index;
-       unsigned long           rate;
-       unsigned long           new_rate;
-       struct clk              *new_parent;
-       struct clk              *new_child;
-       unsigned long           flags;
-       unsigned int            enable_count;
-       unsigned int            prepare_count;
-       unsigned long           accuracy;
-       int                     phase;
-       struct hlist_head       children;
-       struct hlist_node       child_node;
-       struct hlist_node       debug_node;
-       unsigned int            notifier_count;
-#ifdef CONFIG_DEBUG_FS
-       struct dentry           *dentry;
-#endif
-       struct kref             ref;
-};
-
-/*
- * DOC: Basic clock implementations common to many platforms
- *
- * Each basic clock hardware type is comprised of a structure describing the
- * clock hardware, implementations of the relevant callbacks in struct clk_ops,
- * unique flags for that hardware type, a registration function and an
- * alternative macro for static initialization
- */
-
-#define DEFINE_CLK(_name, _ops, _flags, _parent_names,         \
-               _parents)                                       \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &_ops,                                   \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _parent_names,                  \
-               .num_parents = ARRAY_SIZE(_parent_names),       \
-               .parents = _parents,                            \
-               .flags = _flags | CLK_IS_BASIC,                 \
-       }
-
-#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate,            \
-                               _fixed_rate_flags)              \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {};         \
-       static struct clk_fixed_rate _name##_hw = {             \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .fixed_rate = _rate,                            \
-               .flags = _fixed_rate_flags,                     \
-       };                                                      \
-       DEFINE_CLK(_name, clk_fixed_rate_ops, _flags,           \
-                       _name##_parent_names, NULL);
-
-#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr,      \
-                               _flags, _reg, _bit_idx,         \
-                               _gate_flags, _lock)             \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_gate _name##_hw = {                   \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .bit_idx = _bit_idx,                            \
-               .flags = _gate_flags,                           \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_gate_ops, _flags,                 \
-                       _name##_parent_names, _name##_parents);
-
-#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,  \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _table, _lock)  \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_divider _name##_hw = {                \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .shift = _shift,                                \
-               .width = _width,                                \
-               .flags = _divider_flags,                        \
-               .table = _table,                                \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_divider_ops, _flags,              \
-                       _name##_parent_names, _name##_parents);
-
-#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _lock)          \
-       _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, NULL, _lock)
-
-#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name,          \
-                               _parent_ptr, _flags, _reg,      \
-                               _shift, _width, _divider_flags, \
-                               _table, _lock)                  \
-       _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _table, _lock)  \
-
-#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
-                               _reg, _shift, _width,           \
-                               _mux_flags, _lock)              \
-       static struct clk _name;                                \
-       static struct clk_mux _name##_hw = {                    \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .shift = _shift,                                \
-               .mask = BIT(_width) - 1,                        \
-               .flags = _mux_flags,                            \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names,   \
-                       _parents);
-
-#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name,           \
-                               _parent_ptr, _flags,            \
-                               _mult, _div)                    \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_fixed_factor _name##_hw = {           \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .mult = _mult,                                  \
-               .div = _div,                                    \
-       };                                                      \
-       DEFINE_CLK(_name, clk_fixed_factor_ops, _flags,         \
-                       _name##_parent_names, _name##_parents);
-
-/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev:       device initializing this clk, placeholder for now
- * @clk:       clk being initialized
- *
- * Initializes the lists in struct clk, queries the hardware for the
- * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- *     .name
- *     .ops
- *     .hw
- *     .parent_names
- *     .num_parents
- *     .flags
- *
- * It is not necessary to call clk_register if __clk_init is used directly with
- * statically initialized clock data.
- *
- * Returns 0 on success, otherwise an error code.
- */
-int __clk_init(struct device *dev, struct clk *clk);
-
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
-
-#endif /* CONFIG_COMMON_CLK */
-#endif /* CLK_PRIVATE_H */
index d936409520f8db609994f7ddab629a99981883dc..5591ea71a8d14054bf923dcad45dc94f71bd029a 100644 (file)
@@ -33,6 +33,7 @@
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
 
 struct clk_hw;
+struct clk_core;
 struct dentry;
 
 /**
@@ -174,9 +175,12 @@ struct clk_ops {
                                        unsigned long parent_rate);
        long            (*round_rate)(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *parent_rate);
-       long            (*determine_rate)(struct clk_hw *hw, unsigned long rate,
-                                       unsigned long *best_parent_rate,
-                                       struct clk_hw **best_parent_hw);
+       long            (*determine_rate)(struct clk_hw *hw,
+                                         unsigned long rate,
+                                         unsigned long min_rate,
+                                         unsigned long max_rate,
+                                         unsigned long *best_parent_rate,
+                                         struct clk_hw **best_parent_hw);
        int             (*set_parent)(struct clk_hw *hw, u8 index);
        u8              (*get_parent)(struct clk_hw *hw);
        int             (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -216,13 +220,17 @@ struct clk_init_data {
  * clk_foo and then referenced by the struct clk instance that uses struct
  * clk_foo's clk_ops
  *
- * @clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
  *
  * @init: pointer to struct clk_init_data that contains the init data shared
  * with the common clock framework.
  */
 struct clk_hw {
+       struct clk_core *core;
        struct clk *clk;
        const struct clk_init_data *init;
 };
@@ -294,6 +302,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 bit_idx,
                u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
 
 struct clk_div_table {
        unsigned int    val;
@@ -352,6 +361,17 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY          BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+               unsigned int val, const struct clk_div_table *table,
+               unsigned long flags);
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *prate, const struct clk_div_table *table,
+               u8 width, unsigned long flags);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+               const struct clk_div_table *table, u8 width,
+               unsigned long flags);
+
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -361,6 +381,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_divider_flags, const struct clk_div_table *table,
                spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
 
 /**
  * struct clk_mux - multiplexer clock
@@ -382,6 +403,8 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
  *     register, and mask of mux bits are in higher 16-bit of this register.
  *     While setting the mux bits, higher 16-bit should also be updated to
  *     indicate changing mux bits.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ *     frequency.
  */
 struct clk_mux {
        struct clk_hw   hw;
@@ -396,7 +419,8 @@ struct clk_mux {
 #define CLK_MUX_INDEX_ONE              BIT(0)
 #define CLK_MUX_INDEX_BIT              BIT(1)
 #define CLK_MUX_HIWORD_MASK            BIT(2)
-#define CLK_MUX_READ_ONLY      BIT(3) /* mux setting cannot be changed */
+#define CLK_MUX_READ_ONLY              BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST          BIT(4)
 
 extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
@@ -411,6 +435,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
                void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 
+void clk_unregister_mux(struct clk *clk);
+
 void of_fixed_factor_clk_setup(struct device_node *node);
 
 /**
@@ -550,15 +576,29 @@ bool __clk_is_prepared(struct clk *clk);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p);
+unsigned long __clk_determine_rate(struct clk_hw *core,
+                                  unsigned long rate,
+                                  unsigned long min_rate,
+                                  unsigned long max_rate);
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p);
+
+static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+{
+       dst->clk = src->clk;
+       dst->core = src->core;
+}
 
 /*
  * FIXME clock api without lock protection
  */
-int __clk_prepare(struct clk *clk);
-void __clk_unprepare(struct clk *clk);
-void __clk_reparent(struct clk *clk, struct clk *new_parent);
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
 
 struct of_device_id;
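With min_rate and max_rate added to .determine_rate, a provider that simply defers to the generic mux helper passes the limits straight through. A hedged sketch (the driver name is hypothetical):

static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long min_rate,
			       unsigned long max_rate,
			       unsigned long *best_parent_rate,
			       struct clk_hw **best_parent_hw)
{
	/* Forward the new rate limits to the generic mux helper. */
	return __clk_mux_determine_rate_closest(hw, rate, min_rate, max_rate,
						best_parent_rate,
						best_parent_hw);
}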
index c7f258a81761d22b1b204654d3582af55044915a..68c16a6bedb36462c3cec290c9eee81abe2072f9 100644 (file)
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees);
  */
 int clk_get_phase(struct clk *clk);
 
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q);
+
 #else
 
 static inline long clk_get_accuracy(struct clk *clk)
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk)
        return -ENOTSUPP;
 }
 
+static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+       return p == q;
+}
+
 #endif
 
 /**
@@ -301,6 +319,46 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
 
+/**
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
+ *
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
+ */
+bool clk_has_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
 /**
  * clk_set_parent - set the parent clock source for this clock
  * @clk: clock source
@@ -374,6 +432,11 @@ static inline long clk_round_rate(struct clk *clk, unsigned long rate)
        return 0;
 }
 
+static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+       return true;
+}
+
 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
 {
        return 0;
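On the consumer side, the new rate-constraint and comparison helpers combine roughly as follows (a sketch with hypothetical handles a and b, not code from this patch):

static int example_constrain(struct device *dev, struct clk *a, struct clk *b)
{
	int ret;

	/* Keep the clock between 100 MHz and 200 MHz, inclusive. */
	ret = clk_set_rate_range(a, 100000000, 200000000);
	if (ret)
		return ret;

	/* Two struct clk handles may share one clk_core underneath. */
	if (clk_is_match(a, b))
		dev_dbg(dev, "a and b are the same hardware clock\n");

	return 0;
}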
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644 (file)
index aed28c4..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2013 - Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-#include <linux/clk.h>
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output);
-
-#endif
index 3ca9fca827a2f1299ed7eea5ca9ec908e57f2fc8..19c4208f4752fd6957712b8a9e900a6baa0b4945 100644 (file)
@@ -120,6 +120,4 @@ static inline void tegra_cpu_clock_resume(void)
 }
 #endif
 
-void tegra_clocks_apply_init_table(void);
-
 #endif /* __LINUX_CLK_TEGRA_H_ */
index 55ef529a0dbf905995781bd051bf926396b837c5..67844003493de5936809dc1acac72cd88b5bc521 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __LINUX_CLK_TI_H__
 #define __LINUX_CLK_TI_H__
 
+#include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 
 /**
@@ -217,6 +218,13 @@ struct ti_dt_clk {
 /* Maximum number of clock memmaps */
 #define CLK_MAX_MEMMAPS                        4
 
+/* Static memmap indices */
+enum {
+       TI_CLKM_CM = 0,
+       TI_CLKM_PRM,
+       TI_CLKM_SCRM,
+};
+
 typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
 
 /**
@@ -263,6 +271,8 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
                                           u8 index);
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
                                       unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_clk);
 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -272,6 +282,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
                                    unsigned long *parent_rate);
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
                                        unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk);
 u8 omap2_init_dpll_parent(struct clk_hw *hw);
@@ -348,4 +360,17 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
 
+#ifdef CONFIG_ATAGS
+int omap3430_clk_legacy_init(void);
+int omap3430es1_clk_legacy_init(void);
+int omap36xx_clk_legacy_init(void);
+int am35xx_clk_legacy_init(void);
+#else
+static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
+static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
+#endif
+
+
 #endif
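Because the !CONFIG_ATAGS stubs return -ENXIO, platform code can call the legacy clock init unconditionally and fall back to DT-provided clocks. An illustrative sketch (function name is hypothetical):

static int __init example_clk_init(void)
{
	int ret = omap3430_clk_legacy_init();

	if (ret == -ENXIO)
		pr_info("no legacy clock data built in, relying on DT clocks\n");

	return ret;
}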
index d1ec10a940ffffb01a94bcedf9d113e1940619c7..1b45e4a0519b2c34033db91e37fd1f1f0b367f8c 100644 (file)
@@ -202,7 +202,7 @@ static __always_inline void data_access_exceeds_word_size(void)
 {
 }
 
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
@@ -259,10 +259,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  */
 
 #define READ_ONCE(x) \
-       ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+       ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
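
The reworked READ_ONCE() takes a const volatile pointer and stores the result through a union, so it can be used on const-qualified data, while WRITE_ONCE() initialises its temporary directly from the parenthesised value. A minimal usage sketch (shared_flag, update_flag and read_flag are made-up names; assumes <linux/compiler.h>):

#include <linux/compiler.h>

static int shared_flag;

static void update_flag(int v)
{
        WRITE_ONCE(shared_flag, v);     /* single, non-torn store */
}

static int read_flag(void)
{
        return READ_ONCE(shared_flag);  /* forces a fresh, single load */
}
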
 
index f551a9299ac987c5121a6f58383a953a8704d729..306178d7309f193cb32443f49c888297ffa9573e 100644 (file)
@@ -126,6 +126,8 @@ struct cpuidle_driver {
 
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
+extern bool cpuidle_not_available(struct cpuidle_driver *drv,
+                                 struct cpuidle_device *dev);
 
 extern int cpuidle_select(struct cpuidle_driver *drv,
                          struct cpuidle_device *dev);
@@ -150,11 +152,17 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern void cpuidle_enter_freeze(void);
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+                                     struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+                               struct cpuidle_device *dev);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
+static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+                                        struct cpuidle_device *dev)
+{return true; }
 static inline int cpuidle_select(struct cpuidle_driver *drv,
                                 struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -183,7 +191,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_enter_freeze(void) { }
+static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+                                            struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+                                      struct cpuidle_device *dev)
+{return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
        struct cpuidle_device *dev) {return NULL; }
 #endif
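
With this change cpuidle_enter_freeze() takes the driver/device pair and reports failure, and cpuidle_not_available()/cpuidle_find_deepest_state() are exposed for the suspend-to-idle path. A hedged sketch of how a caller might combine them (not the scheduler's actual idle loop; drv and dev would come from cpuidle_get_cpu_driver() and the per-CPU device in real code):

#include <linux/cpuidle.h>

static void try_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        int state;

        if (cpuidle_not_available(drv, dev))
                return;                 /* no usable cpuidle: use the default idle routine */

        if (cpuidle_enter_freeze(drv, dev) >= 0)
                return;                 /* a suspend-to-idle state was entered */

        /* freeze not possible: fall back to normal state selection */
        state = cpuidle_select(drv, dev);
        if (state >= 0)
                cpuidle_enter(drv, dev, state);
}
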
index 92c08cf7670e2afa128a1a15cd454b7f2398fbbf..d8358799c59411f5b715e9dfbb139b94b8dbca8f 100644 (file)
@@ -215,13 +215,16 @@ struct dentry_operations {
 #define DCACHE_LRU_LIST                        0x00080000
 
 #define DCACHE_ENTRY_TYPE              0x00700000
-#define DCACHE_MISS_TYPE               0x00000000 /* Negative dentry */
-#define DCACHE_DIRECTORY_TYPE          0x00100000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE            0x00200000 /* Lookupless directory (presumed automount) */
-#define DCACHE_SYMLINK_TYPE            0x00300000 /* Symlink */
-#define DCACHE_FILE_TYPE               0x00400000 /* Other file type */
+#define DCACHE_MISS_TYPE               0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
+#define DCACHE_WHITEOUT_TYPE           0x00100000 /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE          0x00200000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE            0x00300000 /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE            0x00400000 /* Regular file type (or fallthru to such) */
+#define DCACHE_SPECIAL_TYPE            0x00500000 /* Other file type (or fallthru to such) */
+#define DCACHE_SYMLINK_TYPE            0x00600000 /* Symlink (or fallthru to such) */
 
 #define DCACHE_MAY_FREE                        0x00800000
+#define DCACHE_FALLTHRU                        0x01000000 /* Fall through to lower layer */
 
 extern seqlock_t rename_lock;
 
@@ -423,6 +426,16 @@ static inline unsigned __d_entry_type(const struct dentry *dentry)
        return dentry->d_flags & DCACHE_ENTRY_TYPE;
 }
 
+static inline bool d_is_miss(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_whiteout(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
+}
+
 static inline bool d_can_lookup(const struct dentry *dentry)
 {
        return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
@@ -443,14 +456,25 @@ static inline bool d_is_symlink(const struct dentry *dentry)
        return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
 }
 
+static inline bool d_is_reg(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
+}
+
+static inline bool d_is_special(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
+}
+
 static inline bool d_is_file(const struct dentry *dentry)
 {
-       return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+       return d_is_reg(dentry) || d_is_special(dentry);
 }
 
 static inline bool d_is_negative(const struct dentry *dentry)
 {
-       return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+       // TODO: check d_is_whiteout(dentry) also.
+       return d_is_miss(dentry);
 }
 
 static inline bool d_is_positive(const struct dentry *dentry)
@@ -458,10 +482,75 @@ static inline bool d_is_positive(const struct dentry *dentry)
        return !d_is_negative(dentry);
 }
 
+extern void d_set_fallthru(struct dentry *dentry);
+
+static inline bool d_is_fallthru(const struct dentry *dentry)
+{
+       return dentry->d_flags & DCACHE_FALLTHRU;
+}
+
+
 extern int sysctl_vfs_cache_pressure;
 
 static inline unsigned long vfs_pressure_ratio(unsigned long val)
 {
        return mult_frac(val, sysctl_vfs_cache_pressure, 100);
 }
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+       return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+       return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file.  The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+       struct inode *inode = upper->d_inode;
+
+       return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file.  It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+       return upper;
+}
+
 #endif /* __LINUX_DCACHE_H */
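
The new accessors make the intent explicit: d_inode() is for a filesystem looking at its own dentries, while d_backing_inode()/d_backing_dentry() answer "what would actually be opened", which a layered filesystem such as overlayfs may later redirect. A hedged sketch of the distinction (check_size() is a made-up example; assumes <linux/dcache.h> and <linux/fs.h>):

#include <linux/dcache.h>
#include <linux/fs.h>

static loff_t check_size(struct dentry *dentry)
{
        struct inode *own = d_inode(dentry);                /* my own inode */
        struct inode *effective = d_backing_inode(dentry);  /* inode an open would use */

        return (own && effective) ? i_size_read(effective) : 0;
}
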
index 51f7ccadf923c337ddb5627491a958c5e74fa610..4173a8fdad9efd052870b8738547ac1fa1962526 100644 (file)
@@ -33,6 +33,8 @@
  * @units:             Measurement unit for this attribute.
  * @unit_expo:         Exponent used in the data.
  * @size:              Size in bytes for data size.
+ * @logical_minimum:   Logical minimum value for this attribute.
+ * @logical_maximum:   Logical maximum value for this attribute.
  */
 struct hid_sensor_hub_attribute_info {
        u32 usage_id;
@@ -146,6 +148,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
 
 /**
 * sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @hsdev:      Hub device instance.
 * @usage_id:   Attribute usage id of parent physical device as per spec
 * @attr_usage_id:      Attribute usage id as per spec
 * @report_id:  Report id to look for
@@ -160,6 +163,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                        u32 attr_usage_id, u32 report_id);
 /**
 * sensor_hub_set_feature() - Feature set request
+* @hsdev:      Hub device instance.
 * @report_id:  Report id to look for
 * @field_index:        Field index inside a report
 * @value:      Value to set
@@ -172,6 +176,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
 
 /**
 * sensor_hub_get_feature() - Feature get request
+* @hsdev:      Hub device instance.
 * @report_id:  Report id to look for
 * @field_index:        Field index inside a report
 * @value:      Place holder for return value
index 7c7695940dddeae9d3d22129ce4a14eaf70e1a5e..f17da50402a4dad6bf4d4aa907a32e8df1d7dea7 100644 (file)
@@ -130,8 +130,6 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
  * @probe: Callback for device binding
  * @remove: Callback for device unbinding
  * @shutdown: Callback for device shutdown
- * @suspend: Callback for device suspend
- * @resume: Callback for device resume
  * @alert: Alert callback, for example for the SMBus alert protocol
  * @command: Callback for bus-wide signaling (optional)
  * @driver: Device driver model driver
@@ -174,8 +172,6 @@ struct i2c_driver {
 
        /* driver model interfaces that don't relate to enumeration  */
        void (*shutdown)(struct i2c_client *);
-       int (*suspend)(struct i2c_client *, pm_message_t mesg);
-       int (*resume)(struct i2c_client *);
 
        /* Alert callback, for example for the SMBus alert protocol.
         * The format and meaning of the data value depends on the protocol.
index d9b05b5bf8c7954f63ace548e0a6348f5fa0e327..2e88580194f0238430de1255c5a63f99bf975e0a 100644 (file)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
- * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
+ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
+ *                   that this interrupt will wake the system from a suspended
+ *                   state.  See Documentation/power/suspend-and-interrupts.txt
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
  *                resume time.
+ * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
+ *                interrupt handler after suspending interrupts. For system
+ *                wakeup devices users need to implement wakeup detection in
+ *                their interrupt handlers.
  */
 #define IRQF_DISABLED          0x00000020
 #define IRQF_SHARED            0x00000080
@@ -70,6 +76,7 @@
 #define IRQF_FORCE_RESUME      0x00008000
 #define IRQF_NO_THREAD         0x00010000
 #define IRQF_EARLY_RESUME      0x00020000
+#define IRQF_COND_SUSPEND      0x00040000
 
 #define IRQF_TIMER             (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
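
As the expanded comments stress, IRQF_NO_SUSPEND only keeps the line enabled across suspend (it is not a wakeup mechanism), and the new IRQF_COND_SUSPEND lets a handler share a line with a NO_SUSPEND user provided it copes with running while the system suspends. A hedged request_irq() sketch (the IRQ numbers, names and my_handler are placeholders; assumes <linux/interrupt.h>):

#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int setup_irqs(unsigned int timer_irq, unsigned int shared_irq, void *dev)
{
        int ret;

        /* must keep firing during suspend, e.g. a timekeeping interrupt */
        ret = request_irq(timer_irq, my_handler, IRQF_NO_SUSPEND, "tick", dev);
        if (ret)
                return ret;

        /* shares the line with a NO_SUSPEND user; the handler must tolerate
         * being invoked after interrupts have been suspended */
        return request_irq(shared_irq, my_handler,
                           IRQF_SHARED | IRQF_COND_SUSPEND, "shared", dev);
}
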
 
index 800544bc7bfdd7f0fdda6fb96e6be104870d9d9a..781974afff9f14e576a7912039a5fb68009cdb25 100644 (file)
 
 #define GITS_TRANSLATER                        0x10040
 
+#define GITS_CTLR_ENABLE               (1U << 0)
+#define GITS_CTLR_QUIESCENT            (1U << 31)
+
+#define GITS_TYPER_DEVBITS_SHIFT       13
+#define GITS_TYPER_DEVBITS(r)          ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
 #define GITS_TYPER_PTA                 (1UL << 19)
 
 #define GITS_CBASER_VALID              (1UL << 63)
index 420f77b34d02639192bae47ae2fcc95462eaf8e6..e6a6aac451db4614df9cc4e58911f9ff6629620d 100644 (file)
@@ -243,7 +243,6 @@ extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern unsigned int gic_get_timer_pending(void);
 extern int gic_get_c0_compare_int(void);
 extern int gic_get_c0_perfcount_int(void);
 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
index faf433af425e41e2da532939af63ec258f8fd619..dd1109fb241e42263e5851ddbc325f469c42a87c 100644 (file)
@@ -78,6 +78,7 @@ struct irq_desc {
 #ifdef CONFIG_PM_SLEEP
        unsigned int            nr_actions;
        unsigned int            no_suspend_depth;
+       unsigned int            cond_suspend_depth;
        unsigned int            force_resume_depth;
 #endif
 #ifdef CONFIG_PROC_FS
index 72ba725ddf9c73054533256a7246f5425c95771b..5bb074431eb0ce571b32d785b45ad85ee8c22b34 100644 (file)
@@ -5,6 +5,7 @@
 
 struct kmem_cache;
 struct page;
+struct vm_struct;
 
 #ifdef CONFIG_KASAN
 
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size);
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
-#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
-
 int kasan_module_alloc(void *addr, size_t size);
-void kasan_module_free(void *addr);
+void kasan_free_shadow(const struct vm_struct *vm);
 
 #else /* CONFIG_KASAN */
 
-#define MODULE_ALIGN 1
-
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_enable_current(void) {}
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_module_free(void *addr) {}
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
 #endif /* CONFIG_KASAN */
 
index 75ae2e2631fceaa27915f3d100b1f03244b17500..a19bcf9e762e1d4886e8bd5ed202e4485ba8cf3e 100644 (file)
@@ -156,8 +156,14 @@ typedef enum {
        KDB_REASON_SYSTEM_NMI,  /* In NMI due to SYSTEM cmd; regs valid */
 } kdb_reason_t;
 
+enum kdb_msgsrc {
+       KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+       KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
 extern int kdb_trap_printk;
-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+                                     va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
 
index 2bbc62aa818a374d1c488f2eecf4232230bd3f4e..551f85456c11574a144bf64d1c38ec8031313b3e 100644 (file)
@@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
        MLX4_UPDATE_QP_SMAC             = 1 << 0,
-       MLX4_UPDATE_QP_VSD              = 1 << 2,
+       MLX4_UPDATE_QP_VSD              = 1 << 1,
        MLX4_UPDATE_QP_SUPPORTED_ATTRS  = (1 << 2) - 1
 };
 
index 42999fe2dbd0fb8ec057dbd07d4c2143d6225c65..b03485bcb82a0a4d1f6ebbbac8b30f37beb722a2 100644 (file)
@@ -344,6 +344,10 @@ struct module {
        unsigned long *ftrace_callsites;
 #endif
 
+#ifdef CONFIG_LIVEPATCH
+       bool klp_alive;
+#endif
+
 #ifdef CONFIG_MODULE_UNLOAD
        /* What modules depend on me? */
        struct list_head source_list;
index f7556261fe3c54adb52b28789b7cb7b19b280b13..4d0cb9bba93e4650d76b314cc50160e5b8e7e65d 100644 (file)
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod);
 
 /* Any cleanup before freeing mod->module_init */
 void module_arch_freeing_init(struct module *mod);
+
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
 #endif
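
MODULE_ALIGN now lives with the other module-loader definitions and is only enlarged when KASAN is enabled. A worked sketch of the arithmetic (assuming 4 KiB pages and KASAN_SHADOW_SCALE_SHIFT == 3, i.e. one shadow byte per 8 bytes of memory; the EX_* names are made up to avoid clashing with the real macros):

#define EX_PAGE_SIZE                    4096UL
#define EX_KASAN_SHADOW_SCALE_SHIFT     3
#define EX_MODULE_ALIGN (EX_PAGE_SIZE << EX_KASAN_SHADOW_SCALE_SHIFT)  /* 32 KiB */

/* A module mapping aligned to 32 KiB shadows onto 32 KiB >> 3 = 4 KiB,
 * i.e. a whole number of shadow pages, which is what kasan_module_alloc()
 * needs in order to vmalloc the shadow region separately. */
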
index 5897b4ea5a3f9e0f07f511f57d8fa8bfe7019205..dcf6ec27739b1d8bfb98c82e8902ca0b697df5f8 100644 (file)
@@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     Used to add FDB entries to dump requests. Implementers should add
  *     entries to skb and update idx with the number of entries.
  *
- * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
+ * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
+ *                          u16 flags)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
  *                          struct net_device *dev, u32 filter_mask)
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ *                          u16 flags);
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *     Called to change device carrier. Soft-devices (like dummy, team, etc)
@@ -2342,6 +2345,7 @@ struct gro_remcsum {
 
 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
 {
+       grc->offset = 0;
        grc->delta = 0;
 }
 
index 6d627b92df537ada3886af5850c0fabbac53b66a..b01ccf371fdcaf229f07bddbd2f12f9bbb5c53b0 100644 (file)
@@ -180,7 +180,6 @@ struct nfs_inode {
         /* NFSv4 state */
        struct list_head        open_states;
        struct nfs_delegation __rcu *delegation;
-       fmode_t                  delegation_state;
        struct rw_semaphore     rwsem;
 
        /* pNFS layout information */
@@ -344,6 +343,7 @@ extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
 extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
 extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
 extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
@@ -356,8 +356,9 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
 extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
+extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
 extern int nfs_setattr(struct dentry *, struct iattr *);
-extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
+extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
 extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
                                struct nfs4_label *label);
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
@@ -370,6 +371,7 @@ extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ct
 extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
 extern u64 nfs_compat_user_ino64(u64 fileid);
 extern void nfs_fattr_init(struct nfs_fattr *fattr);
+extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
 extern unsigned long nfs_inc_attr_generation_counter(void);
 
 extern struct nfs_fattr *nfs_alloc_fattr(void);
index 38d96ba935c2d7fd91aaf444a598b832b1c0ee4e..4cb3eaa89cf708a57038049db0df75144155d920 100644 (file)
@@ -1167,8 +1167,15 @@ struct nfs41_impl_id {
        struct nfstime4                 date;
 };
 
+struct nfs41_bind_conn_to_session_args {
+       struct nfs_client               *client;
+       struct nfs4_sessionid           sessionid;
+       u32                             dir;
+       bool                            use_conn_in_rdma_mode;
+};
+
 struct nfs41_bind_conn_to_session_res {
-       struct nfs4_session             *session;
+       struct nfs4_sessionid           sessionid;
        u32                             dir;
        bool                            use_conn_in_rdma_mode;
 };
@@ -1185,6 +1192,8 @@ struct nfs41_exchange_id_res {
 
 struct nfs41_create_session_args {
        struct nfs_client              *client;
+       u64                             clientid;
+       uint32_t                        seqid;
        uint32_t                        flags;
        uint32_t                        cb_program;
        struct nfs4_channel_attrs       fc_attrs;       /* Fore Channel */
@@ -1192,7 +1201,11 @@ struct nfs41_create_session_args {
 };
 
 struct nfs41_create_session_res {
-       struct nfs_client              *client;
+       struct nfs4_sessionid           sessionid;
+       uint32_t                        seqid;
+       uint32_t                        flags;
+       struct nfs4_channel_attrs       fc_attrs;       /* Fore Channel */
+       struct nfs4_channel_attrs       bc_attrs;       /* Back Channel */
 };
 
 struct nfs41_reclaim_complete_args {
@@ -1351,7 +1364,7 @@ struct nfs_commit_completion_ops {
 };
 
 struct nfs_commit_info {
-       spinlock_t                      *lock;
+       spinlock_t                      *lock;  /* inode->i_lock */
        struct nfs_mds_commit_info      *mds;
        struct pnfs_ds_commit_info      *ds;
        struct nfs_direct_req           *dreq;  /* O_DIRECT request */
index 19a5d4b23209302bc55cce74c12f69cbd91f260d..0adad4a5419b7cbd7560422de51f9abc97a755a0 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <uapi/linux/nvme.h>
 #include <linux/pci.h>
-#include <linux/miscdevice.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
@@ -62,8 +61,6 @@ enum {
        NVME_CSTS_SHST_MASK     = 3 << 2,
 };
 
-#define NVME_VS(major, minor)  (major << 16 | minor)
-
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT        (nvme_io_timeout * HZ)
 
@@ -91,9 +88,10 @@ struct nvme_dev {
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
        struct kref kref;
-       struct miscdevice miscdev;
+       struct device *device;
        work_func_t reset_workfn;
        struct work_struct reset_work;
+       struct work_struct probe_work;
        char name[12];
        char serial[20];
        char model[40];
@@ -105,7 +103,6 @@ struct nvme_dev {
        u16 abort_limit;
        u8 event_limit;
        u8 vwc;
-       u8 initialized;
 };
 
 /*
@@ -121,6 +118,7 @@ struct nvme_ns {
        unsigned ns_id;
        int lba_shift;
        int ms;
+       int pi_type;
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
 };
@@ -138,6 +136,7 @@ struct nvme_iod {
        int nents;              /* Used in scatterlist */
        int length;             /* Of data, in bytes */
        dma_addr_t first_dma;
+       struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
        struct scatterlist sg[0];
 };
 
index 8a860f096c351fa93267dabaebab8f3c7605ac18..611a691145c48d7c38c370daa63b047b5c68c17a 100644 (file)
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root,
 static inline void of_platform_depopulate(struct device *parent) { }
 #endif
 
-#ifdef CONFIG_OF_DYNAMIC
+#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
 extern void of_platform_register_reconfig_notifier(void);
 #else
 static inline void of_platform_register_reconfig_notifier(void) { }
index 72c0415d6c21758d31fcd2ab80c33751e41489d5..18eccefea06e40a0ea10f77e857105c6feaf4ff9 100644 (file)
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
 
 static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
 {
-       return ERR_PTR(-ENOSYS);
+       return NULL;
 }
 
 static inline void pinctrl_put(struct pinctrl *p)
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
                                                        struct pinctrl *p,
                                                        const char *name)
 {
-       return ERR_PTR(-ENOSYS);
+       return NULL;
 }
 
 static inline int pinctrl_select_state(struct pinctrl *p,
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p,
 
 static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
 {
-       return ERR_PTR(-ENOSYS);
+       return NULL;
 }
 
 static inline void devm_pinctrl_put(struct pinctrl *p)
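
With pinctrl disabled the stubs now return NULL rather than ERR_PTR(-ENOSYS), so consumers that only test IS_ERR() no longer fail spuriously on CONFIG_PINCTRL=n kernels. A hedged consumer sketch (apply_default_pins() is a made-up example; assumes <linux/pinctrl/consumer.h>):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int apply_default_pins(struct device *dev)
{
        struct pinctrl *p = devm_pinctrl_get(dev);
        struct pinctrl_state *s;

        if (IS_ERR(p))
                return PTR_ERR(p);      /* real error from a pinctrl-enabled kernel */

        s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
        if (IS_ERR(s))
                return PTR_ERR(s);

        /* with CONFIG_PINCTRL=n all three calls are stubs and this is a no-op */
        return pinctrl_select_state(p, s);
}
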
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
new file mode 100644 (file)
index 0000000..9882937
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * board initialization should put one of these structures into platform_data
+ * and place the bfin-rotary onto platform_bus named "bfin-rotary".
+ *
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BFIN_ROTARY_H
+#define _BFIN_ROTARY_H
+
+/* mode bitmasks */
+#define ROT_QUAD_ENC   CNTMODE_QUADENC /* quadrature/grey code encoder mode */
+#define ROT_BIN_ENC    CNTMODE_BINENC  /* binary encoder mode */
+#define ROT_UD_CNT     CNTMODE_UDCNT   /* rotary counter mode */
+#define ROT_DIR_CNT    CNTMODE_DIRCNT  /* direction counter mode */
+
+#define ROT_DEBE       DEBE            /* Debounce Enable */
+
+#define ROT_CDGINV     CDGINV          /* CDG Pin Polarity Invert */
+#define ROT_CUDINV     CUDINV          /* CUD Pin Polarity Invert */
+#define ROT_CZMINV     CZMINV          /* CZM Pin Polarity Invert */
+
+struct bfin_rotary_platform_data {
+       /* set rotary UP KEY_### or BTN_### in case you prefer
+        * bfin-rotary to send EV_KEY otherwise set 0
+        */
+       unsigned int rotary_up_key;
+       /* set rotary DOWN KEY_### or BTN_### in case you prefer
+        * bfin-rotary to send EV_KEY otherwise set 0
+        */
+       unsigned int rotary_down_key;
+       /* set rotary BUTTON KEY_### or BTN_### */
+       unsigned int rotary_button_key;
+       /* set rotary Relative Axis REL_### in case you prefer
+        * bfin-rotary to send EV_REL otherwise set 0
+        */
+       unsigned int rotary_rel_code;
+       unsigned short debounce;        /* 0..17 */
+       unsigned short mode;
+       unsigned short pm_wakeup;
+       unsigned short *pin_list;
+};
+
+/* CNT_CONFIG bitmasks */
+#define CNTE           (1 << 0)        /* Counter Enable */
+#define DEBE           (1 << 1)        /* Debounce Enable */
+#define CDGINV         (1 << 4)        /* CDG Pin Polarity Invert */
+#define CUDINV         (1 << 5)        /* CUD Pin Polarity Invert */
+#define CZMINV         (1 << 6)        /* CZM Pin Polarity Invert */
+#define CNTMODE_SHIFT  8
+#define CNTMODE                (0x7 << CNTMODE_SHIFT)  /* Counter Operating Mode */
+#define ZMZC           (1 << 1)        /* CZM Zeroes Counter Enable */
+#define BNDMODE_SHIFT  12
+#define BNDMODE                (0x3 << BNDMODE_SHIFT)  /* Boundary register Mode */
+#define INPDIS         (1 << 15)       /* CUG and CDG Input Disable */
+
+#define CNTMODE_QUADENC        (0 << CNTMODE_SHIFT)    /* quadrature encoder mode */
+#define CNTMODE_BINENC (1 << CNTMODE_SHIFT)    /* binary encoder mode */
+#define CNTMODE_UDCNT  (2 << CNTMODE_SHIFT)    /* up/down counter mode */
+#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT)    /* direction counter mode */
+#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT)    /* direction timer mode */
+
+#define BNDMODE_COMP   (0 << BNDMODE_SHIFT)    /* boundary compare mode */
+#define BNDMODE_ZERO   (1 << BNDMODE_SHIFT)    /* boundary compare and zero mode */
+#define BNDMODE_CAPT   (2 << BNDMODE_SHIFT)    /* boundary capture mode */
+#define BNDMODE_AEXT   (3 << BNDMODE_SHIFT)    /* boundary auto-extend mode */
+
+/* CNT_IMASK bitmasks */
+#define ICIE           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Enable */
+#define UCIE           (1 << 1)        /* Up count Interrupt Enable */
+#define DCIE           (1 << 2)        /* Down count Interrupt Enable */
+#define MINCIE         (1 << 3)        /* Min Count Interrupt Enable */
+#define MAXCIE         (1 << 4)        /* Max Count Interrupt Enable */
+#define COV31IE                (1 << 5)        /* Bit 31 Overflow Interrupt Enable */
+#define COV15IE                (1 << 6)        /* Bit 15 Overflow Interrupt Enable */
+#define CZEROIE                (1 << 7)        /* Count to Zero Interrupt Enable */
+#define CZMIE          (1 << 8)        /* CZM Pin Interrupt Enable */
+#define CZMEIE         (1 << 9)        /* CZM Error Interrupt Enable */
+#define CZMZIE         (1 << 10)       /* CZM Zeroes Counter Interrupt Enable */
+
+/* CNT_STATUS bitmasks */
+#define ICII           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Identifier */
+#define UCII           (1 << 1)        /* Up count Interrupt Identifier */
+#define DCII           (1 << 2)        /* Down count Interrupt Identifier */
+#define MINCII         (1 << 3)        /* Min Count Interrupt Identifier */
+#define MAXCII         (1 << 4)        /* Max Count Interrupt Identifier */
+#define COV31II                (1 << 5)        /* Bit 31 Overflow Interrupt Identifier */
+#define COV15II                (1 << 6)        /* Bit 15 Overflow Interrupt Identifier */
+#define CZEROII                (1 << 7)        /* Count to Zero Interrupt Identifier */
+#define CZMII          (1 << 8)        /* CZM Pin Interrupt Identifier */
+#define CZMEII         (1 << 9)        /* CZM Error Interrupt Identifier */
+#define CZMZII         (1 << 10)       /* CZM Zeroes Counter Interrupt Identifier */
+
+/* CNT_COMMAND bitmasks */
+#define W1LCNT         0xf             /* Load Counter Register */
+#define W1LMIN         0xf0            /* Load Min Register */
+#define W1LMAX         0xf00           /* Load Max Register */
+#define W1ZMONCE       (1 << 12)       /* Enable CZM Clear Counter Once */
+
+#define W1LCNT_ZERO    (1 << 0)        /* write 1 to load CNT_COUNTER with zero */
+#define W1LCNT_MIN     (1 << 2)        /* write 1 to load CNT_COUNTER from CNT_MIN */
+#define W1LCNT_MAX     (1 << 3)        /* write 1 to load CNT_COUNTER from CNT_MAX */
+
+#define W1LMIN_ZERO    (1 << 4)        /* write 1 to load CNT_MIN with zero */
+#define W1LMIN_CNT     (1 << 5)        /* write 1 to load CNT_MIN from CNT_COUNTER */
+#define W1LMIN_MAX     (1 << 7)        /* write 1 to load CNT_MIN from CNT_MAX */
+
+#define W1LMAX_ZERO    (1 << 8)        /* write 1 to load CNT_MAX with zero */
+#define W1LMAX_CNT     (1 << 9)        /* write 1 to load CNT_MAX from CNT_COUNTER */
+#define W1LMAX_MIN     (1 << 10)       /* write 1 to load CNT_MAX from CNT_MIN */
+
+/* CNT_DEBOUNCE bitmasks */
+#define DPRESCALE      0xf             /* Load Counter Register */
+
+#endif
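
A hypothetical board-file sketch using the new platform-data header (the keycodes, debounce value and device layout are example choices; only the "bfin-rotary" name is mandated by the header's top comment):

#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/platform_data/bfin_rotary.h>

static struct bfin_rotary_platform_data board_rotary_data = {
        .rotary_rel_code   = REL_WHEEL,         /* report EV_REL wheel events */
        .rotary_button_key = KEY_ENTER,         /* encoder push button */
        .debounce          = 10,                /* valid range 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
};

static struct platform_device board_rotary_device = {
        .name = "bfin-rotary",
        .id   = -1,
        .dev  = { .platform_data = &board_rotary_data },
};
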
index 58851275fed98c352fdd4995e95f1ebe806649e7..d438eeb08bff407043b32d5f52f58d08fac8838f 100644 (file)
@@ -54,10 +54,11 @@ struct rhash_head {
  * @buckets: size * hash buckets
  */
 struct bucket_table {
-       size_t                          size;
-       unsigned int                    locks_mask;
-       spinlock_t                      *locks;
-       struct rhash_head __rcu         *buckets[];
+       size_t                  size;
+       unsigned int            locks_mask;
+       spinlock_t              *locks;
+
+       struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
@@ -78,12 +79,6 @@ struct rhashtable;
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
- * @grow_decision: If defined, may return true if table should expand
- * @shrink_decision: If defined, may return true if table should shrink
- *
- * Note: when implementing the grow and shrink decision function, min/max
- * shift must be enforced, otherwise, resizing watermarks they set may be
- * useless.
  */
 struct rhashtable_params {
        size_t                  nelem_hint;
@@ -97,10 +92,6 @@ struct rhashtable_params {
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
-       bool                    (*grow_decision)(const struct rhashtable *ht,
-                                                size_t new_size);
-       bool                    (*shrink_decision)(const struct rhashtable *ht,
-                                                  size_t new_size);
 };
 
 /**
@@ -192,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
index 41c60e5302d703525c1d99f259de9297ea1b36fd..6d77432e14ff971bffd4ca211dccb917768b2c8c 100644 (file)
@@ -363,9 +363,6 @@ extern void show_regs(struct pt_regs *);
  */
 extern void show_stack(struct task_struct *task, unsigned long *sp);
 
-void io_schedule(void);
-long io_schedule_timeout(long timeout);
-
 extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
@@ -422,6 +419,13 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern long io_schedule_timeout(long timeout);
+
+static inline void io_schedule(void)
+{
+       io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
 struct nsproxy;
 struct user_namespace;
 
index baf3e1d08416faaf34edd51bca72c2801e7e7e06..d10965f0d8a4af6526a07046ba040ae9708279a4 100644 (file)
@@ -143,13 +143,13 @@ struct uart_port {
        unsigned char           iotype;                 /* io access style */
        unsigned char           unused1;
 
-#define UPIO_PORT              (0)                     /* 8b I/O port access */
-#define UPIO_HUB6              (1)                     /* Hub6 ISA card */
-#define UPIO_MEM               (2)                     /* 8b MMIO access */
-#define UPIO_MEM32             (3)                     /* 32b little endian */
-#define UPIO_MEM32BE           (4)                     /* 32b big endian */
-#define UPIO_AU                        (5)                     /* Au1x00 and RT288x type IO */
-#define UPIO_TSI               (6)                     /* Tsi108/109 type IO */
+#define UPIO_PORT              (SERIAL_IO_PORT)        /* 8b I/O port access */
+#define UPIO_HUB6              (SERIAL_IO_HUB6)        /* Hub6 ISA card */
+#define UPIO_MEM               (SERIAL_IO_MEM)         /* 8b MMIO access */
+#define UPIO_MEM32             (SERIAL_IO_MEM32)       /* 32b little endian */
+#define UPIO_AU                        (SERIAL_IO_AU)          /* Au1x00 and RT288x type IO */
+#define UPIO_TSI               (SERIAL_IO_TSI)         /* Tsi108/109 type IO */
+#define UPIO_MEM32BE           (SERIAL_IO_MEM32BE)     /* 32b big endian */
 
        unsigned int            read_status_mask;       /* driver specific */
        unsigned int            ignore_status_mask;     /* driver specific */
index 30007afe70b3541cdb4300919c2a5cfbbc0063f2..f54d6659713ae76e96391334dba700eac8be122f 100644 (file)
@@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
        to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_sender_cpu_clear(struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+       skb->sender_cpu = 0;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
index ed9489d893a487f250868f8de603c84e707feaf8..856d34dde79bc9d81faae4171fb0c5492829c133 100644 (file)
@@ -649,7 +649,7 @@ struct spi_transfer {
  * sequence completes.  On some systems, many such sequences can execute as
  * a single programmed DMA transfer.  On all systems, these messages are
  * queued, and might complete after transactions to other devices.  Messages
- * sent to a given spi_device are alway executed in FIFO order.
+ * sent to a given spi_device are always executed in FIFO order.
  *
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
index 7e61a17030a4843bf0f80fda4dc2ae9f379409ef..694eecb2f1b5dffd8fe7f43c2979528ae2639042 100644 (file)
@@ -89,8 +89,11 @@ void                 rpc_free_iostats(struct rpc_iostats *);
 static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
 static inline void rpc_count_iostats(const struct rpc_task *task,
                                     struct rpc_iostats *stats) {}
-static inline void rpc_count_iostats_metrics(const struct rpc_task *,
-                                            struct rpc_iostats *) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
+                                            struct rpc_iostats *stats)
+{
+}
+
 static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
 static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
 
index fc52e307efab8768effbb7880702986653e0a07c..5eac316490eab9ca0ad1a864be2b6fedb823f332 100644 (file)
@@ -314,6 +314,8 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
 }
 
 #endif
+
+#if IS_ENABLED(CONFIG_THERMAL)
 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
                void *, struct thermal_zone_device_ops *,
                const struct thermal_zone_params *, int, int);
@@ -340,8 +342,58 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
                struct thermal_cooling_device *, int);
 void thermal_cdev_update(struct thermal_cooling_device *);
 void thermal_notify_framework(struct thermal_zone_device *, int);
-
-#ifdef CONFIG_NET
+#else
+static inline struct thermal_zone_device *thermal_zone_device_register(
+       const char *type, int trips, int mask, void *devdata,
+       struct thermal_zone_device_ops *ops,
+       const struct thermal_zone_params *tzp,
+       int passive_delay, int polling_delay)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_zone_device_unregister(
+       struct thermal_zone_device *tz)
+{ }
+static inline int thermal_zone_bind_cooling_device(
+       struct thermal_zone_device *tz, int trip,
+       struct thermal_cooling_device *cdev,
+       unsigned long upper, unsigned long lower)
+{ return -ENODEV; }
+static inline int thermal_zone_unbind_cooling_device(
+       struct thermal_zone_device *tz, int trip,
+       struct thermal_cooling_device *cdev)
+{ return -ENODEV; }
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+{ }
+static inline struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+       const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+       char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cooling_device_unregister(
+       struct thermal_cooling_device *cdev)
+{ }
+static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
+               const char *name)
+{ return ERR_PTR(-ENODEV); }
+static inline int thermal_zone_get_temp(
+               struct thermal_zone_device *tz, unsigned long *temp)
+{ return -ENODEV; }
+static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
+{ return -ENODEV; }
+static inline struct thermal_instance *
+get_thermal_instance(struct thermal_zone_device *tz,
+       struct thermal_cooling_device *cdev, int trip)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{ }
+static inline void thermal_notify_framework(struct thermal_zone_device *tz,
+       int trip)
+{ }
+#endif /* CONFIG_THERMAL */
+
+#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
 extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
                                                enum events event);
 #else
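
The new CONFIG_THERMAL=n stubs let a driver register its thermal zone unconditionally and simply skip the feature when the framework is absent. A hedged probe-path sketch (register_optional_tz() and its parameters are made up; assumes <linux/thermal.h>):

#include <linux/err.h>
#include <linux/thermal.h>

static struct thermal_zone_device *
register_optional_tz(void *devdata, struct thermal_zone_device_ops *ops)
{
        struct thermal_zone_device *tz;

        /* no trip points, empty writable-trip mask, 1 s polling */
        tz = thermal_zone_device_register("example", 0, 0, devdata,
                                          ops, NULL, 0, 1000);
        if (IS_ERR(tz))
                return NULL;    /* -ENODEV on CONFIG_THERMAL=n: carry on without it */

        return tz;
}
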
index 07a022641996f0ccf74cd872ccd9e4bc8aa5f538..71880299ed487b68dc7b278248a4fb29ddb6b6ec 100644 (file)
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
 
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
+
 static inline size_t iov_iter_count(struct iov_iter *i)
 {
        return i->count;
index 9bb547c7bce7c7ce0942fdb4e313188d4878b371..704a1ab8240ca124f29c5ce361c871090d28ea5b 100644 (file)
@@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
  * @num_ports: the number of different ports this device will have.
  * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
  *     (0 = end-point size)
- * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer
- *     (0 = end-point size)
+ * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
  * @calc_num_ports: pointer to a function to determine how many ports this
  *     device has dynamically.  It will be called after the probe()
  *     callback is called, but before attach()
index d3204115f15d21dd7ef3d879df2393884795b037..2d67b8998fd8b49d877d65b0b94a022be47d4e28 100644 (file)
@@ -26,6 +26,7 @@
  * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
  *         operations documented below
  * @mmap: Perform mmap(2) on a region of the device file descriptor
+ * @request: Request for the bus driver to release the device
  */
 struct vfio_device_ops {
        char    *name;
@@ -38,6 +39,7 @@ struct vfio_device_ops {
        long    (*ioctl)(void *device_data, unsigned int cmd,
                         unsigned long arg);
        int     (*mmap)(void *device_data, struct vm_area_struct *vma);
+       void    (*request)(void *device_data, unsigned int count);
 };
 
 extern int vfio_add_group_dev(struct device *dev,
index 7d7acb35603d6d6e4bb8a468fcb635d666fea001..0ec598381f9766182db52f246afc2f0a5f28b36f 100644 (file)
@@ -17,6 +17,7 @@ struct vm_area_struct;                /* vma defining user mapping in mm_types.h */
 #define VM_VPAGES              0x00000010      /* buffer for pages was vmalloc'ed */
 #define VM_UNINITIALIZED       0x00000020      /* vm_struct is not fully initialized */
 #define VM_NO_GUARD            0x00000040      /* don't add guard page */
+#define VM_KASAN               0x00000080      /* has allocated kasan shadow memory */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
index 74db135f99571e37f61e0aecc34d7b5ddba66756..f597846ff605cccaaf36d3cc4e6d603b630af156 100644 (file)
@@ -70,7 +70,8 @@ enum {
        /* data contains off-queue information when !WORK_STRUCT_PWQ */
        WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_COLOR_SHIFT,
 
-       WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
+       __WORK_OFFQ_CANCELING   = WORK_OFFQ_FLAG_BASE,
+       WORK_OFFQ_CANCELING     = (1 << __WORK_OFFQ_CANCELING),
 
        /*
         * When a work item is off queue, its high bits point to the last
index 1c1ad46250d5c9e0abb0910f1b7debaa6aaa9608..fe328c52c46bd179b651d6bbb14e58a89f017557 100644 (file)
@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
  * @return    Checksum of buffer.
  */
 
-u16 cfpkt_iterate(struct cfpkt *pkt,
+int cfpkt_iterate(struct cfpkt *pkt,
                u16 (*iter_func)(u16 chks, void *buf, u16 len),
                u16 data);
 
index a8ae4e760778d8fe49ff21951bd1e088ac3aefca..0fb99a26e97372613da1851d9ee4d9bb432aba3f 100644 (file)
@@ -481,6 +481,7 @@ void dst_init(void);
 enum {
        XFRM_LOOKUP_ICMP = 1 << 0,
        XFRM_LOOKUP_QUEUE = 1 << 1,
+       XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
 };
 
 struct flowi;
index 9eaaa788458607004cb5f160e77c38de02da17ec..decb9a095ae7c4df5ace79f366762138c223958c 100644 (file)
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
                           const struct nft_data *data,
                           enum nft_data_types type);
 
+
+/**
+ *     struct nft_userdata - user defined data associated with an object
+ *
+ *     @len: length of the data
+ *     @data: content
+ *
+ *     The presence of user data is indicated in an object specific fashion,
+ *     so a length of zero can't occur and the value "len" indicates data
+ *     of length len + 1.
+ */
+struct nft_userdata {
+       u8                      len;
+       unsigned char           data[0];
+};
+
 /**
  *     struct nft_set_elem - generic representation of set elements
  *
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
  *     @handle: rule handle
  *     @genmask: generation mask
  *     @dlen: length of expression data
- *     @ulen: length of user data (used for comments)
+ *     @udata: user data is appended to the rule
  *     @data: expression data
  */
 struct nft_rule {
@@ -388,7 +404,7 @@ struct nft_rule {
        u64                             handle:42,
                                        genmask:2,
                                        dlen:12,
-                                       ulen:8;
+                                       udata:1;
        unsigned char                   data[]
                __attribute__((aligned(__alignof__(struct nft_expr))));
 };
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
        return (struct nft_expr *)&rule->data[rule->dlen];
 }
 
-static inline void *nft_userdata(const struct nft_rule *rule)
+static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
 {
        return (void *)&rule->data[rule->dlen];
 }
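
Putting the "len means len + 1 bytes" convention above into code, a hedged accessor sketch (nft_userdata_len() is a made-up helper name; nft_userdata() and the udata presence bit come from the hunks above, assuming <net/netfilter/nf_tables.h>):

static inline unsigned int nft_userdata_len(const struct nft_rule *rule)
{
        const struct nft_userdata *udata = nft_userdata(rule);

        return rule->udata ? udata->len + 1 : 0;
}
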
index eabd3a038674dd00f69263b47828134dd3a24bf1..c73e7abbbaa50e917e65a1117e881bd78665b12a 100644 (file)
@@ -91,6 +91,7 @@ struct vxlanhdr {
 
 #define VXLAN_N_VID     (1u << 24)
 #define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 struct vxlan_metadata {
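
For reference, the mask arithmetic: the 24-bit VNI occupies the top three bytes of the 32-bit field, so the new VNI mask is just the VID mask shifted past the reserved low byte:

/* Worked values from the definitions above:
 *   VXLAN_N_VID    = 1 << 24              = 0x01000000
 *   VXLAN_VID_MASK = VXLAN_N_VID - 1      = 0x00ffffff
 *   VXLAN_VNI_MASK = VXLAN_VID_MASK << 8  = 0xffffff00
 */
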
index 0210797abf2e95e88f67dd480ae24597325842d3..dc10c52e0e9199e7e878a87a714569106a955df3 100644 (file)
@@ -92,7 +92,7 @@
 #define                AT91_DDRSDRC_UPD_MR     (3 << 20)        /* Update load mode register and extended mode register */
 
 #define AT91_DDRSDRC_MDR       0x20    /* Memory Device Register */
-#define                AT91_DDRSDRC_MD         (3 << 0)                /* Memory Device Type */
+#define                AT91_DDRSDRC_MD         (7 << 0)        /* Memory Device Type */
 #define                        AT91_DDRSDRC_MD_SDR             0
 #define                        AT91_DDRSDRC_MD_LOW_POWER_SDR   1
 #define                        AT91_DDRSDRC_MD_LOW_POWER_DDR   3
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
new file mode 100644 (file)
index 0000000..d3583d3
--- /dev/null
@@ -0,0 +1,897 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION                 "v4.1.0"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT    2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT    2
+#define SECONDS_FOR_ASYNC_LOGOUT       10
+#define SECONDS_FOR_ASYNC_TEXT         10
+#define SECONDS_FOR_LOGOUT_COMP                15
+#define WHITE_SPACE                    " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS                        16
+#define ISCSIT_EXTRA_TAGS              8
+#define ISCSIT_TCP_BACKLOG             256
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT             3
+#define NA_DATAOUT_TIMEOUT_MAX         60
+#define NA_DATAOUT_TIMEOUT_MIX         2
+#define NA_DATAOUT_TIMEOUT_RETRIES     5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT               15
+#define NA_NOPIN_TIMEOUT_MAX           60
+#define NA_NOPIN_TIMEOUT_MIN           3
+#define NA_NOPIN_RESPONSE_TIMEOUT      30
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX  60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN  3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS   0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS   0
+#define NA_RANDOM_R2T_OFFSETS          0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION              1
+#define TA_LOGIN_TIMEOUT               15
+#define TA_LOGIN_TIMEOUT_MAX           30
+#define TA_LOGIN_TIMEOUT_MIN           5
+#define TA_NETIF_TIMEOUT               2
+#define TA_NETIF_TIMEOUT_MAX           15
+#define TA_NETIF_TIMEOUT_MIN           2
+#define TA_GENERATE_NODE_ACLS          0
+#define TA_DEFAULT_CMDSN_DEPTH         64
+#define TA_DEFAULT_CMDSN_DEPTH_MAX     512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN     1
+#define TA_CACHE_DYNAMIC_ACLS          0
+/* Enabled by default in demo mode (generic_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT     1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT     0
+#define TA_DEMO_MODE_DISCOVERY         1
+#define TA_DEFAULT_ERL                 0
+#define TA_CACHE_CORE_NPS              0
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI              0
+
+#define ISCSI_IOV_DATA_BUFFER          5
+
+enum iscsit_transport_type {
+       ISCSI_TCP                               = 0,
+       ISCSI_SCTP_TCP                          = 1,
+       ISCSI_SCTP_UDP                          = 2,
+       ISCSI_IWARP_TCP                         = 3,
+       ISCSI_IWARP_SCTP                        = 4,
+       ISCSI_INFINIBAND                        = 5,
+};
+
+/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+       TARG_CONN_STATE_FREE                    = 0x1,
+       TARG_CONN_STATE_XPT_UP                  = 0x3,
+       TARG_CONN_STATE_IN_LOGIN                = 0x4,
+       TARG_CONN_STATE_LOGGED_IN               = 0x5,
+       TARG_CONN_STATE_IN_LOGOUT               = 0x6,
+       TARG_CONN_STATE_LOGOUT_REQUESTED        = 0x7,
+       TARG_CONN_STATE_CLEANUP_WAIT            = 0x8,
+};
+
+/* RFC-3720 7.3.2  Session State Diagram for a Target */
+enum target_sess_state_table {
+       TARG_SESS_STATE_FREE                    = 0x1,
+       TARG_SESS_STATE_ACTIVE                  = 0x2,
+       TARG_SESS_STATE_LOGGED_IN               = 0x3,
+       TARG_SESS_STATE_FAILED                  = 0x4,
+       TARG_SESS_STATE_IN_CONTINUE             = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+       ISCSI_RX_DATA   = 1,
+       ISCSI_TX_DATA   = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+       DATAIN_COMPLETE_NORMAL                  = 1,
+       DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+       DATAIN_COMPLETE_CONNECTION_RECOVERY     = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+       DATAIN_WITHIN_COMMAND_RECOVERY          = 1,
+       DATAIN_CONNECTION_RECOVERY              = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+       TPG_STATE_FREE                          = 0,
+       TPG_STATE_ACTIVE                        = 1,
+       TPG_STATE_INACTIVE                      = 2,
+       TPG_STATE_COLD_RESET                    = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+       TIQN_STATE_ACTIVE                       = 1,
+       TIQN_STATE_SHUTDOWN                     = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+       ICF_GOT_LAST_DATAOUT                    = 0x00000001,
+       ICF_GOT_DATACK_SNACK                    = 0x00000002,
+       ICF_NON_IMMEDIATE_UNSOLICITED_DATA      = 0x00000004,
+       ICF_SENT_LAST_R2T                       = 0x00000008,
+       ICF_WITHIN_COMMAND_RECOVERY             = 0x00000010,
+       ICF_CONTIG_MEMORY                       = 0x00000020,
+       ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
+       ICF_OOO_CMDSN                           = 0x00000080,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+       ISTATE_NO_STATE                 = 0,
+       ISTATE_NEW_CMD                  = 1,
+       ISTATE_DEFERRED_CMD             = 2,
+       ISTATE_UNSOLICITED_DATA         = 3,
+       ISTATE_RECEIVE_DATAOUT          = 4,
+       ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+       ISTATE_RECEIVED_LAST_DATAOUT    = 6,
+       ISTATE_WITHIN_DATAOUT_RECOVERY  = 7,
+       ISTATE_IN_CONNECTION_RECOVERY   = 8,
+       ISTATE_RECEIVED_TASKMGT         = 9,
+       ISTATE_SEND_ASYNCMSG            = 10,
+       ISTATE_SENT_ASYNCMSG            = 11,
+       ISTATE_SEND_DATAIN              = 12,
+       ISTATE_SEND_LAST_DATAIN         = 13,
+       ISTATE_SENT_LAST_DATAIN         = 14,
+       ISTATE_SEND_LOGOUTRSP           = 15,
+       ISTATE_SENT_LOGOUTRSP           = 16,
+       ISTATE_SEND_NOPIN               = 17,
+       ISTATE_SENT_NOPIN               = 18,
+       ISTATE_SEND_REJECT              = 19,
+       ISTATE_SENT_REJECT              = 20,
+       ISTATE_SEND_R2T                 = 21,
+       ISTATE_SENT_R2T                 = 22,
+       ISTATE_SEND_R2T_RECOVERY        = 23,
+       ISTATE_SENT_R2T_RECOVERY        = 24,
+       ISTATE_SEND_LAST_R2T            = 25,
+       ISTATE_SENT_LAST_R2T            = 26,
+       ISTATE_SEND_LAST_R2T_RECOVERY   = 27,
+       ISTATE_SENT_LAST_R2T_RECOVERY   = 28,
+       ISTATE_SEND_STATUS              = 29,
+       ISTATE_SEND_STATUS_BROKEN_PC    = 30,
+       ISTATE_SENT_STATUS              = 31,
+       ISTATE_SEND_STATUS_RECOVERY     = 32,
+       ISTATE_SENT_STATUS_RECOVERY     = 33,
+       ISTATE_SEND_TASKMGTRSP          = 34,
+       ISTATE_SENT_TASKMGTRSP          = 35,
+       ISTATE_SEND_TEXTRSP             = 36,
+       ISTATE_SENT_TEXTRSP             = 37,
+       ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+       ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+       ISTATE_SEND_NOPIN_NO_RESPONSE   = 40,
+       ISTATE_REMOVE                   = 41,
+       ISTATE_FREE                     = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+       CMDSN_ERROR_CANNOT_RECOVER      = -1,
+       CMDSN_NORMAL_OPERATION          = 0,
+       CMDSN_LOWER_THAN_EXP            = 1,
+       CMDSN_HIGHER_THAN_EXP           = 2,
+       CMDSN_MAXCMDSN_OVERRUN          = 3,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+       IMMEDIATE_DATA_CANNOT_RECOVER   = -1,
+       IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+       IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+       DATAOUT_CANNOT_RECOVER          = -1,
+       DATAOUT_NORMAL                  = 0,
+       DATAOUT_SEND_R2T                = 1,
+       DATAOUT_SEND_TO_TRANSPORT       = 2,
+       DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+       NAF_USERID_SET                  = 0x01,
+       NAF_PASSWORD_SET                = 0x02,
+       NAF_USERID_IN_SET               = 0x04,
+       NAF_PASSWORD_IN_SET             = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+       ISCSI_TF_RUNNING                = 0x01,
+       ISCSI_TF_STOP                   = 0x02,
+       ISCSI_TF_EXPIRED                = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+       NPF_IP_NETWORK          = 0x00,
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+       ISCSI_NP_THREAD_ACTIVE          = 1,
+       ISCSI_NP_THREAD_INACTIVE        = 2,
+       ISCSI_NP_THREAD_RESET           = 3,
+       ISCSI_NP_THREAD_SHUTDOWN        = 4,
+       ISCSI_NP_THREAD_EXIT            = 5,
+};
+
+struct iscsi_conn_ops {
+       u8      HeaderDigest;                   /* [0,1] == [None,CRC32C] */
+       u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
+       u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
+       u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
+       u8      OFMarker;                       /* [0,1] == [No,Yes] */
+       u8      IFMarker;                       /* [0,1] == [No,Yes] */
+       u32     OFMarkInt;                      /* [1..65535] */
+       u32     IFMarkInt;                      /* [1..65535] */
+       /*
+        * iSER specific connection parameters
+        */
+       u32     InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
+       u32     TargetRecvDataSegmentLength;    /* [512..2**24-1] */
+};
+
+struct iscsi_sess_ops {
+       char    InitiatorName[224];
+       char    InitiatorAlias[256];
+       char    TargetName[224];
+       char    TargetAlias[256];
+       char    TargetAddress[256];
+       u16     TargetPortalGroupTag;           /* [0..65535] */
+       u16     MaxConnections;                 /* [1..65535] */
+       u8      InitialR2T;                     /* [0,1] == [No,Yes] */
+       u8      ImmediateData;                  /* [0,1] == [No,Yes] */
+       u32     MaxBurstLength;                 /* [512..2**24-1] */
+       u32     FirstBurstLength;               /* [512..2**24-1] */
+       u16     DefaultTime2Wait;               /* [0..3600] */
+       u16     DefaultTime2Retain;             /* [0..3600] */
+       u16     MaxOutstandingR2T;              /* [1..65535] */
+       u8      DataPDUInOrder;                 /* [0,1] == [No,Yes] */
+       u8      DataSequenceInOrder;            /* [0,1] == [No,Yes] */
+       u8      ErrorRecoveryLevel;             /* [0..2] */
+       u8      SessionType;                    /* [0,1] == [Normal,Discovery] */
+       /*
+        * iSER specific session parameters
+        */
+       u8      RDMAExtensions;                 /* [0,1] == [No,Yes] */
+};
+
+struct iscsi_queue_req {
+       int                     state;
+       struct iscsi_cmd        *cmd;
+       struct list_head        qr_list;
+};
+
+struct iscsi_data_count {
+       int                     data_length;
+       int                     sync_and_steering;
+       enum data_count_type    type;
+       u32                     iov_count;
+       u32                     ss_iov_count;
+       u32                     ss_marker_count;
+       struct kvec             *iov;
+};
+
+struct iscsi_param_list {
+       bool                    iser;
+       struct list_head        param_list;
+       struct list_head        extra_response_list;
+};
+
+struct iscsi_datain_req {
+       enum datain_req_comp_table dr_complete;
+       int                     generate_recovery_values;
+       enum datain_req_rec_table recovery;
+       u32                     begrun;
+       u32                     runlength;
+       u32                     data_length;
+       u32                     data_offset;
+       u32                     data_sn;
+       u32                     next_burst_len;
+       u32                     read_data_done;
+       u32                     seq_send_order;
+       struct list_head        cmd_datain_node;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+       u16                     cid;
+       u32                     batch_count;
+       u32                     cmdsn;
+       u32                     exp_cmdsn;
+       struct iscsi_cmd        *cmd;
+       struct list_head        ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+       u8                      flags;
+       u32                     data_sn;
+       u32                     length;
+       u32                     offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+       int                     seq_complete;
+       int                     recovery_r2t;
+       int                     sent_r2t;
+       u32                     r2t_sn;
+       u32                     offset;
+       u32                     targ_xfer_tag;
+       u32                     xfer_len;
+       struct list_head        r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+       enum iscsi_timer_flags_table dataout_timer_flags;
+       /* DataOUT timeout retries */
+       u8                      dataout_timeout_retries;
+       /* Within command recovery count */
+       u8                      error_recovery_count;
+       /* iSCSI dependent state for out of order CmdSNs */
+       enum cmd_i_state_table  deferred_i_state;
+       /* iSCSI dependent state */
+       enum cmd_i_state_table  i_state;
+       /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+       u8                      immediate_cmd;
+       /* Immediate data present */
+       u8                      immediate_data;
+       /* iSCSI Opcode */
+       u8                      iscsi_opcode;
+       /* iSCSI Response Code */
+       u8                      iscsi_response;
+       /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+       u8                      logout_reason;
+       /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+       u8                      logout_response;
+       /* MaxCmdSN has been incremented */
+       u8                      maxcmdsn_inc;
+       /* Immediate Unsolicited Dataout */
+       u8                      unsolicited_data;
+       /* Reject reason code */
+       u8                      reject_reason;
+       /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+       u16                     logout_cid;
+       /* Command flags */
+       enum cmd_flags_table    cmd_flags;
+       /* Initiator Task Tag assigned from Initiator */
+       itt_t                   init_task_tag;
+       /* Target Transfer Tag assigned from Target */
+       u32                     targ_xfer_tag;
+       /* CmdSN assigned from Initiator */
+       u32                     cmd_sn;
+       /* ExpStatSN assigned from Initiator */
+       u32                     exp_stat_sn;
+       /* StatSN assigned to this ITT */
+       u32                     stat_sn;
+       /* DataSN Counter */
+       u32                     data_sn;
+       /* R2TSN Counter */
+       u32                     r2t_sn;
+       /* Last DataSN acknowledged via DataAck SNACK */
+       u32                     acked_data_sn;
+       /* Used for echoing NOPOUT ping data */
+       u32                     buf_ptr_size;
+       /* Used to store DataDigest */
+       u32                     data_crc;
+       /* Counter for MaxOutstandingR2T */
+       u32                     outstanding_r2ts;
+       /* Next R2T Offset when DataSequenceInOrder=Yes */
+       u32                     r2t_offset;
+       /* Iovec current and orig count for iscsi_cmd->iov_data */
+       u32                     iov_data_count;
+       u32                     orig_iov_data_count;
+       /* Number of miscellaneous iovecs used for IP stack calls */
+       u32                     iov_misc_count;
+       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       u32                     pdu_count;
+       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+       u32                     pdu_send_order;
+       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       u32                     pdu_start;
+       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+       u32                     seq_send_order;
+       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+       u32                     seq_count;
+       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+       u32                     seq_no;
+       /* Lowest offset in current DataOUT sequence */
+       u32                     seq_start_offset;
+       /* Highest offset in current DataOUT sequence */
+       u32                     seq_end_offset;
+       /* Total size in bytes received so far of READ data */
+       u32                     read_data_done;
+       /* Total size in bytes received so far of WRITE data */
+       u32                     write_data_done;
+       /* Counter for FirstBurstLength key */
+       u32                     first_burst_len;
+       /* Counter for MaxBurstLength key */
+       u32                     next_burst_len;
+       /* Transfer size used for IP stack calls */
+       u32                     tx_size;
+       /* Buffer used for various purposes */
+       void                    *buf_ptr;
+       /* Used by SendTargets=[iqn.,eui.] discovery */
+       void                    *text_in_ptr;
+       /* See include/linux/dma-mapping.h */
+       enum dma_data_direction data_direction;
+       /* iSCSI PDU Header + CRC */
+       unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+       /* Number of times struct iscsi_cmd is present in immediate queue */
+       atomic_t                immed_queue_count;
+       atomic_t                response_queue_count;
+       spinlock_t              datain_lock;
+       spinlock_t              dataout_timeout_lock;
+       /* spinlock for protecting struct iscsi_cmd->i_state */
+       spinlock_t              istate_lock;
+       /* spinlock for adding within command recovery entries */
+       spinlock_t              error_lock;
+       /* spinlock for adding R2Ts */
+       spinlock_t              r2t_lock;
+       /* DataIN List */
+       struct list_head        datain_list;
+       /* R2T List */
+       struct list_head        cmd_r2t_list;
+       /* Timer for DataOUT */
+       struct timer_list       dataout_timer;
+       /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+       struct kvec             *iov_data;
+       /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS                      5
+       struct kvec             iov_misc[ISCSI_MISC_IOVECS];
+       /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+       struct iscsi_pdu        *pdu_list;
+       /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+       struct iscsi_pdu        *pdu_ptr;
+       /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+       struct iscsi_seq        *seq_list;
+       /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+       struct iscsi_seq        *seq_ptr;
+       /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+       struct iscsi_tmr_req    *tmr_req;
+       /* Connection this command belongs to */
+       struct iscsi_conn       *conn;
+       /* Pointer to connection recovery entry */
+       struct iscsi_conn_recovery *cr;
+       /* Session the command is part of, used for connection recovery */
+       struct iscsi_session    *sess;
+       /* list_head for connection list */
+       struct list_head        i_conn_node;
+       /* The TCM I/O descriptor that is accessed via container_of() */
+       struct se_cmd           se_cmd;
+       /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
+       unsigned char           sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+       u32                     padding;
+       u8                      pad_bytes[4];
+
+       struct scatterlist      *first_data_sg;
+       u32                     first_data_sg_off;
+       u32                     kmapped_nents;
+       sense_reason_t          sense_reason;
+}  ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+       bool                    task_reassign:1;
+       u32                     exp_data_sn;
+       struct iscsi_cmd        *ref_cmd;
+       struct iscsi_conn_recovery *conn_recovery;
+       struct se_tmr_req       *se_tmr_req;
+};
+
+struct iscsi_conn {
+       wait_queue_head_t       queues_wq;
+       /* Authentication Successful for this connection */
+       u8                      auth_complete;
+       /* State connection is currently in */
+       u8                      conn_state;
+       u8                      conn_logout_reason;
+       u8                      network_transport;
+       enum iscsi_timer_flags_table nopin_timer_flags;
+       enum iscsi_timer_flags_table nopin_response_timer_flags;
+       /* Used to know what thread encountered a transport failure */
+       u8                      which_thread;
+       /* connection id assigned by the Initiator */
+       u16                     cid;
+       /* Remote TCP Port */
+       u16                     login_port;
+       u16                     local_port;
+       int                     net_size;
+       int                     login_family;
+       u32                     auth_id;
+       u32                     conn_flags;
+       /* Used for iscsi_tx_login_rsp() */
+       itt_t                   login_itt;
+       u32                     exp_statsn;
+       /* Per connection status sequence number */
+       u32                     stat_sn;
+       /* IFMarkInt's Current Value */
+       u32                     if_marker;
+       /* OFMarkInt's Current Value */
+       u32                     of_marker;
+       /* Used for calculating OFMarker offset to next PDU */
+       u32                     of_marker_offset;
+#define IPV6_ADDRESS_SPACE                             48
+       unsigned char           login_ip[IPV6_ADDRESS_SPACE];
+       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
+       int                     conn_usage_count;
+       int                     conn_waiting_on_uc;
+       atomic_t                check_immediate_queue;
+       atomic_t                conn_logout_remove;
+       atomic_t                connection_exit;
+       atomic_t                connection_recovery;
+       atomic_t                connection_reinstatement;
+       atomic_t                connection_wait_rcfr;
+       atomic_t                sleep_on_conn_wait_comp;
+       atomic_t                transport_failed;
+       struct completion       conn_post_wait_comp;
+       struct completion       conn_wait_comp;
+       struct completion       conn_wait_rcfr_comp;
+       struct completion       conn_waiting_on_uc_comp;
+       struct completion       conn_logout_comp;
+       struct completion       tx_half_close_comp;
+       struct completion       rx_half_close_comp;
+       /* socket used by this connection */
+       struct socket           *sock;
+       void                    (*orig_data_ready)(struct sock *);
+       void                    (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE                1
+#define LOGIN_FLAGS_CLOSED             2
+#define LOGIN_FLAGS_READY              4
+       unsigned long           login_flags;
+       struct delayed_work     login_work;
+       struct delayed_work     login_cleanup_work;
+       struct iscsi_login      *login;
+       struct timer_list       nopin_timer;
+       struct timer_list       nopin_response_timer;
+       struct timer_list       transport_timer;
+       struct task_struct      *login_kworker;
+       /* Spinlock used for adding/deleting cmds from conn_cmd_list */
+       spinlock_t              cmd_lock;
+       spinlock_t              conn_usage_lock;
+       spinlock_t              immed_queue_lock;
+       spinlock_t              nopin_timer_lock;
+       spinlock_t              response_queue_lock;
+       spinlock_t              state_lock;
+       /* libcrypto RX and TX contexts for crc32c */
+       struct hash_desc        conn_rx_hash;
+       struct hash_desc        conn_tx_hash;
+       /* Used for scheduling TX and RX connection kthreads */
+       cpumask_var_t           conn_cpumask;
+       unsigned int            conn_rx_reset_cpumask:1;
+       unsigned int            conn_tx_reset_cpumask:1;
+       /* list_head of struct iscsi_cmd for this connection */
+       struct list_head        conn_cmd_list;
+       struct list_head        immed_queue_list;
+       struct list_head        response_queue_list;
+       struct iscsi_conn_ops   *conn_ops;
+       struct iscsi_login      *conn_login;
+       struct iscsit_transport *conn_transport;
+       struct iscsi_param_list *param_list;
+       /* Used for per connection auth state machine */
+       void                    *auth_protocol;
+       void                    *context;
+       struct iscsi_login_thread_s *login_thread;
+       struct iscsi_portal_group *tpg;
+       struct iscsi_tpg_np     *tpg_np;
+       /* Pointer to parent session */
+       struct iscsi_session    *sess;
+       /* Pointer to thread_set in use for this conn's threads */
+       struct iscsi_thread_set *thread_set;
+       /* list_head for session connection list */
+       struct list_head        conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+       u16                     cid;
+       u32                     cmd_count;
+       u32                     maxrecvdatasegmentlength;
+       u32                     maxxmitdatasegmentlength;
+       int                     ready_for_reallegiance;
+       struct list_head        conn_recovery_cmd_list;
+       spinlock_t              conn_recovery_cmd_lock;
+       struct timer_list       time2retain_timer;
+       struct iscsi_session    *sess;
+       struct list_head        cr_list;
+}  ____cacheline_aligned;
+
+struct iscsi_session {
+       u8                      initiator_vendor;
+       u8                      isid[6];
+       enum iscsi_timer_flags_table time2retain_timer_flags;
+       u8                      version_active;
+       u16                     cid_called;
+       u16                     conn_recovery_count;
+       u16                     tsih;
+       /* state session is currently in */
+       u32                     session_state;
+       /* session wide counter: initiator assigned task tag */
+       itt_t                   init_task_tag;
+       /* session wide counter: target assigned task tag */
+       u32                     targ_xfer_tag;
+       u32                     cmdsn_window;
+
+       /* protects cmdsn values */
+       struct mutex            cmdsn_mutex;
+       /* session wide counter: expected command sequence number */
+       u32                     exp_cmd_sn;
+       /* session wide counter: maximum allowed command sequence number */
+       u32                     max_cmd_sn;
+       struct list_head        sess_ooo_cmdsn_list;
+
+       /* LIO specific session ID */
+       u32                     sid;
+       char                    auth_type[8];
+       /* unique within the target */
+       int                     session_index;
+       /* Used for session reference counting */
+       int                     session_usage_count;
+       int                     session_waiting_on_uc;
+       atomic_long_t           cmd_pdus;
+       atomic_long_t           rsp_pdus;
+       atomic_long_t           tx_data_octets;
+       atomic_long_t           rx_data_octets;
+       atomic_long_t           conn_digest_errors;
+       atomic_long_t           conn_timeout_errors;
+       u64                     creation_time;
+       /* Number of active connections */
+       atomic_t                nconn;
+       atomic_t                session_continuation;
+       atomic_t                session_fall_back_to_erl0;
+       atomic_t                session_logout;
+       atomic_t                session_reinstatement;
+       atomic_t                session_stop_active;
+       atomic_t                sleep_on_sess_wait_comp;
+       /* connection list */
+       struct list_head        sess_conn_list;
+       struct list_head        cr_active_list;
+       struct list_head        cr_inactive_list;
+       spinlock_t              conn_lock;
+       spinlock_t              cr_a_lock;
+       spinlock_t              cr_i_lock;
+       spinlock_t              session_usage_lock;
+       spinlock_t              ttt_lock;
+       struct completion       async_msg_comp;
+       struct completion       reinstatement_comp;
+       struct completion       session_wait_comp;
+       struct completion       session_waiting_on_uc_comp;
+       struct timer_list       time2retain_timer;
+       struct iscsi_sess_ops   *sess_ops;
+       struct se_session       *se_sess;
+       struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+       u8 auth_complete;
+       u8 checked_for_existing;
+       u8 current_stage;
+       u8 leading_connection;
+       u8 first_request;
+       u8 version_min;
+       u8 version_max;
+       u8 login_complete;
+       u8 login_failed;
+       bool zero_tsih;
+       char isid[6];
+       u32 cmd_sn;
+       itt_t init_task_tag;
+       u32 initial_exp_statsn;
+       u32 rsp_length;
+       u16 cid;
+       u16 tsih;
+       char req[ISCSI_HDR_LEN];
+       char rsp[ISCSI_HDR_LEN];
+       char *req_buf;
+       char *rsp_buf;
+       struct iscsi_conn *conn;
+       struct iscsi_np *np;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+       u32                     dataout_timeout;
+       u32                     dataout_timeout_retries;
+       u32                     default_erl;
+       u32                     nopin_timeout;
+       u32                     nopin_response_timeout;
+       u32                     random_datain_pdu_offsets;
+       u32                     random_datain_seq_offsets;
+       u32                     random_r2t_offsets;
+       u32                     tmr_cold_reset;
+       u32                     tmr_warm_reset;
+       struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+       enum naf_flags_table    naf_flags;
+       int                     authenticate_target;
+       /* Used for iscsit_global->discovery_auth,
+        * set to zero (auth disabled) by default */
+       int                     enforce_discovery_auth;
+#define MAX_USER_LEN                           256
+#define MAX_PASS_LEN                           256
+       char                    userid[MAX_USER_LEN];
+       char                    password[MAX_PASS_LEN];
+       char                    userid_mutual[MAX_USER_LEN];
+       char                    password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+       struct config_group     iscsi_sess_stats_group;
+       struct config_group     iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+       struct iscsi_node_attrib node_attrib;
+       struct iscsi_node_auth  node_auth;
+       struct iscsi_node_stat_grps node_stat_grps;
+       struct se_node_acl      se_node_acl;
+};
+
+struct iscsi_tpg_attrib {
+       u32                     authentication;
+       u32                     login_timeout;
+       u32                     netif_timeout;
+       u32                     generate_node_acls;
+       u32                     cache_dynamic_acls;
+       u32                     default_cmdsn_depth;
+       u32                     demo_mode_write_protect;
+       u32                     prod_mode_write_protect;
+       u32                     demo_mode_discovery;
+       u32                     default_erl;
+       u8                      t10_pi;
+       struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+       int                     np_network_transport;
+       int                     np_ip_proto;
+       int                     np_sock_type;
+       enum np_thread_state_table np_thread_state;
+       bool                    enabled;
+       enum iscsi_timer_flags_table np_login_timer_flags;
+       u32                     np_exports;
+       enum np_flags_table     np_flags;
+       unsigned char           np_ip[IPV6_ADDRESS_SPACE];
+       u16                     np_port;
+       spinlock_t              np_thread_lock;
+       struct completion       np_restart_comp;
+       struct socket           *np_socket;
+       struct __kernel_sockaddr_storage np_sockaddr;
+       struct task_struct      *np_thread;
+       struct timer_list       np_login_timer;
+       void                    *np_context;
+       struct iscsit_transport *np_transport;
+       struct list_head        np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+       struct iscsi_np         *tpg_np;
+       struct iscsi_portal_group *tpg;
+       struct iscsi_tpg_np     *tpg_np_parent;
+       struct list_head        tpg_np_list;
+       struct list_head        tpg_np_child_list;
+       struct list_head        tpg_np_parent_list;
+       struct se_tpg_np        se_tpg_np;
+       spinlock_t              tpg_np_parent_lock;
+       struct completion       tpg_np_comp;
+       struct kref             tpg_np_kref;
+};
+
+struct iscsi_portal_group {
+       unsigned char           tpg_chap_id;
+       /* TPG State */
+       enum tpg_state_table    tpg_state;
+       /* Target Portal Group Tag */
+       u16                     tpgt;
+       /* Id assigned to target sessions */
+       u16                     ntsih;
+       /* Number of active sessions */
+       u32                     nsessions;
+       /* Number of Network Portals available for this TPG */
+       u32                     num_tpg_nps;
+       /* Per TPG LIO specific session ID. */
+       u32                     sid;
+       /* Spinlock for adding/removing Network Portals */
+       spinlock_t              tpg_np_lock;
+       spinlock_t              tpg_state_lock;
+       struct se_portal_group tpg_se_tpg;
+       struct mutex            tpg_access_lock;
+       struct semaphore        np_login_sem;
+       struct iscsi_tpg_attrib tpg_attrib;
+       struct iscsi_node_auth  tpg_demo_auth;
+       /* Pointer to default list of iSCSI parameters for TPG */
+       struct iscsi_param_list *param_list;
+       struct iscsi_tiqn       *tpg_tiqn;
+       struct list_head        tpg_gnp_list;
+       struct list_head        tpg_list;
+} ____cacheline_aligned;
+
+struct iscsi_wwn_stat_grps {
+       struct config_group     iscsi_stat_group;
+       struct config_group     iscsi_instance_group;
+       struct config_group     iscsi_sess_err_group;
+       struct config_group     iscsi_tgt_attr_group;
+       struct config_group     iscsi_login_stats_group;
+       struct config_group     iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN                          224
+       unsigned char           tiqn[ISCSI_IQN_LEN];
+       enum tiqn_state_table   tiqn_state;
+       int                     tiqn_access_count;
+       u32                     tiqn_active_tpgs;
+       u32                     tiqn_ntpgs;
+       u32                     tiqn_num_tpg_nps;
+       u32                     tiqn_nsessions;
+       struct list_head        tiqn_list;
+       struct list_head        tiqn_tpg_list;
+       spinlock_t              tiqn_state_lock;
+       spinlock_t              tiqn_tpg_lock;
+       struct se_wwn           tiqn_wwn;
+       struct iscsi_wwn_stat_grps tiqn_stat_grps;
+       int                     tiqn_index;
+       struct iscsi_sess_err_stats  sess_err_stats;
+       struct iscsi_login_stats     login_stats;
+       struct iscsi_logout_stats    logout_stats;
+} ____cacheline_aligned;
+
+struct iscsit_global {
+       /* In core shutdown */
+       u32                     in_shutdown;
+       u32                     active_ts;
+       /* Unique identifier used for the authentication daemon */
+       u32                     auth_id;
+       u32                     inactive_ts;
+       /* Thread Set bitmap count */
+       int                     ts_bitmap_count;
+       /* Thread Set bitmap pointer */
+       unsigned long           *ts_bitmap;
+       /* Used for iSCSI discovery session authentication */
+       struct iscsi_node_acl   discovery_acl;
+       struct iscsi_portal_group       *discovery_tpg;
+};
+
+static inline u32 session_get_next_ttt(struct iscsi_session *session)
+{
+       u32 ttt;
+
+       spin_lock_bh(&session->ttt_lock);
+       ttt = session->targ_xfer_tag++;
+       if (ttt == 0xFFFFFFFF)
+               ttt = session->targ_xfer_tag++;
+       spin_unlock_bh(&session->ttt_lock);
+
+       return ttt;
+}
+
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+#endif /* ISCSI_TARGET_CORE_H */
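
A minimal usage sketch for the session_get_next_ttt() helper above: 0xFFFFFFFF is the reserved "no transfer tag" value in the iSCSI protocol, which is why the helper skips it. The caller below is illustrative and not part of this patch.

        /* Illustrative only: tag a new R2T with a fresh target transfer tag.
         * The helper never returns 0xFFFFFFFF, which iSCSI reserves to mean
         * "no transfer tag". */
        static void example_assign_r2t_ttt(struct iscsi_cmd *cmd, struct iscsi_r2t *r2t)
        {
                r2t->targ_xfer_tag = session_get_next_ttt(cmd->conn->sess);
        }
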
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
new file mode 100644 (file)
index 0000000..3ff76b4
--- /dev/null
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN         0
+#define ISCSI_SESS_ERR_DIGEST          1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT     2
+#define ISCSI_SESS_ERR_PDU_FORMAT      3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+       spinlock_t      lock;
+       u32             digest_errors;
+       u32             cxn_timeout_errors;
+       u32             pdu_format_errors;
+       u32             last_sess_failure_type;
+       char            last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER         2
+#define ISCSI_LOGIN_FAIL_REDIRECT      3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE     4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE  5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE     6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+       spinlock_t      lock;
+       u32             accepts;
+       u32             other_fails;
+       u32             redirects;
+       u32             authorize_fails;
+       u32             authenticate_fails;
+       u32             negotiate_fails;        /* used for notifications */
+       u64             last_fail_time;         /* time stamp (jiffies) */
+       u32             last_fail_type;
+       int             last_intr_fail_ip_family;
+       unsigned char   last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+       char            last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+       spinlock_t      lock;
+       u32             normal_logouts;
+       u32             abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif   /*** ISCSI_TARGET_STAT_H ***/
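
A hedged sketch of how the per-tiqn login statistics above would typically be updated under their spinlock; the function name is illustrative, and get_jiffies_64() is assumed to be available via <linux/jiffies.h>.

        /* Illustrative only: record an authentication failure in
         * struct iscsi_login_stats (last_fail_time is documented as jiffies). */
        static void example_count_authenticate_fail(struct iscsi_login_stats *ls)
        {
                spin_lock(&ls->lock);
                ls->authenticate_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
                ls->last_fail_time = get_jiffies_64();
                spin_unlock(&ls->lock);
        }
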
index daef9daa500c11f0ff7d92151fafbf9080a80e02..e6bb166f12c212aac238d0d951594cce65851145 100644 (file)
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/list.h>
-#include "../../../drivers/target/iscsi/iscsi_target_core.h"
+#include "iscsi_target_core.h"
 
 struct iscsit_transport {
 #define ISCSIT_TRANSPORT_NAME  16
index 4a8795a87b9e99f30ee07f43fdce3984ddc658e0..672150b6aaf52bc24c640d16f1f7c841e0be655d 100644 (file)
@@ -407,7 +407,7 @@ struct t10_reservation {
        /* Activate Persistence across Target Power Loss enabled
         * for SCSI device */
        int pr_aptpl_active;
-#define PR_APTPL_BUF_LEN                       8192
+#define PR_APTPL_BUF_LEN                       262144
        u32 pr_generation;
        spinlock_t registration_lock;
        spinlock_t aptpl_reg_lock;
index 26386cf3db444cbca7bc9e7138f8b0e01c0669b6..aef9a81b2d75b9524000f3b1b1bf858464e575e2 100644 (file)
@@ -115,7 +115,13 @@ struct nvme_id_ns {
        __le16                  nawun;
        __le16                  nawupf;
        __le16                  nacwu;
-       __u8                    rsvd40[80];
+       __le16                  nabsn;
+       __le16                  nabo;
+       __le16                  nabspf;
+       __u16                   rsvd46;
+       __le64                  nvmcap[2];
+       __u8                    rsvd64[40];
+       __u8                    nguid[16];
        __u8                    eui64[8];
        struct nvme_lbaf        lbaf[16];
        __u8                    rsvd192[192];
@@ -124,10 +130,22 @@ struct nvme_id_ns {
 
 enum {
        NVME_NS_FEAT_THIN       = 1 << 0,
+       NVME_NS_FLBAS_LBA_MASK  = 0xf,
+       NVME_NS_FLBAS_META_EXT  = 0x10,
        NVME_LBAF_RP_BEST       = 0,
        NVME_LBAF_RP_BETTER     = 1,
        NVME_LBAF_RP_GOOD       = 2,
        NVME_LBAF_RP_DEGRADED   = 3,
+       NVME_NS_DPC_PI_LAST     = 1 << 4,
+       NVME_NS_DPC_PI_FIRST    = 1 << 3,
+       NVME_NS_DPC_PI_TYPE3    = 1 << 2,
+       NVME_NS_DPC_PI_TYPE2    = 1 << 1,
+       NVME_NS_DPC_PI_TYPE1    = 1 << 0,
+       NVME_NS_DPS_PI_FIRST    = 1 << 3,
+       NVME_NS_DPS_PI_MASK     = 0x7,
+       NVME_NS_DPS_PI_TYPE1    = 1,
+       NVME_NS_DPS_PI_TYPE2    = 2,
+       NVME_NS_DPS_PI_TYPE3    = 3,
 };
 
 struct nvme_smart_log {
@@ -261,6 +279,10 @@ enum {
        NVME_RW_DSM_LATENCY_LOW         = 3 << 4,
        NVME_RW_DSM_SEQ_REQ             = 1 << 6,
        NVME_RW_DSM_COMPRESSED          = 1 << 7,
+       NVME_RW_PRINFO_PRCHK_REF        = 1 << 10,
+       NVME_RW_PRINFO_PRCHK_APP        = 1 << 11,
+       NVME_RW_PRINFO_PRCHK_GUARD      = 1 << 12,
+       NVME_RW_PRINFO_PRACT            = 1 << 13,
 };
 
 struct nvme_dsm_cmd {
@@ -549,6 +571,8 @@ struct nvme_passthru_cmd {
        __u32   result;
 };
 
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID          _IO('N', 0x40)
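
The new identify-namespace fields and flag masks above add end-to-end data protection (T10 PI) support. As a worked example of the version macro, NVME_VS(1, 2) evaluates to (1 << 16) | (2 << 8) = 0x10200. Below is a hedged sketch of decoding the protection settings; it assumes the flbas, dps and per-format ms (metadata size) fields of struct nvme_id_ns, which are not shown in this hunk, and is illustrative rather than driver code.

        /* Illustrative only: pick out the active LBA format and protection
         * type from an Identify Namespace result. */
        static int example_ns_pi_type(struct nvme_id_ns *id)
        {
                unsigned int lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; /* active LBA format */
                unsigned int pi = id->dps & NVME_NS_DPS_PI_MASK;        /* 0 = none, 1..3 = Type 1..3 */

                /* Only meaningful when the chosen format carries metadata. */
                if (le16_to_cpu(id->lbaf[lbaf].ms) && pi)
                        return pi;
                return 0;
        }
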
index 89f63503f903dd25f6c6a99594f7ace7dc5bac68..31891d9535e2a4ede364627a805d6d346fae8b9c 100644 (file)
@@ -185,4 +185,9 @@ struct prctl_mm_map {
 #define PR_MPX_ENABLE_MANAGEMENT  43
 #define PR_MPX_DISABLE_MANAGEMENT 44
 
+#define PR_SET_FP_MODE         45
+#define PR_GET_FP_MODE         46
+# define PR_FP_MODE_FR         (1 << 0)        /* 64b FP registers */
+# define PR_FP_MODE_FRE                (1 << 1)        /* 32b compatibility */
+
 #endif /* _LINUX_PRCTL_H */
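
PR_SET_FP_MODE/PR_GET_FP_MODE control the MIPS floating-point register model from user space. A hedged user-space sketch follows, assuming a kernel that wires these prctls up and a libc whose <sys/prctl.h> exposes the new constants (otherwise they come from <linux/prctl.h>).

        #include <sys/prctl.h>

        /* Illustrative only: request 64-bit FP registers with 32-bit
         * compatibility emulation; returns 0 on success, -1 with errno set. */
        int example_set_fp_mode(void)
        {
                return prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE, 0, 0, 0);
        }
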
index 5e0d0ed61cf3b76d2ee7cae605834d8139cf73ee..25331f9faa7682a88222d6db22a4213472219d06 100644 (file)
@@ -65,6 +65,10 @@ struct serial_struct {
 #define SERIAL_IO_PORT 0
 #define SERIAL_IO_HUB6 1
 #define SERIAL_IO_MEM  2
+#define SERIAL_IO_MEM32          3
+#define SERIAL_IO_AU     4
+#define SERIAL_IO_TSI    5
+#define SERIAL_IO_MEM32BE 6
 
 #define UART_CLEAR_FIFO                0x01
 #define UART_USE_FIFO          0x02
index 19d5219b0b991eda86a5bb8a0274d35a5a88ce17..242cf0c6e33d37f229a224839ca6f679bb142674 100644 (file)
@@ -9,3 +9,4 @@ header-y += tc_pedit.h
 header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
+header-y += tc_connmark.h
index 29715d27548f21b20303861c24f56faeab835cc9..82889c30f4f5a79fb820c1dd20e1e8a0f99b4bf0 100644 (file)
@@ -333,6 +333,7 @@ enum {
        VFIO_PCI_MSI_IRQ_INDEX,
        VFIO_PCI_MSIX_IRQ_INDEX,
        VFIO_PCI_ERR_IRQ_INDEX,
+       VFIO_PCI_REQ_IRQ_INDEX,
        VFIO_PCI_NUM_IRQS
 };
 
index 3c53eec4ae22697ecb87522bf83de79f4e7e2b0e..19c66fcbab8afb1ee5239140fab657b4fe52ed67 100644 (file)
@@ -60,7 +60,7 @@ struct virtio_blk_config {
        __u32 size_max;
        /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
        __u32 seg_max;
-       /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */
+       /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
        struct virtio_blk_geometry {
                __u16 cylinders;
                __u8 heads;
@@ -119,7 +119,11 @@ struct virtio_blk_config {
 #define VIRTIO_BLK_T_BARRIER   0x80000000
 #endif /* !VIRTIO_BLK_NO_LEGACY */
 
-/* This is the first element of the read scatter-gather list. */
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
 struct virtio_blk_outhdr {
        /* VIRTIO_BLK_T* */
        __virtio32 type;
index 42b9370771b014d4b959cd4c0c67a7bcc7eee41d..cc18ef8825c0eddb493a1d1b8bc618f5d3ce74e8 100644 (file)
 
 #include <linux/virtio_types.h>
 
-#define VIRTIO_SCSI_CDB_SIZE   32
-#define VIRTIO_SCSI_SENSE_SIZE 96
+/* Default values of the CDB and sense data size configuration fields */
+#define VIRTIO_SCSI_CDB_DEFAULT_SIZE   32
+#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96
+
+#ifndef VIRTIO_SCSI_CDB_SIZE
+#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE
+#endif
+#ifndef VIRTIO_SCSI_SENSE_SIZE
+#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE
+#endif
 
 /* SCSI command request, followed by data-out */
 struct virtio_scsi_cmd_req {
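
The change above turns the CDB and sense buffer sizes into overridable defaults. A hedged sketch of how a consumer could pick larger sizes before including the UAPI header (the values are illustrative):

        /* Illustrative only: request larger CDB and sense areas; without
         * these defines the 32/96 byte defaults are used. */
        #define VIRTIO_SCSI_CDB_SIZE    64
        #define VIRTIO_SCSI_SENSE_SIZE  128
        #include <linux/virtio_scsi.h>
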
index 867cc5084afbfce8ab24cfd87d5f52cda93697de..b513e662d8e4999401f3c7366368bfb3c7900664 100644 (file)
@@ -90,6 +90,7 @@ enum {
 };
 
 enum {
+       IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
        __u8  reserved[4];
 };
 
+struct ib_uverbs_ex_query_device {
+       __u32 comp_mask;
+       __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+       __u64 general_caps;
+       struct {
+               __u32 rc_odp_caps;
+               __u32 uc_odp_caps;
+               __u32 ud_odp_caps;
+       } per_transport_caps;
+       __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+       struct ib_uverbs_query_device_resp base;
+       __u32 comp_mask;
+       __u32 response_length;
+       struct ib_uverbs_odp_caps odp_caps;
+};
+
 struct ib_uverbs_query_port {
        __u64 response;
        __u8  port_num;
index 60de61fea8e364069969faa9bc08a7f9e19c4433..c8ed15daad02d37c5a8eec0b3787dfabf8ad9d3f 100644 (file)
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops {
 };
 
 struct omap_dss_device {
+       struct kobject kobj;
        struct device *dev;
 
        struct module *owner;
index 7491ee5d81647d704a7d34ce17e107c7c374b64d..83338210ee045277b785e2af35b11b30b24c1e74 100644 (file)
@@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void)
 }
 #endif
 
+#ifdef CONFIG_PREEMPT
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+}
+
+#else
+
+DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+       __this_cpu_write(xen_in_preemptible_hcall, true);
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+       __this_cpu_write(xen_in_preemptible_hcall, false);
+}
+
+#endif /* CONFIG_PREEMPT */
+
 #endif /* INCLUDE_XEN_OPS_H */
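
With CONFIG_PREEMPT the begin/end helpers above compile away; otherwise they mark the current CPU as being inside a preemptible hypercall so the kernel can voluntarily reschedule around long-running calls. A hedged usage sketch; the hypercall itself is a placeholder, not a real API.

        extern long example_long_running_hypercall(void);      /* placeholder only */

        static long example_issue_hypercall(void)
        {
                long ret;

                xen_preemptible_hcall_begin();  /* mark: safe to preempt this hcall */
                ret = example_long_running_hypercall();
                xen_preemptible_hcall_end();

                return ret;
        }
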
index b78f21caf55aa074d3f883d0a0269f1c7d268339..b0f1c9e5d6878117c43a1b3e402e5f97ceb15657 100644 (file)
@@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
                                           const char *mod_name);
 
 #define xenbus_register_frontend(drv) \
-       __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+       __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME)
 #define xenbus_register_backend(drv) \
-       __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
+       __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME)
 
 void xenbus_unregister_driver(struct xenbus_driver *drv);
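
The dropped trailing semicolons matter because a macro that expands to "call();" produces two statements. A hedged sketch of the kind of caller that now compiles cleanly; the driver object and function are illustrative.

        static struct xenbus_driver example_drv;        /* illustrative */

        static int example_register(bool frontend)
        {
                /* With the old macros the hidden ';' orphaned the else branch
                 * and made this a syntax error. */
                if (frontend)
                        return xenbus_register_frontend(&example_drv);
                else
                        return xenbus_register_backend(&example_drv);
        }
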
 
index 1d1fe9361d29882369f76e426b56ddd002582d2c..fc7f4748d34a9fe017bd9a42353db7a4095240d2 100644 (file)
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 
        rcu_read_lock();
        cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-               if (cp == root_cs)
-                       continue;
-
                /* skip the whole subtree if @cp doesn't have any CPU */
                if (cpumask_empty(cp->cpus_allowed)) {
                        pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
                 * If it becomes empty, inherit the effective mask of the
                 * parent, which is guaranteed to have some CPUs.
                 */
-               if (cpumask_empty(new_cpus))
+               if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
                        cpumask_copy(new_cpus, parent->effective_cpus);
 
                /* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
                 * If it becomes empty, inherit the effective mask of the
                 * parent, which is guaranteed to have some MEMs.
                 */
-               if (nodes_empty(*new_mems))
+               if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
                        *new_mems = parent->effective_mems;
 
                /* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
        spin_lock_irq(&callback_lock);
        cs->mems_allowed = parent->mems_allowed;
+       cs->effective_mems = parent->mems_allowed;
        cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+       cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
        spin_unlock_irq(&callback_lock);
 out_unlock:
        mutex_unlock(&cpuset_mutex);
index 07ce18ca71e0cd46b70155269a77b04af23f6526..0874e2edd2756bcbe5542a496fb6b131b092b3ea 100644 (file)
@@ -604,7 +604,7 @@ return_normal:
                   online_cpus)
                cpu_relax();
        if (!time_left)
-               pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
+               pr_crit("Timed out waiting for secondary CPUs.\n");
 
        /*
         * At this point the primary processor is completely
@@ -696,6 +696,14 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 
        if (arch_kgdb_ops.enable_nmi)
                arch_kgdb_ops.enable_nmi(0);
+       /*
+        * Avoid entering the debugger if we were triggered due to an oops
+        * but panic_timeout indicates the system should automatically
+        * reboot on panic. We don't want to get stuck waiting for input
+        * on such systems, especially if it's "just" an oops.
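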
+        */
+       if (signo != SIGTRAP && panic_timeout)
+               return 1;
 
        memset(ks, 0, sizeof(struct kgdb_state));
        ks->cpu                 = raw_smp_processor_id();
@@ -828,6 +836,15 @@ static int kgdb_panic_event(struct notifier_block *self,
                            unsigned long val,
                            void *data)
 {
+       /*
+        * Avoid entering the debugger if we were triggered due to a panic.
+        * We don't want to get stuck waiting for input from the user in such
+        * a case; panic_timeout indicates the system should automatically
+        * reboot on panic.
+        */
+       if (panic_timeout)
+               return NOTIFY_DONE;
+
        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", (char *)data);
        kgdb_breakpoint();
index 7c70812caea5b3223fe1a9b7d9252980f90a02fe..fc1ef736253c79954686d018a2deca4c86300fa6 100644 (file)
@@ -439,7 +439,7 @@ poll_again:
  *     substituted for %d, %x or %o in the prompt.
  */
 
-char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
 {
        if (prompt && kdb_prompt_str != prompt)
                strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
@@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor)
        return 0;
 }
 
-int vkdb_printf(const char *fmt, va_list ap)
+int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 {
        int diag;
        int linecount;
@@ -680,6 +680,12 @@ int vkdb_printf(const char *fmt, va_list ap)
                        size_avail = sizeof(kdb_buffer) - len;
                        goto kdb_print_out;
                }
+               if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
+                       /*
+                        * This was an interactive search (using '/' at the
+                        * more prompt) and it has completed. Clear the flag.
+                        */
+                       kdb_grepping_flag = 0;
                /*
                 * at this point the string is a full line and
                 * should be printed, up to the null.
@@ -691,19 +697,20 @@ kdb_printit:
         * Write to all consoles.
         */
        retlen = strlen(kdb_buffer);
+       cp = (char *) printk_skip_level(kdb_buffer);
        if (!dbg_kdb_mode && kgdb_connected) {
-               gdbstub_msg_write(kdb_buffer, retlen);
+               gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
        } else {
                if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = retlen;
-                       cp = kdb_buffer;
+                       len = retlen - (cp - kdb_buffer);
+                       cp2 = cp;
                        while (len--) {
-                               dbg_io_ops->write_char(*cp);
-                               cp++;
+                               dbg_io_ops->write_char(*cp2);
+                               cp2++;
                        }
                }
                while (c) {
-                       c->write(c, kdb_buffer, retlen);
+                       c->write(c, cp, retlen - (cp - kdb_buffer));
                        touch_nmi_watchdog();
                        c = c->next;
                }
@@ -711,7 +718,10 @@ kdb_printit:
        if (logging) {
                saved_loglevel = console_loglevel;
                console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-               printk(KERN_INFO "%s", kdb_buffer);
+               if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
+                       printk("%s", kdb_buffer);
+               else
+                       pr_info("%s", kdb_buffer);
        }
 
        if (KDB_STATE(PAGER)) {
@@ -794,11 +804,23 @@ kdb_printit:
                        kdb_nextline = linecount - 1;
                        kdb_printf("\r");
                        suspend_grep = 1; /* for this recursion */
+               } else if (buf1[0] == '/' && !kdb_grepping_flag) {
+                       kdb_printf("\r");
+                       kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
+                                  kdbgetenv("SEARCHPROMPT") ?: "search> ");
+                       *strchrnul(kdb_grep_string, '\n') = '\0';
+                       kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
+                       suspend_grep = 1; /* for this recursion */
                } else if (buf1[0] && buf1[0] != '\n') {
                        /* user hit something other than enter */
                        suspend_grep = 1; /* for this recursion */
-                       kdb_printf("\nOnly 'q' or 'Q' are processed at more "
-                                  "prompt, input ignored\n");
+                       if (buf1[0] != '/')
+                               kdb_printf(
+                                   "\nOnly 'q', 'Q' or '/' are processed at "
+                                   "more prompt, input ignored\n");
+                       else
+                               kdb_printf("\n'/' cannot be used during | "
+                                          "grep filtering, input ignored\n");
                } else if (kdb_grepping_flag) {
                        /* user hit enter */
                        suspend_grep = 1; /* for this recursion */
@@ -844,7 +866,7 @@ int kdb_printf(const char *fmt, ...)
        int r;
 
        va_start(ap, fmt);
-       r = vkdb_printf(fmt, ap);
+       r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
        va_end(ap);
 
        return r;
index 7b40c5f07dce8d09e1ebaba547e401b5655befbb..4121345498e0e48f10b414a4b12e0b5f15daeabd 100644 (file)
@@ -50,8 +50,7 @@
 static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
 module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
 
-#define GREP_LEN 256
-char kdb_grep_string[GREP_LEN];
+char kdb_grep_string[KDB_GREP_STRLEN];
 int kdb_grepping_flag;
 EXPORT_SYMBOL(kdb_grepping_flag);
 int kdb_grep_leading;
@@ -870,7 +869,7 @@ static void parse_grep(const char *str)
        len = strlen(cp);
        if (!len)
                return;
-       if (len >= GREP_LEN) {
+       if (len >= KDB_GREP_STRLEN) {
                kdb_printf("search string too long\n");
                return;
        }
@@ -915,13 +914,12 @@ int kdb_parse(const char *cmdstr)
        char *cp;
        char *cpp, quoted;
        kdbtab_t *tp;
-       int i, escaped, ignore_errors = 0, check_grep;
+       int i, escaped, ignore_errors = 0, check_grep = 0;
 
        /*
         * First tokenize the command string.
         */
        cp = (char *)cmdstr;
-       kdb_grepping_flag = check_grep = 0;
 
        if (KDB_FLAG(CMD_INTERRUPT)) {
                /* Previous command was interrupted, newline must not
@@ -1247,7 +1245,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                kdb_printf("due to NonMaskable Interrupt @ "
                           kdb_machreg_fmt "\n",
                           instruction_pointer(regs));
-               kdb_dumpregs(regs);
                break;
        case KDB_REASON_SSTEP:
        case KDB_REASON_BREAK:
@@ -1281,6 +1278,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                 */
                kdb_nextline = 1;
                KDB_STATE_CLEAR(SUPPRESS);
+               kdb_grepping_flag = 0;
+               /* ensure the old search does not leak into '/' commands */
+               kdb_grep_string[0] = '\0';
 
                cmdbuf = cmd_cur;
                *cmdbuf = '\0';
@@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
        /*
         * Validate cpunum
         */
-       if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
+       if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
                return KDB_BADCPUNUM;
 
        dbg_switch_cpu = cpunum;
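
A worked example of the bounds fix above: kgdb_info[] is sized by NR_CPUS, so with NR_CPUS = 8 the valid indices are 0..7. The old "cpunum > NR_CPUS" test let cpunum == 8 through and indexed one element past the array, while the new ">= CONFIG_NR_CPUS" comparison rejects it.
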
@@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv)
 #define K(x) ((x) << (PAGE_SHIFT - 10))
        kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
                   "Buffers:        %8lu kB\n",
-                  val.totalram, val.freeram, val.bufferram);
+                  K(val.totalram), K(val.freeram), K(val.bufferram));
        return 0;
 }
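
A worked example of the K() conversion fixed above: val.totalram and friends are page counts, and K(x) = x << (PAGE_SHIFT - 10). With 4 KiB pages (PAGE_SHIFT = 12) that is x * 4, so 262144 pages now print as 1048576 kB instead of a raw page count.
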
 
index eaacd1693954b13aa55c59028c597b6ed91488f3..75014d7f45681b1a75643fd06dbc119e8ea16cb7 100644 (file)
@@ -196,7 +196,9 @@ extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
 
 /* Miscellaneous functions and data areas */
 extern int kdb_grepping_flag;
+#define KDB_GREPPING_FLAG_SEARCH 0x8000
 extern char kdb_grep_string[];
+#define KDB_GREP_STRLEN 256
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
@@ -209,7 +211,7 @@ extern void kdb_ps1(const struct task_struct *p);
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
-extern char *kdb_getstr(char *, size_t, char *);
+extern char *kdb_getstr(char *, size_t, const char *);
 extern void kdb_gdb_state_pass(char *buf);
 
 /* Defines for kdb_symbol_print */
index f04daabfd1cffb78856e03b634b9d7c914faf4d1..453ef61311d4cf069669fb449270a0700ae0f2f9 100644 (file)
@@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event)
        ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
        WARN_ON_ONCE(ctx->parent_ctx);
        perf_remove_from_context(event, true);
-       mutex_unlock(&ctx->mutex);
+       perf_event_ctx_unlock(event, ctx);
 
        _free_event(event);
 }
index 196a06fbc122fed81333c3bfd7205cfcef9aa73f..886d09e691d5a8d180826f2d5581c6bfb252d63c 100644 (file)
@@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
+        *
+        * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
+        * it cannot be set along with IRQF_NO_SUSPEND.
         */
-       if ((irqflags & IRQF_SHARED) && !dev_id)
+       if (((irqflags & IRQF_SHARED) && !dev_id) ||
+           (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
+           ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
                return -EINVAL;
 
        desc = irq_to_desc(irq);
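
A hedged sketch of the combination the new checks permit: IRQF_COND_SUSPEND must be paired with IRQF_SHARED and must not be mixed with IRQF_NO_SUSPEND. The handler, irq number and cookie below are illustrative.

        /* Illustrative only: a shared interrupt whose handler promises to
         * cope with being invoked while the system is suspending. */
        static irqreturn_t example_handler(int irq, void *dev_id)
        {
                return IRQ_HANDLED;
        }

        static int example_request(unsigned int irq, void *dev)
        {
                return request_irq(irq, example_handler,
                                   IRQF_SHARED | IRQF_COND_SUSPEND,
                                   "example", dev);
        }
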
index 3ca5325927045572edfa7d3eebd79f01b2a4c29a..5204a6d1b9854feecfc2fff678e1d2b8eef33c42 100644 (file)
@@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
 
        if (action->flags & IRQF_NO_SUSPEND)
                desc->no_suspend_depth++;
+       else if (action->flags & IRQF_COND_SUSPEND)
+               desc->cond_suspend_depth++;
 
        WARN_ON_ONCE(desc->no_suspend_depth &&
-                    desc->no_suspend_depth != desc->nr_actions);
+                    (desc->no_suspend_depth +
+                       desc->cond_suspend_depth) != desc->nr_actions);
 }
 
 /*
@@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
 
        if (action->flags & IRQF_NO_SUSPEND)
                desc->no_suspend_depth--;
+       else if (action->flags & IRQF_COND_SUSPEND)
+               desc->cond_suspend_depth--;
 }
 
 static bool suspend_device_irq(struct irq_desc *desc, int irq)
index ff7f47d026ac48b21d6239f9db36ee8662585a45..3f9f1d6b4c2e5726217556a40454c46dbe368b89 100644 (file)
@@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj)
 /* sets obj->mod if object is not vmlinux and module is found */
 static void klp_find_object_module(struct klp_object *obj)
 {
+       struct module *mod;
+
        if (!klp_is_module(obj))
                return;
 
        mutex_lock(&module_mutex);
        /*
-        * We don't need to take a reference on the module here because we have
-        * the klp_mutex, which is also taken by the module notifier.  This
-        * prevents any module from unloading until we release the klp_mutex.
+        * We do not want to block removal of patched modules and therefore
+        * we do not take a reference here. The patches are removed by
+        * the module going notifier instead.
+        */
+       mod = find_module(obj->name);
+       /*
+        * Do not interfere with the module coming and going notifiers.
+        * Note that the patch might still be needed before the going handler
+        * is called. Module functions can be called even in the GOING state
+        * until mod->exit() finishes. This is especially important for
+        * patches that modify the semantics of the functions.
         */
-       obj->mod = find_module(obj->name);
+       if (mod && mod->klp_alive)
+               obj->mod = mod;
+
        mutex_unlock(&module_mutex);
 }
 
@@ -248,11 +260,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name,
        /* first, check if it's an exported symbol */
        preempt_disable();
        sym = find_symbol(name, NULL, NULL, true, true);
-       preempt_enable();
        if (sym) {
                *addr = sym->value;
+               preempt_enable();
                return 0;
        }
+       preempt_enable();
 
        /* otherwise check if it's in another .o within the patch module */
        return klp_find_object_symbol(pmod->name, name, addr);
@@ -314,12 +327,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
        rcu_read_lock();
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
-       rcu_read_unlock();
-
        if (WARN_ON_ONCE(!func))
-               return;
+               goto unlock;
 
        klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+       rcu_read_unlock();
 }
 
 static int klp_disable_func(struct klp_func *func)
@@ -731,7 +744,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
        func->state = KLP_DISABLED;
 
        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-                                   obj->kobj, func->old_name);
+                                   obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -766,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
                return -EINVAL;
 
        obj->state = KLP_DISABLED;
+       obj->mod = NULL;
 
        klp_find_object_module(obj);
 
@@ -807,7 +821,7 @@ static int klp_init_patch(struct klp_patch *patch)
        patch->state = KLP_DISABLED;
 
        ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
-                                  klp_root_kobj, patch->mod->name);
+                                  klp_root_kobj, "%s", patch->mod->name);
        if (ret)
                goto unlock;
 
@@ -960,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 
        mutex_lock(&klp_mutex);
 
+       /*
+        * Each module has to know that the notifier has been called.
+        * We never know what module will get patched by a new patch.
+        */
+       if (action == MODULE_STATE_COMING)
+               mod->klp_alive = true;
+       else /* MODULE_STATE_GOING */
+               mod->klp_alive = false;
+
        list_for_each_entry(patch, &klp_patches, list) {
                for (obj = patch->objs; obj->funcs; obj++) {
                        if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
index 3059bc2f022daa6e4d8d976c39a7d8a8f546d813..6357265a31ad1a34b881aba31abe27ab6d3921ec 100644 (file)
@@ -1193,7 +1193,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
        if (unlikely(ret)) {
-               remove_waiter(lock, &waiter);
+               __set_current_state(TASK_RUNNING);
+               if (rt_mutex_has_waiters(lock))
+                       remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, chwalk, &waiter);
        }
 
index b34813f725e970fa79b97e625fab6c568543aaf8..b3d634ed06c94f1b2ce986293d0cda84078d4a46 100644 (file)
@@ -56,7 +56,6 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
-#include <linux/kasan.h>
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { }
 void __weak module_memfree(void *module_region)
 {
        vfree(module_region);
-       kasan_module_free(module_region);
 }
 
 void __weak module_arch_cleanup(struct module *mod)
@@ -2313,11 +2311,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
        info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
        info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
        mod->core_size += strtab_size;
+       mod->core_size = debug_align(mod->core_size);
 
        /* Put string table section at end of init part of module. */
        strsect->sh_flags |= SHF_ALLOC;
        strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
                                         info->index.str) | INIT_OFFSET_MASK;
+       mod->init_size = debug_align(mod->init_size);
        pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
 }
 
index cbd69d842341175e4c6db7c68a4b6e3689f5c928..2ca4a8b5fe57960c584d74e7dabf76b22bc71613 100644 (file)
@@ -3,7 +3,7 @@
 
 struct console_cmdline
 {
-       char    name[8];                        /* Name of the driver       */
+       char    name[16];                       /* Name of the driver       */
        int     index;                          /* Minor dev. to use        */
        char    *options;                       /* Options for the driver   */
 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
index c06df7de0963a3c82889c274dd2d707507d31d50..bb0635bd74f26a2ecb9f651de9e0c4113e4f2476 100644 (file)
@@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args)
 
 #ifdef CONFIG_KGDB_KDB
        if (unlikely(kdb_trap_printk)) {
-               r = vkdb_printf(fmt, args);
+               r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
                return r;
        }
 #endif
@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon)
        for (i = 0, c = console_cmdline;
             i < MAX_CMDLINECONSOLES && c->name[0];
             i++, c++) {
+               BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
                if (strcmp(c->name, newcon->name) != 0)
                        continue;
                if (newcon->index >= 0 &&
index 0d7bbe3095ad717c369b142079f16236a58e1af7..0a571e9a0f1d00868c74c5032912c1a0b24dce94 100644 (file)
@@ -326,6 +326,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        special = t->rcu_read_unlock_special;
        if (special.b.need_qs) {
                rcu_preempt_qs();
+               t->rcu_read_unlock_special.b.need_qs = false;
                if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
                        return;
index 8a2e230fb86ad43e3196488961f2b71d1e8b28ed..eae160dd669d9d8d58bb911595c6391b2732edb4 100644 (file)
@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
         * so we don't have to move tasks around upon policy change,
         * or flail around trying to allocate bandwidth on the fly.
         * A bandwidth exception in __sched_setscheduler() allows
-        * the policy change to proceed.  Thereafter, task_group()
-        * returns &root_task_group, so zero bandwidth is required.
+        * the policy change to proceed.
         */
        free_rt_sched_group(tg);
        tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
        if (tg != &root_task_group)
                return false;
 
-       if (p->sched_class != &fair_sched_class)
-               return false;
-
        /*
         * We can only assume the task group can't go away on us if
         * autogroup_move_group() can see us on ->thread_group list.
index 7052d3fd4e7bd87a29bd144cbca1086621040251..8d0f35debf35657689908a4b37df7230ba7d6710 100644 (file)
@@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x)
         * first without taking the lock so we can
         * return early in the blocking case.
         */
-       if (!ACCESS_ONCE(x->done))
+       if (!READ_ONCE(x->done))
                return 0;
 
        spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-       return !!ACCESS_ONCE(x->done);
+       if (!READ_ONCE(x->done))
+               return false;
+
+       /*
+        * If ->done, we need to wait for complete() to release ->wait.lock
+        * otherwise we can end up freeing the completion before complete()
+        * is done referencing it.
+        *
+        * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
+        * the loads of ->done and ->wait.lock such that we cannot observe
+        * the lock before complete() acquires it while observing the ->done
+        * after it's acquired the lock.
+        */
+       smp_rmb();
+       spin_unlock_wait(&x->wait.lock);
+       return true;
 }
 EXPORT_SYMBOL(completion_done);
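
The smp_rmb()/spin_unlock_wait() pair added above matters to callers that poll completion_done() and then free the object embedding the completion. A minimal sketch of such a caller (the request structure and function names are made up for illustration):

	struct my_request {
		struct completion done;
		/* ... */
	};

	static void my_reap(struct my_request *req)
	{
		/*
		 * Without the lock-wait inside completion_done(), complete()
		 * running on another CPU could still hold req->done.wait.lock
		 * here, and the kfree() below would free memory it is about
		 * to touch.
		 */
		while (!completion_done(&req->done))
			cpu_relax();
		kfree(req);
	}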
index 13049aac05a6242e44a59b9ec424f1004448b3d4..f0f831e8a345d835f4cb21bf899c50ab67042b43 100644 (file)
@@ -306,66 +306,6 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-
-               while (unlikely(task_on_rq_migrating(p)))
-                       cpu_relax();
-       }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-               while (unlikely(task_on_rq_migrating(p)))
-                       cpu_relax();
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -2899,7 +2839,7 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
-static void preempt_schedule_common(void)
+static void __sched notrace preempt_schedule_common(void)
 {
        do {
                __preempt_count_add(PREEMPT_ACTIVE);
@@ -4418,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-       struct rq *rq = raw_rq();
+       int old_iowait = current->in_iowait;
+       struct rq *rq;
        long ret;
 
+       current->in_iowait = 1;
+       if (old_iowait)
+               blk_schedule_flush_plug(current);
+       else
+               blk_flush_plug(current);
+
        delayacct_blkio_start();
+       rq = raw_rq();
        atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
+       current->in_iowait = old_iowait;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
+
        return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
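
With this hunk the out-of-line io_schedule() is gone and io_schedule_timeout() becomes the single exported entry point. An equivalent wrapper for existing io_schedule() callers would then look roughly like the following (a sketch, not necessarily the exact header change carried by this merge):

	static inline void io_schedule(void)
	{
		io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}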
@@ -7642,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
+       /*
+        * Autogroups do not have RT tasks; see autogroup_create().
+        */
+       if (task_group_is_autogroup(tg))
+               return 0;
+
        for_each_process_thread(g, p) {
                if (rt_task(p) && task_group(p) == tg)
                        return 1;
@@ -7734,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
        int i, err = 0;
 
+       /*
+        * Disallowing the root group RT runtime is BAD; it would prevent the
+        * kernel from creating (and/or operating) RT threads.
+        */
+       if (tg == &root_task_group && rt_runtime == 0)
+               return -EINVAL;
+
+       /* A zero period does not make any sense. */
+       if (rt_period == 0)
+               return -EINVAL;
+
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7790,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
        rt_period = (u64)rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-       if (rt_period == 0)
-               return -EINVAL;
-
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
index a027799ae130d3623ff4351f08c3cf456979bfbc..3fa8fa6d940300c1fbae721503aad2666f72b4e5 100644 (file)
@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
+       unsigned long flags;
        struct rq *rq;
-again:
-       rq = task_rq(p);
-       raw_spin_lock(&rq->lock);
 
-       if (rq != task_rq(p)) {
-               /* Task was moved, retrying. */
-               raw_spin_unlock(&rq->lock);
-               goto again;
-       }
+       rq = task_rq_lock(current, &flags);
 
        /*
         * We need to take care of several possible races here:
@@ -541,6 +535,26 @@ again:
 
        sched_clock_tick();
        update_rq_clock(rq);
+
+       /*
+        * If the throttle happened during sched-out; like:
+        *
+        *   schedule()
+        *     deactivate_task()
+        *       dequeue_task_dl()
+        *         update_curr_dl()
+        *           start_dl_timer()
+        *         __dequeue_task_dl()
+        *     prev->on_rq = 0;
+        *
+        * We can be both throttled and !queued. Replenish the counter
+        * but do not enqueue -- wait for our wakeup to do that.
+        */
+       if (!task_on_rq_queued(p)) {
+               replenish_dl_entity(dl_se, dl_se);
+               goto unlock;
+       }
+
        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
@@ -555,7 +569,7 @@ again:
                push_dl_task(rq);
 #endif
 unlock:
-       raw_spin_unlock(&rq->lock);
+       task_rq_unlock(rq, current, &flags);
 
        return HRTIMER_NORESTART;
 }
@@ -898,6 +912,7 @@ static void yield_task_dl(struct rq *rq)
                rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
+       update_rq_clock(rq);
        update_curr_dl(rq);
 }
 
index 94b2d7b88a272856bf0f49ac0f0bdce430a5255d..80014a17834214fcad51add08b2b171463e84128 100644 (file)
@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void)
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
        unsigned int broadcast;
+       bool reflect;
 
        /*
         * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void)
         */
        rcu_idle_enter();
 
+       if (cpuidle_not_available(drv, dev))
+               goto use_default;
+
        /*
         * Suspend-to-idle ("freeze") is a system state in which all user space
         * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void)
         * until a proper wakeup interrupt happens.
         */
        if (idle_should_freeze()) {
-               cpuidle_enter_freeze();
-               local_irq_enable();
-               goto exit_idle;
-       }
+               entered_state = cpuidle_enter_freeze(drv, dev);
+               if (entered_state >= 0) {
+                       local_irq_enable();
+                       goto exit_idle;
+               }
 
-       /*
-        * Ask the cpuidle framework to choose a convenient idle state.
-        * Fall back to the default arch idle method on errors.
-        */
-       next_state = cpuidle_select(drv, dev);
-       if (next_state < 0) {
-use_default:
+               reflect = false;
+               next_state = cpuidle_find_deepest_state(drv, dev);
+       } else {
+               reflect = true;
                /*
-                * We can't use the cpuidle framework, let's use the default
-                * idle routine.
+                * Ask the cpuidle framework to choose a convenient idle state.
                 */
-               if (current_clr_polling_and_test())
-                       local_irq_enable();
-               else
-                       arch_cpu_idle();
-
-               goto exit_idle;
+               next_state = cpuidle_select(drv, dev);
        }
-
+       /* Fall back to the default arch idle method on errors. */
+       if (next_state < 0)
+               goto use_default;
 
        /*
         * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@ use_default:
        /*
         * Give the governor an opportunity to reflect on the outcome
         */
-       cpuidle_reflect(dev, entered_state);
+       if (reflect)
+               cpuidle_reflect(dev, entered_state);
 
 exit_idle:
        __current_set_polling();
@@ -196,6 +195,19 @@ exit_idle:
 
        rcu_idle_exit();
        start_critical_timings();
+       return;
+
+use_default:
+       /*
+        * We can't use the cpuidle framework, let's use the default
+        * idle routine.
+        */
+       if (current_clr_polling_and_test())
+               local_irq_enable();
+       else
+               arch_cpu_idle();
+
+       goto exit_idle;
 }
 
 /*
index 0870db23d79cb3c0578b4f4b3450f5dad02c929e..dc0f435a27794657258623ac8a7f53f7326ff7ac 100644 (file)
@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       lockdep_assert_held(&p->pi_lock);
+
+       for (;;) {
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
+       }
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+       __acquires(p->pi_lock)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       for (;;) {
+               raw_spin_lock_irqsave(&p->pi_lock, *flags);
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               /*
+                *      move_queued_task()              task_rq_lock()
+                *
+                *      ACQUIRE (rq->lock)
+                *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
+                *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
+                *      [S] ->cpu = new_cpu             [L] task_rq()
+                *                                      [L] ->on_rq
+                *      RELEASE (rq->lock)
+                *
+                * If we observe the old cpu in task_rq_lock, the acquire of
+                * the old rq->lock will fully serialize against the stores.
+                *
+                * If we observe the new cpu in task_rq_lock, the acquire will
+                * pair with the WMB to ensure we must then also see migrating.
+                */
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
+       }
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+       __releases(rq->lock)
+{
+       raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
+{
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
index ea9c881098941ecd9bbb42fa69a3ddb958d2ec41..a03d9cd23ed779b2a38d8bf564bac6860d934f2f 100644 (file)
 #ifndef MPX_DISABLE_MANAGEMENT
 # define MPX_DISABLE_MANAGEMENT(a)     (-EINVAL)
 #endif
+#ifndef GET_FP_MODE
+# define GET_FP_MODE(a)                (-EINVAL)
+#endif
+#ifndef SET_FP_MODE
+# define SET_FP_MODE(a,b)      (-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -1102,6 +1108,7 @@ DECLARE_RWSEM(uts_sem);
 /*
  * Work around broken programs that cannot handle "Linux 3.0".
  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
  */
 static int override_release(char __user *release, size_t len)
 {
@@ -1121,7 +1128,7 @@ static int override_release(char __user *release, size_t len)
                                break;
                        rest++;
                }
-               v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+               v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
                copy = clamp_t(size_t, len, 1, sizeof(buf));
                copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
                ret = copy_to_user(release, buf, copy + 1);
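
A quick arithmetic check of the new constant, using 4.1.0 as an assumed example kernel version:

	/* Assumed example: a 4.1 kernel, LINUX_VERSION_CODE = 0x040100. */
	v = ((0x040100 >> 8) & 0xff) + 60;	/* (0x0401 & 0xff) + 60 = 61 */
	/* override_release() then reports "2.6.61" to the broken program. */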
@@ -2219,6 +2226,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        return -EINVAL;
                error = MPX_DISABLE_MANAGEMENT(me);
                break;
+       case PR_SET_FP_MODE:
+               error = SET_FP_MODE(me, arg2);
+               break;
+       case PR_GET_FP_MODE:
+               error = GET_FP_MODE(me);
+               break;
        default:
                error = -EINVAL;
                break;
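
PR_SET_FP_MODE/PR_GET_FP_MODE are wired up here with -EINVAL defaults for architectures that do not provide GET_FP_MODE/SET_FP_MODE. A hedged sketch of how userspace might exercise the new options; the fallback constant values and the error handling are illustrative assumptions, not part of this patch:

	#include <sys/prctl.h>

	#ifndef PR_GET_FP_MODE		/* may be absent from older uapi headers */
	#define PR_SET_FP_MODE	45
	#define PR_GET_FP_MODE	46
	#endif

	int main(void)
	{
		int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

		if (mode < 0)		/* errno == EINVAL where unsupported */
			return 0;
		return prctl(PR_SET_FP_MODE, mode, 0, 0, 0) < 0;
	}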
index 4b585e0fdd22e16288f688baa1051395836461d5..0f60b08a4f073e9246ced1dc3b5de5f50efd7cf4 100644 (file)
@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
        if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                return -EPERM;
 
-       if (txc->modes & ADJ_FREQUENCY) {
-               if (LONG_MIN / PPM_SCALE > txc->freq)
+       /*
+        * Check for potential multiplication overflows that can
+        * only happen on 64-bit systems:
+        */
+       if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
+               if (LLONG_MIN / PPM_SCALE > txc->freq)
                        return -EINVAL;
-               if (LONG_MAX / PPM_SCALE < txc->freq)
+               if (LLONG_MAX / PPM_SCALE < txc->freq)
                        return -EINVAL;
        }
 
index 45e5cb143d173d979576689dbc8e7a66703eee06..4f228024055b119d93279705ec4c42d7475c72f7 100644 (file)
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
                if (!ftrace_rec_count(rec))
                        rec->flags = 0;
                else
-                       /* Just disable the record (keep REGS state) */
-                       rec->flags &= ~FTRACE_FL_ENABLED;
+                       /*
+                        * Just disable the record, but keep the ops TRAMP
+                        * and REGS states. The _EN flags must be disabled though.
+                        */
+                       rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
+                                       FTRACE_FL_REGS_EN);
        }
 
        return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 static void ftrace_startup_sysctl(void)
 {
+       int command;
+
        if (unlikely(ftrace_disabled))
                return;
 
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
-       if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+       if (ftrace_start_up) {
+               command = FTRACE_UPDATE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_START_FUNC_RET;
+               ftrace_startup_enable(command);
+       }
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+       int command;
+
        if (unlikely(ftrace_disabled))
                return;
 
        /* ftrace_start_up is true if ftrace is running */
-       if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+       if (ftrace_start_up) {
+               command = FTRACE_DISABLE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_STOP_FUNC_RET;
+               ftrace_run_update_code(command);
+       }
 }
 
 static cycle_t         ftrace_update_time;
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
        if (ftrace_enabled) {
 
-               ftrace_startup_sysctl();
-
                /* we are starting ftrace again */
                if (ftrace_ops_list != &ftrace_list_end)
                        update_ftrace_function();
 
+               ftrace_startup_sysctl();
+
        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };
 
-static int ftrace_graph_active;
-
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
        return 0;
index f2884939479109f349b6291a82ee4cc990aa1c32..41ff75b478c60b443cd80626351b1e3f3030f94b 100644 (file)
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+struct cwt_wait {
+       wait_queue_t            wait;
+       struct work_struct      *work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+       struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+       if (cwait->work != key)
+               return 0;
+       return autoremove_wake_function(wait, mode, sync, key);
+}
+
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+       static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
        unsigned long flags;
        int ret;
 
        do {
                ret = try_to_grab_pending(work, is_dwork, &flags);
                /*
-                * If someone else is canceling, wait for the same event it
-                * would be waiting for before retrying.
+                * If someone else is already canceling, wait for it to
+                * finish.  flush_work() doesn't work for PREEMPT_NONE
+                * because we may get scheduled between @work's completion
+                * and the other canceling task resuming and clearing
+                * CANCELING - flush_work() will return false immediately
+                * as @work is no longer busy, try_to_grab_pending() will
+                * return -ENOENT as @work is still being canceled and the
+                * other canceling task won't be able to clear CANCELING as
+                * we're hogging the CPU.
+                *
+                * Let's wait for completion using a waitqueue.  As this
+                * may lead to the thundering herd problem, use a custom
+                * wake function which matches @work along with exclusive
+                * wait and wakeup.
                 */
-               if (unlikely(ret == -ENOENT))
-                       flush_work(work);
+               if (unlikely(ret == -ENOENT)) {
+                       struct cwt_wait cwait;
+
+                       init_wait(&cwait.wait);
+                       cwait.wait.func = cwt_wakefn;
+                       cwait.work = work;
+
+                       prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+                                                 TASK_UNINTERRUPTIBLE);
+                       if (work_is_canceling(work))
+                               schedule();
+                       finish_wait(&cancel_waitq, &cwait.wait);
+               }
        } while (unlikely(ret < 0));
 
        /* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
        flush_work(work);
        clear_work_data(work);
+
+       /*
+        * Paired with prepare_to_wait() above so that either
+        * waitqueue_active() is visible here or !work_is_canceling() is
+        * visible there.
+        */
+       smp_mb();
+       if (waitqueue_active(&cancel_waitq))
+               __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
        return ret;
 }
 
index 87eb3bffc283aa8c0e65c5678729dbad484bd5bf..58f74d2dd3967a3bf0a6d9f5dc9996bfeb9bc842 100644 (file)
@@ -24,7 +24,7 @@ obj-y += lockref.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
-        gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
+        gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
         percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
new file mode 100644 (file)
index 0000000..9d96e28
--- /dev/null
@@ -0,0 +1,768 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/checksum.h>
+
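+/*
+ * iterate_iovec/iterate_kvec/iterate_bvec walk one segment array, exposing
+ * each chunk as __v and running STEP on it (for iovec, STEP returns the
+ * number of bytes it failed to copy, which ends the walk early).
+ * iterate_all_kinds() picks the walker matching i->type; iterate_and_advance()
+ * additionally consumes what was processed by updating the iterator's segment
+ * pointer, nr_segs, iov_offset and count.
+ */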
+#define iterate_iovec(i, n, __v, __p, skip, STEP) {    \
+       size_t left;                                    \
+       size_t wanted = n;                              \
+       __p = i->iov;                                   \
+       __v.iov_len = min(n, __p->iov_len - skip);      \
+       if (likely(__v.iov_len)) {                      \
+               __v.iov_base = __p->iov_base + skip;    \
+               left = (STEP);                          \
+               __v.iov_len -= left;                    \
+               skip += __v.iov_len;                    \
+               n -= __v.iov_len;                       \
+       } else {                                        \
+               left = 0;                               \
+       }                                               \
+       while (unlikely(!left && n)) {                  \
+               __p++;                                  \
+               __v.iov_len = min(n, __p->iov_len);     \
+               if (unlikely(!__v.iov_len))             \
+                       continue;                       \
+               __v.iov_base = __p->iov_base;           \
+               left = (STEP);                          \
+               __v.iov_len -= left;                    \
+               skip = __v.iov_len;                     \
+               n -= __v.iov_len;                       \
+       }                                               \
+       n = wanted - n;                                 \
+}
+
+#define iterate_kvec(i, n, __v, __p, skip, STEP) {     \
+       size_t wanted = n;                              \
+       __p = i->kvec;                                  \
+       __v.iov_len = min(n, __p->iov_len - skip);      \
+       if (likely(__v.iov_len)) {                      \
+               __v.iov_base = __p->iov_base + skip;    \
+               (void)(STEP);                           \
+               skip += __v.iov_len;                    \
+               n -= __v.iov_len;                       \
+       }                                               \
+       while (unlikely(n)) {                           \
+               __p++;                                  \
+               __v.iov_len = min(n, __p->iov_len);     \
+               if (unlikely(!__v.iov_len))             \
+                       continue;                       \
+               __v.iov_base = __p->iov_base;           \
+               (void)(STEP);                           \
+               skip = __v.iov_len;                     \
+               n -= __v.iov_len;                       \
+       }                                               \
+       n = wanted;                                     \
+}
+
+#define iterate_bvec(i, n, __v, __p, skip, STEP) {     \
+       size_t wanted = n;                              \
+       __p = i->bvec;                                  \
+       __v.bv_len = min_t(size_t, n, __p->bv_len - skip);      \
+       if (likely(__v.bv_len)) {                       \
+               __v.bv_page = __p->bv_page;             \
+               __v.bv_offset = __p->bv_offset + skip;  \
+               (void)(STEP);                           \
+               skip += __v.bv_len;                     \
+               n -= __v.bv_len;                        \
+       }                                               \
+       while (unlikely(n)) {                           \
+               __p++;                                  \
+               __v.bv_len = min_t(size_t, n, __p->bv_len);     \
+               if (unlikely(!__v.bv_len))              \
+                       continue;                       \
+               __v.bv_page = __p->bv_page;             \
+               __v.bv_offset = __p->bv_offset;         \
+               (void)(STEP);                           \
+               skip = __v.bv_len;                      \
+               n -= __v.bv_len;                        \
+       }                                               \
+       n = wanted;                                     \
+}
+
+#define iterate_all_kinds(i, n, v, I, B, K) {                  \
+       size_t skip = i->iov_offset;                            \
+       if (unlikely(i->type & ITER_BVEC)) {                    \
+               const struct bio_vec *bvec;                     \
+               struct bio_vec v;                               \
+               iterate_bvec(i, n, v, bvec, skip, (B))          \
+       } else if (unlikely(i->type & ITER_KVEC)) {             \
+               const struct kvec *kvec;                        \
+               struct kvec v;                                  \
+               iterate_kvec(i, n, v, kvec, skip, (K))          \
+       } else {                                                \
+               const struct iovec *iov;                        \
+               struct iovec v;                                 \
+               iterate_iovec(i, n, v, iov, skip, (I))          \
+       }                                                       \
+}
+
+#define iterate_and_advance(i, n, v, I, B, K) {                        \
+       size_t skip = i->iov_offset;                            \
+       if (unlikely(i->type & ITER_BVEC)) {                    \
+               const struct bio_vec *bvec;                     \
+               struct bio_vec v;                               \
+               iterate_bvec(i, n, v, bvec, skip, (B))          \
+               if (skip == bvec->bv_len) {                     \
+                       bvec++;                                 \
+                       skip = 0;                               \
+               }                                               \
+               i->nr_segs -= bvec - i->bvec;                   \
+               i->bvec = bvec;                                 \
+       } else if (unlikely(i->type & ITER_KVEC)) {             \
+               const struct kvec *kvec;                        \
+               struct kvec v;                                  \
+               iterate_kvec(i, n, v, kvec, skip, (K))          \
+               if (skip == kvec->iov_len) {                    \
+                       kvec++;                                 \
+                       skip = 0;                               \
+               }                                               \
+               i->nr_segs -= kvec - i->kvec;                   \
+               i->kvec = kvec;                                 \
+       } else {                                                \
+               const struct iovec *iov;                        \
+               struct iovec v;                                 \
+               iterate_iovec(i, n, v, iov, skip, (I))          \
+               if (skip == iov->iov_len) {                     \
+                       iov++;                                  \
+                       skip = 0;                               \
+               }                                               \
+               i->nr_segs -= iov - i->iov;                     \
+               i->iov = iov;                                   \
+       }                                                       \
+       i->count -= n;                                          \
+       i->iov_offset = skip;                                   \
+}
+
+static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
+                        struct iov_iter *i)
+{
+       size_t skip, copy, left, wanted;
+       const struct iovec *iov;
+       char __user *buf;
+       void *kaddr, *from;
+
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       wanted = bytes;
+       iov = i->iov;
+       skip = i->iov_offset;
+       buf = iov->iov_base + skip;
+       copy = min(bytes, iov->iov_len - skip);
+
+       if (!fault_in_pages_writeable(buf, copy)) {
+               kaddr = kmap_atomic(page);
+               from = kaddr + offset;
+
+               /* first chunk, usually the only one */
+               left = __copy_to_user_inatomic(buf, from, copy);
+               copy -= left;
+               skip += copy;
+               from += copy;
+               bytes -= copy;
+
+               while (unlikely(!left && bytes)) {
+                       iov++;
+                       buf = iov->iov_base;
+                       copy = min(bytes, iov->iov_len);
+                       left = __copy_to_user_inatomic(buf, from, copy);
+                       copy -= left;
+                       skip = copy;
+                       from += copy;
+                       bytes -= copy;
+               }
+               if (likely(!bytes)) {
+                       kunmap_atomic(kaddr);
+                       goto done;
+               }
+               offset = from - kaddr;
+               buf += copy;
+               kunmap_atomic(kaddr);
+               copy = min(bytes, iov->iov_len - skip);
+       }
+       /* Too bad - revert to non-atomic kmap */
+       kaddr = kmap(page);
+       from = kaddr + offset;
+       left = __copy_to_user(buf, from, copy);
+       copy -= left;
+       skip += copy;
+       from += copy;
+       bytes -= copy;
+       while (unlikely(!left && bytes)) {
+               iov++;
+               buf = iov->iov_base;
+               copy = min(bytes, iov->iov_len);
+               left = __copy_to_user(buf, from, copy);
+               copy -= left;
+               skip = copy;
+               from += copy;
+               bytes -= copy;
+       }
+       kunmap(page);
+done:
+       if (skip == iov->iov_len) {
+               iov++;
+               skip = 0;
+       }
+       i->count -= wanted - bytes;
+       i->nr_segs -= iov - i->iov;
+       i->iov = iov;
+       i->iov_offset = skip;
+       return wanted - bytes;
+}
+
+static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
+                        struct iov_iter *i)
+{
+       size_t skip, copy, left, wanted;
+       const struct iovec *iov;
+       char __user *buf;
+       void *kaddr, *to;
+
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       wanted = bytes;
+       iov = i->iov;
+       skip = i->iov_offset;
+       buf = iov->iov_base + skip;
+       copy = min(bytes, iov->iov_len - skip);
+
+       if (!fault_in_pages_readable(buf, copy)) {
+               kaddr = kmap_atomic(page);
+               to = kaddr + offset;
+
+               /* first chunk, usually the only one */
+               left = __copy_from_user_inatomic(to, buf, copy);
+               copy -= left;
+               skip += copy;
+               to += copy;
+               bytes -= copy;
+
+               while (unlikely(!left && bytes)) {
+                       iov++;
+                       buf = iov->iov_base;
+                       copy = min(bytes, iov->iov_len);
+                       left = __copy_from_user_inatomic(to, buf, copy);
+                       copy -= left;
+                       skip = copy;
+                       to += copy;
+                       bytes -= copy;
+               }
+               if (likely(!bytes)) {
+                       kunmap_atomic(kaddr);
+                       goto done;
+               }
+               offset = to - kaddr;
+               buf += copy;
+               kunmap_atomic(kaddr);
+               copy = min(bytes, iov->iov_len - skip);
+       }
+       /* Too bad - revert to non-atomic kmap */
+       kaddr = kmap(page);
+       to = kaddr + offset;
+       left = __copy_from_user(to, buf, copy);
+       copy -= left;
+       skip += copy;
+       to += copy;
+       bytes -= copy;
+       while (unlikely(!left && bytes)) {
+               iov++;
+               buf = iov->iov_base;
+               copy = min(bytes, iov->iov_len);
+               left = __copy_from_user(to, buf, copy);
+               copy -= left;
+               skip = copy;
+               to += copy;
+               bytes -= copy;
+       }
+       kunmap(page);
+done:
+       if (skip == iov->iov_len) {
+               iov++;
+               skip = 0;
+       }
+       i->count -= wanted - bytes;
+       i->nr_segs -= iov - i->iov;
+       i->iov = iov;
+       i->iov_offset = skip;
+       return wanted - bytes;
+}
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (i.e. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+               return fault_in_pages_readable(buf, bytes);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+void iov_iter_init(struct iov_iter *i, int direction,
+                       const struct iovec *iov, unsigned long nr_segs,
+                       size_t count)
+{
+       /* It will get better.  Eventually... */
+       if (segment_eq(get_fs(), KERNEL_DS)) {
+               direction |= ITER_KVEC;
+               i->type = direction;
+               i->kvec = (struct kvec *)iov;
+       } else {
+               i->type = direction;
+               i->iov = iov;
+       }
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_init);
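+/*
+ * Typical use (illustrative sketch only; the variable names are made up):
+ * a read-side path wraps the caller's iovec array and then copies kernel
+ * data into it:
+ *
+ *	struct iov_iter iter;
+ *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
+ *
+ *	iov_iter_init(&iter, READ, &iov, 1, len);
+ *	copied = copy_to_iter(kbuf, len, &iter);
+ *
+ * READ marks the iterator as a destination; WRITE marks it as a source
+ * (as consumed by copy_from_iter() and friends).
+ */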
+
+static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
+{
+       char *from = kmap_atomic(page);
+       memcpy(to, from + offset, len);
+       kunmap_atomic(from);
+}
+
+static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
+{
+       char *to = kmap_atomic(page);
+       memcpy(to + offset, from, len);
+       kunmap_atomic(to);
+}
+
+static void memzero_page(struct page *page, size_t offset, size_t len)
+{
+       char *addr = kmap_atomic(page);
+       memset(addr + offset, 0, len);
+       kunmap_atomic(addr);
+}
+
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+       char *from = addr;
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       iterate_and_advance(i, bytes, v,
+               __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
+                              v.iov_len),
+               memcpy_to_page(v.bv_page, v.bv_offset,
+                              (from += v.bv_len) - v.bv_len, v.bv_len),
+               memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
+       )
+
+       return bytes;
+}
+EXPORT_SYMBOL(copy_to_iter);
+
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+       char *to = addr;
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       iterate_and_advance(i, bytes, v,
+               __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
+                                v.iov_len),
+               memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+                                v.bv_offset, v.bv_len),
+               memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+       )
+
+       return bytes;
+}
+EXPORT_SYMBOL(copy_from_iter);
+
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+       char *to = addr;
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       iterate_and_advance(i, bytes, v,
+               __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+                                        v.iov_base, v.iov_len),
+               memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+                                v.bv_offset, v.bv_len),
+               memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+       )
+
+       return bytes;
+}
+EXPORT_SYMBOL(copy_from_iter_nocache);
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+                        struct iov_iter *i)
+{
+       if (i->type & (ITER_BVEC|ITER_KVEC)) {
+               void *kaddr = kmap_atomic(page);
+               size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
+               kunmap_atomic(kaddr);
+               return wanted;
+       } else
+               return copy_page_to_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+                        struct iov_iter *i)
+{
+       if (i->type & (ITER_BVEC|ITER_KVEC)) {
+               void *kaddr = kmap_atomic(page);
+               size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+               kunmap_atomic(kaddr);
+               return wanted;
+       } else
+               return copy_page_from_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       iterate_and_advance(i, bytes, v,
+               __clear_user(v.iov_base, v.iov_len),
+               memzero_page(v.bv_page, v.bv_offset, v.bv_len),
+               memset(v.iov_base, 0, v.iov_len)
+       )
+
+       return bytes;
+}
+EXPORT_SYMBOL(iov_iter_zero);
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+       iterate_all_kinds(i, bytes, v,
+               __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
+                                         v.iov_base, v.iov_len),
+               memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
+                                v.bv_offset, v.bv_len),
+               memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+       )
+       kunmap_atomic(kaddr);
+       return bytes;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+       iterate_and_advance(i, size, v, 0, 0, 0)
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+       if (i->nr_segs == 1)
+               return i->count;
+       else if (i->type & ITER_BVEC)
+               return min(i->count, i->bvec->bv_len - i->iov_offset);
+       else
+               return min(i->count, i->iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
+
+void iov_iter_kvec(struct iov_iter *i, int direction,
+                       const struct kvec *kvec, unsigned long nr_segs,
+                       size_t count)
+{
+       BUG_ON(!(direction & ITER_KVEC));
+       i->type = direction;
+       i->kvec = kvec;
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_kvec);
+
+void iov_iter_bvec(struct iov_iter *i, int direction,
+                       const struct bio_vec *bvec, unsigned long nr_segs,
+                       size_t count)
+{
+       BUG_ON(!(direction & ITER_BVEC));
+       i->type = direction;
+       i->bvec = bvec;
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count;
+}
+EXPORT_SYMBOL(iov_iter_bvec);
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+       unsigned long res = 0;
+       size_t size = i->count;
+
+       if (!size)
+               return 0;
+
+       iterate_all_kinds(i, size, v,
+               (res |= (unsigned long)v.iov_base | v.iov_len, 0),
+               res |= v.bv_offset | v.bv_len,
+               res |= (unsigned long)v.iov_base | v.iov_len
+       )
+       return res;
+}
+EXPORT_SYMBOL(iov_iter_alignment);
+
+ssize_t iov_iter_get_pages(struct iov_iter *i,
+                  struct page **pages, size_t maxsize, unsigned maxpages,
+                  size_t *start)
+{
+       if (maxsize > i->count)
+               maxsize = i->count;
+
+       if (!maxsize)
+               return 0;
+
+       iterate_all_kinds(i, maxsize, v, ({
+               unsigned long addr = (unsigned long)v.iov_base;
+               size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
+               int n;
+               int res;
+
+               if (len > maxpages * PAGE_SIZE)
+                       len = maxpages * PAGE_SIZE;
+               addr &= ~(PAGE_SIZE - 1);
+               n = DIV_ROUND_UP(len, PAGE_SIZE);
+               res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+               if (unlikely(res < 0))
+                       return res;
+               return (res == n ? len : res * PAGE_SIZE) - *start;
+       0;}),({
+               /* can't be more than PAGE_SIZE */
+               *start = v.bv_offset;
+               get_page(*pages = v.bv_page);
+               return v.bv_len;
+       }),({
+               return -EFAULT;
+       })
+       )
+       return 0;
+}
+EXPORT_SYMBOL(iov_iter_get_pages);
+
+static struct page **get_pages_array(size_t n)
+{
+       struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
+       if (!p)
+               p = vmalloc(n * sizeof(struct page *));
+       return p;
+}
+
+ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+                  struct page ***pages, size_t maxsize,
+                  size_t *start)
+{
+       struct page **p;
+
+       if (maxsize > i->count)
+               maxsize = i->count;
+
+       if (!maxsize)
+               return 0;
+
+       iterate_all_kinds(i, maxsize, v, ({
+               unsigned long addr = (unsigned long)v.iov_base;
+               size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
+               int n;
+               int res;
+
+               addr &= ~(PAGE_SIZE - 1);
+               n = DIV_ROUND_UP(len, PAGE_SIZE);
+               p = get_pages_array(n);
+               if (!p)
+                       return -ENOMEM;
+               res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+               if (unlikely(res < 0)) {
+                       kvfree(p);
+                       return res;
+               }
+               *pages = p;
+               return (res == n ? len : res * PAGE_SIZE) - *start;
+       0;}),({
+               /* can't be more than PAGE_SIZE */
+               *start = v.bv_offset;
+               *pages = p = get_pages_array(1);
+               if (!p)
+                       return -ENOMEM;
+               get_page(*p = v.bv_page);
+               return v.bv_len;
+       }),({
+               return -EFAULT;
+       })
+       )
+       return 0;
+}
+EXPORT_SYMBOL(iov_iter_get_pages_alloc);
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
+                              struct iov_iter *i)
+{
+       char *to = addr;
+       __wsum sum, next;
+       size_t off = 0;
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       sum = *csum;
+       iterate_and_advance(i, bytes, v, ({
+               int err = 0;
+               next = csum_and_copy_from_user(v.iov_base, 
+                                              (to += v.iov_len) - v.iov_len,
+                                              v.iov_len, 0, &err);
+               if (!err) {
+                       sum = csum_block_add(sum, next, off);
+                       off += v.iov_len;
+               }
+               err ? v.iov_len : 0;
+       }), ({
+               char *p = kmap_atomic(v.bv_page);
+               next = csum_partial_copy_nocheck(p + v.bv_offset,
+                                                (to += v.bv_len) - v.bv_len,
+                                                v.bv_len, 0);
+               kunmap_atomic(p);
+               sum = csum_block_add(sum, next, off);
+               off += v.bv_len;
+       }),({
+               next = csum_partial_copy_nocheck(v.iov_base,
+                                                (to += v.iov_len) - v.iov_len,
+                                                v.iov_len, 0);
+               sum = csum_block_add(sum, next, off);
+               off += v.iov_len;
+       })
+       )
+       *csum = sum;
+       return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter);
+
+size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
+                            struct iov_iter *i)
+{
+       char *from = addr;
+       __wsum sum, next;
+       size_t off = 0;
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       sum = *csum;
+       iterate_and_advance(i, bytes, v, ({
+               int err = 0;
+               next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
+                                            v.iov_base, 
+                                            v.iov_len, 0, &err);
+               if (!err) {
+                       sum = csum_block_add(sum, next, off);
+                       off += v.iov_len;
+               }
+               err ? v.iov_len : 0;
+       }), ({
+               char *p = kmap_atomic(v.bv_page);
+               next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
+                                                p + v.bv_offset,
+                                                v.bv_len, 0);
+               kunmap_atomic(p);
+               sum = csum_block_add(sum, next, off);
+               off += v.bv_len;
+       }),({
+               next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
+                                                v.iov_base,
+                                                v.iov_len, 0);
+               sum = csum_block_add(sum, next, off);
+               off += v.iov_len;
+       })
+       )
+       *csum = sum;
+       return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_to_iter);
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+       size_t size = i->count;
+       int npages = 0;
+
+       if (!size)
+               return 0;
+
+       iterate_all_kinds(i, size, v, ({
+               unsigned long p = (unsigned long)v.iov_base;
+               npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+                       - p / PAGE_SIZE;
+               if (npages >= maxpages)
+                       return maxpages;
+       0;}),({
+               npages++;
+               if (npages >= maxpages)
+                       return maxpages;
+       }),({
+               unsigned long p = (unsigned long)v.iov_base;
+               npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+                       - p / PAGE_SIZE;
+               if (npages >= maxpages)
+                       return maxpages;
+       })
+       )
+       return npages;
+}
+EXPORT_SYMBOL(iov_iter_npages);
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
+{
+       *new = *old;
+       if (new->type & ITER_BVEC)
+               return new->bvec = kmemdup(new->bvec,
+                                   new->nr_segs * sizeof(struct bio_vec),
+                                   flags);
+       else
+               /* iovec and kvec have identical layout */
+               return new->iov = kmemdup(new->iov,
+                                  new->nr_segs * sizeof(struct iovec),
+                                  flags);
+}
+EXPORT_SYMBOL(dup_iter);
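
The checksum-and-copy helpers added above fold each segment's checksum into a running sum with csum_block_add(), which must compensate when a segment starts at an odd byte offset in the stream. A minimal userspace sketch of that folding rule, with a made-up buffer and split point (not the kernel implementation):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Ones'-complement (Internet) checksum of a byte range, folded to 16 bits. */
	static uint32_t csum_range(const uint8_t *buf, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)buf[i] << 8 | buf[i + 1];
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;	/* odd tail padded with zero */
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
		return sum;
	}

	/* Analogue of csum_block_add(): fold a block's checksum in at byte offset 'off'. */
	static uint32_t block_add(uint32_t sum, uint32_t block, size_t off)
	{
		if (off & 1)		/* odd offset: the block's bytes land byte-swapped */
			block = ((block & 0xff) << 8) | (block >> 8);
		sum += block;
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return sum;
	}

	int main(void)
	{
		const char *msg = "iov_iter checksum demo";
		const uint8_t *data = (const uint8_t *)msg;
		size_t len = strlen(msg), split = 5;	/* odd split exercises the swap */
		uint32_t whole, combined;

		whole = csum_range(data, len);
		combined = block_add(csum_range(data, split),
				     csum_range(data + split, len - split), split);

		printf("whole=%04x combined=%04x\n", whole, combined);
		assert(whole == combined);
		return 0;
	}

With an even split the byte swap is a no-op, which is why only the low bit of the offset matters.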
index 9cc4c4a90d00686228bebdfe55b212c34e98206f..b5344ef4c6846c4f9256c1d0d418f774284c8fcc 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets)
 {
-       struct bucket_table *tbl;
+       struct bucket_table *tbl = NULL;
        size_t size;
        int i;
 
        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-       tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL)
                tbl = vzalloc(size);
-
        if (tbl == NULL)
                return NULL;
 
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht:                hash table
  * @new_size:  new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-              (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+              (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:                hash table
  * @new_size:  new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
               (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
                         struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
                        }
                }
                unlock_buckets(new_tbl, old_tbl, new_hash);
+               cond_resched();
        }
 
        /* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
                                complete = false;
 
                        unlock_buckets(new_tbl, old_tbl, old_hash);
+                       cond_resched();
                }
        }
 
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
                                   tbl->buckets[new_hash + new_tbl->size]);
 
                unlock_buckets(new_tbl, tbl, new_hash);
+               cond_resched();
        }
 
        /* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
        list_for_each_entry(walker, &ht->walkers, list)
                walker->resize = true;
 
-       if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+       if (rht_grow_above_75(ht, tbl->size))
                rhashtable_expand(ht);
-       else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+       else if (rht_shrink_below_30(ht, tbl->size))
                rhashtable_shrink(ht);
-
 unlock:
        mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
-{
-       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-       struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-       size_t size = tbl->size;
-
-       /* Only adjust the table if no resizing is currently in progress. */
-       if (tbl == new_tbl &&
-           ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-            (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
-               schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-                               struct bucket_table *tbl, u32 hash)
+                               struct bucket_table *tbl,
+                               const struct bucket_table *old_tbl, u32 hash)
 {
+       bool no_resize_running = tbl == old_tbl;
        struct rhash_head *head;
 
        hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
        rcu_assign_pointer(tbl->buckets[hash], obj);
 
        atomic_inc(&ht->nelems);
-
-       rhashtable_wakeup_worker(ht);
+       if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+               schedule_work(&ht->run_work);
 }
 
 /**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
        hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
        lock_buckets(tbl, old_tbl, hash);
-       __rhashtable_insert(ht, obj, tbl, hash);
+       __rhashtable_insert(ht, obj, tbl, old_tbl, hash);
        unlock_buckets(tbl, old_tbl, hash);
 
        rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
        unlock_buckets(new_tbl, old_tbl, new_hash);
 
        if (ret) {
+               bool no_resize_running = new_tbl == old_tbl;
+
                atomic_dec(&ht->nelems);
-               rhashtable_wakeup_worker(ht);
+               if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+                       schedule_work(&ht->run_work);
        }
 
        rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
                goto exit;
        }
 
-       __rhashtable_insert(ht, obj, new_tbl, new_hash);
+       __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
        unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
        if (!iter->walker)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&iter->walker->list);
+       iter->walker->resize = false;
+
        mutex_lock(&ht->mutex);
        list_add(&iter->walker->list, &ht->walkers);
        mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-       if (ht->p.grow_decision || ht->p.shrink_decision)
-               INIT_WORK(&ht->run_work, rht_deferred_worker);
+       INIT_WORK(&ht->run_work, rht_deferred_worker);
 
        return 0;
 }
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
        ht->being_destroyed = true;
 
-       if (ht->p.grow_decision || ht->p.shrink_decision)
-               cancel_work_sync(&ht->run_work);
+       cancel_work_sync(&ht->run_work);
 
        mutex_lock(&ht->mutex);
        bucket_table_free(rht_dereference(ht->tbl, ht));
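
With the grow_decision/shrink_decision hooks removed above, resizing is driven by the fixed 75%/30% watermarks in rht_grow_above_75() and rht_shrink_below_30() (which additionally respect max_shift/min_shift). A small standalone sketch of just the integer threshold arithmetic, for a hypothetical 1024-bucket table:

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirrors the integer math of the 75% / 30% watermarks, shift limits ignored. */
	static bool grow_above_75(unsigned int nelems, unsigned int size)
	{
		return nelems > size / 4 * 3;		/* e.g. > 768 for 1024 buckets */
	}

	static bool shrink_below_30(unsigned int nelems, unsigned int size)
	{
		return nelems < size * 3 / 10;		/* e.g. < 307 for 1024 buckets */
	}

	int main(void)
	{
		unsigned int size = 1024;

		printf("grow when nelems > %u, shrink when nelems < %u\n",
		       size / 4 * 3, size * 3 / 10);
		printf("nelems=800: grow=%d shrink=%d\n",
		       grow_above_75(800, size), shrink_below_30(800, size));
		return 0;
	}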
index 88c0854bd7527aca54ca5ec3f533efe896c5477a..5c94e1012a91f9ea65b20ca130f3f4aa4fb337f5 100644 (file)
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
 
        if (s->len < s->size) {
                len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
-               if (seq_buf_can_fit(s, len)) {
+               if (s->len + len < s->size) {
                        s->len += len;
                        return 0;
                }
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
 
        if (s->len < s->size) {
                ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
-               if (seq_buf_can_fit(s, ret)) {
+               if (s->len + ret < s->size) {
                        s->len += ret;
                        return 0;
                }
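
The open-coded fit check above works because vsnprintf() returns the length the formatted output needs even when it was truncated, so the length is only committed when the whole string fit in the remaining space. A minimal userspace sketch of the same append pattern, using a hypothetical 16-byte buffer rather than the kernel's seq_buf:

	#include <stdarg.h>
	#include <stdio.h>

	/* Hypothetical fixed-size buffer mimicking the seq_buf append pattern above. */
	struct buf {
		char data[16];
		size_t size;
		size_t len;
	};

	static int buf_printf(struct buf *s, const char *fmt, ...)
	{
		va_list args;
		int len;

		if (s->len >= s->size)
			return -1;
		va_start(args, fmt);
		len = vsnprintf(s->data + s->len, s->size - s->len, fmt, args);
		va_end(args);
		if (s->len + len < s->size) {	/* same fit check as the patch */
			s->len += len;
			return 0;
		}
		s->len = s->size;		/* mark the buffer as full */
		return -1;
	}

	int main(void)
	{
		struct buf s = { .size = sizeof(s.data), .len = 0 };

		printf("%d\n", buf_printf(&s, "abc %d ", 42));	/* fits: prints 0 */
		printf("%d\n", buf_printf(&s, "%s", "far too long for this buffer"));
		printf("len=%zu '%s'\n", s.len, s.data);
		return 0;
	}

The second call does not fit, so its length is never committed and the buffer is marked full.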
index 1dfeba73fc743718d94551e9356ec8c6580a1fac..67c7593d1dd69c91f646e21e47b589c40c808837 100644 (file)
@@ -191,18 +191,18 @@ error:
        return err;
 }
 
+static struct rhashtable ht;
+
 static int __init test_rht_init(void)
 {
-       struct rhashtable ht;
        struct rhashtable_params params = {
                .nelem_hint = TEST_HT_SIZE,
                .head_offset = offsetof(struct test_obj, node),
                .key_offset = offsetof(struct test_obj, value),
                .key_len = sizeof(int),
                .hashfn = jhash,
+               .max_shift = 1, /* we expand/shrink manually here */
                .nulls_base = (3U << RHT_BASE_SHIFT),
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
        int err;
 
@@ -222,6 +222,11 @@ static int __init test_rht_init(void)
        return err;
 }
 
+static void __exit test_rht_exit(void)
+{
+}
+
 module_init(test_rht_init);
+module_exit(test_rht_exit);
 
 MODULE_LICENSE("GPL v2");
index 3c1caa2693bd22bad68864896c5e02737aac31c5..15dbe9903c273f87c3e7c6a09e3c4a659647bd48 100644 (file)
@@ -21,7 +21,7 @@ obj-y                 := filemap.o mempool.o oom_kill.o \
                           mm_init.o mmu_context.o percpu.o slab_common.o \
                           compaction.o vmacache.o \
                           interval_tree.o list_lru.o workingset.o \
-                          iov_iter.o debug.o $(mmu-y)
+                          debug.o $(mmu-y)
 
 obj-y += init-mm.o
 
index 75016fd1de906280490352006ffc6845c90ddc46..68ecb7a42983a589fe4c057f109e327b352c8a0b 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
        return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
+/*
+ * Find a PFN aligned to the specified order and return an offset represented in
+ * order_per_bits.
+ */
 static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
 {
-       unsigned int alignment;
-
        if (align_order <= cma->order_per_bit)
                return 0;
-       alignment = 1UL << (align_order - cma->order_per_bit);
-       return ALIGN(cma->base_pfn, alignment) -
-               (cma->base_pfn >> cma->order_per_bit);
+
+       return (ALIGN(cma->base_pfn, (1UL << align_order))
+               - cma->base_pfn) >> cma->order_per_bit;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)
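
The rewritten cma_bitmap_aligned_offset() above aligns the base PFN itself and only then converts the distance into bitmap bits, whereas the old expression mixed PFN units with bitmap-bit units. A standalone illustration with made-up values for base_pfn, align_order and order_per_bit:

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long base_pfn = 1040;	/* made up: 16-page aligned, not 256-page aligned */
		int align_order = 8, order_per_bit = 4;

		/* New calculation: align the PFN itself, then convert to bitmap bits. */
		unsigned long new_off = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
					>> order_per_bit;

		/* Old calculation mixed PFN units with bitmap-bit units. */
		unsigned long old_off = ALIGN(base_pfn, 1UL << (align_order - order_per_bit))
					- (base_pfn >> order_per_bit);

		printf("new=%lu old=%lu\n", new_off, old_off);	/* new=15, old=975 */
		return 0;
	}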
index fc00c8cb5a82ee89addf4d2ee8983894bf6aa5d1..626e93db28ba162d11e7d286985604bbc523981c 100644 (file)
@@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Avoid grouping on DSO/COW pages in specific and RO pages
         * in general, RO pages shouldn't hurt as much anyway since
         * they can be in shared cache state.
+        *
+        * FIXME! This checks "pmd_dirty()" as an approximation of
+        * "is this a read-only page", since checking "pmd_write()"
+        * is even more broken. We haven't actually turned this into
+        * a writable page, so pmd_write() will always be false.
         */
-       if (!pmd_write(pmd))
+       if (!pmd_dirty(pmd))
                flags |= TNF_NO_GROUP;
 
        /*
@@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                pmd_t entry;
+               ret = 1;
 
                /*
                 * Avoid trapping faults against the zero page. The read-only
@@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                 */
                if (prot_numa && is_huge_zero_pmd(*pmd)) {
                        spin_unlock(ptl);
-                       return 0;
+                       return ret;
                }
 
                if (!prot_numa || !pmd_protnone(*pmd)) {
-                       ret = 1;
                        entry = pmdp_get_and_clear_notify(mm, addr, pmd);
                        entry = pmd_modify(entry, newprot);
                        ret = HPAGE_PMD_NR;
index 0a9ac6c268325a6ca9096bfc784cfbf68ac6a65e..c41b2a0ee2736e4f7df74c440ceb90bd5fcceecb 100644 (file)
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        __ClearPageReserved(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-               __SetPageTail(p);
                /*
                 * For gigantic hugepages allocated through bootmem at
                 * boot, it's safer to be consistent with the not-gigantic
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
                __ClearPageReserved(p);
                set_page_count(p, 0);
                p->first_page = page;
+               /* Make sure p->first_page is always valid for PageTail() */
+               smp_wmb();
+               __SetPageTail(p);
        }
 }
 
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
deleted file mode 100644 (file)
index 8277320..0000000
--- a/mm/iov_iter.c
+++ /dev/null
(This edit intentionally left blank placeholder; see replace below.)
@@ -1,753 +0,0 @@
-#include <linux/export.h>
-#include <linux/uio.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <net/checksum.h>
-
-#define iterate_iovec(i, n, __v, __p, skip, STEP) {    \
-       size_t left;                                    \
-       size_t wanted = n;                              \
-       __p = i->iov;                                   \
-       __v.iov_len = min(n, __p->iov_len - skip);      \
-       if (likely(__v.iov_len)) {                      \
-               __v.iov_base = __p->iov_base + skip;    \
-               left = (STEP);                          \
-               __v.iov_len -= left;                    \
-               skip += __v.iov_len;                    \
-               n -= __v.iov_len;                       \
-       } else {                                        \
-               left = 0;                               \
-       }                                               \
-       while (unlikely(!left && n)) {                  \
-               __p++;                                  \
-               __v.iov_len = min(n, __p->iov_len);     \
-               if (unlikely(!__v.iov_len))             \
-                       continue;                       \
-               __v.iov_base = __p->iov_base;           \
-               left = (STEP);                          \
-               __v.iov_len -= left;                    \
-               skip = __v.iov_len;                     \
-               n -= __v.iov_len;                       \
-       }                                               \
-       n = wanted - n;                                 \
-}
-
-#define iterate_kvec(i, n, __v, __p, skip, STEP) {     \
-       size_t wanted = n;                              \
-       __p = i->kvec;                                  \
-       __v.iov_len = min(n, __p->iov_len - skip);      \
-       if (likely(__v.iov_len)) {                      \
-               __v.iov_base = __p->iov_base + skip;    \
-               (void)(STEP);                           \
-               skip += __v.iov_len;                    \
-               n -= __v.iov_len;                       \
-       }                                               \
-       while (unlikely(n)) {                           \
-               __p++;                                  \
-               __v.iov_len = min(n, __p->iov_len);     \
-               if (unlikely(!__v.iov_len))             \
-                       continue;                       \
-               __v.iov_base = __p->iov_base;           \
-               (void)(STEP);                           \
-               skip = __v.iov_len;                     \
-               n -= __v.iov_len;                       \
-       }                                               \
-       n = wanted;                                     \
-}
-
-#define iterate_bvec(i, n, __v, __p, skip, STEP) {     \
-       size_t wanted = n;                              \
-       __p = i->bvec;                                  \
-       __v.bv_len = min_t(size_t, n, __p->bv_len - skip);      \
-       if (likely(__v.bv_len)) {                       \
-               __v.bv_page = __p->bv_page;             \
-               __v.bv_offset = __p->bv_offset + skip;  \
-               (void)(STEP);                           \
-               skip += __v.bv_len;                     \
-               n -= __v.bv_len;                        \
-       }                                               \
-       while (unlikely(n)) {                           \
-               __p++;                                  \
-               __v.bv_len = min_t(size_t, n, __p->bv_len);     \
-               if (unlikely(!__v.bv_len))              \
-                       continue;                       \
-               __v.bv_page = __p->bv_page;             \
-               __v.bv_offset = __p->bv_offset;         \
-               (void)(STEP);                           \
-               skip = __v.bv_len;                      \
-               n -= __v.bv_len;                        \
-       }                                               \
-       n = wanted;                                     \
-}
-
-#define iterate_all_kinds(i, n, v, I, B, K) {                  \
-       size_t skip = i->iov_offset;                            \
-       if (unlikely(i->type & ITER_BVEC)) {                    \
-               const struct bio_vec *bvec;                     \
-               struct bio_vec v;                               \
-               iterate_bvec(i, n, v, bvec, skip, (B))          \
-       } else if (unlikely(i->type & ITER_KVEC)) {             \
-               const struct kvec *kvec;                        \
-               struct kvec v;                                  \
-               iterate_kvec(i, n, v, kvec, skip, (K))          \
-       } else {                                                \
-               const struct iovec *iov;                        \
-               struct iovec v;                                 \
-               iterate_iovec(i, n, v, iov, skip, (I))          \
-       }                                                       \
-}
-
-#define iterate_and_advance(i, n, v, I, B, K) {                        \
-       size_t skip = i->iov_offset;                            \
-       if (unlikely(i->type & ITER_BVEC)) {                    \
-               const struct bio_vec *bvec;                     \
-               struct bio_vec v;                               \
-               iterate_bvec(i, n, v, bvec, skip, (B))          \
-               if (skip == bvec->bv_len) {                     \
-                       bvec++;                                 \
-                       skip = 0;                               \
-               }                                               \
-               i->nr_segs -= bvec - i->bvec;                   \
-               i->bvec = bvec;                                 \
-       } else if (unlikely(i->type & ITER_KVEC)) {             \
-               const struct kvec *kvec;                        \
-               struct kvec v;                                  \
-               iterate_kvec(i, n, v, kvec, skip, (K))          \
-               if (skip == kvec->iov_len) {                    \
-                       kvec++;                                 \
-                       skip = 0;                               \
-               }                                               \
-               i->nr_segs -= kvec - i->kvec;                   \
-               i->kvec = kvec;                                 \
-       } else {                                                \
-               const struct iovec *iov;                        \
-               struct iovec v;                                 \
-               iterate_iovec(i, n, v, iov, skip, (I))          \
-               if (skip == iov->iov_len) {                     \
-                       iov++;                                  \
-                       skip = 0;                               \
-               }                                               \
-               i->nr_segs -= iov - i->iov;                     \
-               i->iov = iov;                                   \
-       }                                                       \
-       i->count -= n;                                          \
-       i->iov_offset = skip;                                   \
-}
-
-static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
-                        struct iov_iter *i)
-{
-       size_t skip, copy, left, wanted;
-       const struct iovec *iov;
-       char __user *buf;
-       void *kaddr, *from;
-
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       wanted = bytes;
-       iov = i->iov;
-       skip = i->iov_offset;
-       buf = iov->iov_base + skip;
-       copy = min(bytes, iov->iov_len - skip);
-
-       if (!fault_in_pages_writeable(buf, copy)) {
-               kaddr = kmap_atomic(page);
-               from = kaddr + offset;
-
-               /* first chunk, usually the only one */
-               left = __copy_to_user_inatomic(buf, from, copy);
-               copy -= left;
-               skip += copy;
-               from += copy;
-               bytes -= copy;
-
-               while (unlikely(!left && bytes)) {
-                       iov++;
-                       buf = iov->iov_base;
-                       copy = min(bytes, iov->iov_len);
-                       left = __copy_to_user_inatomic(buf, from, copy);
-                       copy -= left;
-                       skip = copy;
-                       from += copy;
-                       bytes -= copy;
-               }
-               if (likely(!bytes)) {
-                       kunmap_atomic(kaddr);
-                       goto done;
-               }
-               offset = from - kaddr;
-               buf += copy;
-               kunmap_atomic(kaddr);
-               copy = min(bytes, iov->iov_len - skip);
-       }
-       /* Too bad - revert to non-atomic kmap */
-       kaddr = kmap(page);
-       from = kaddr + offset;
-       left = __copy_to_user(buf, from, copy);
-       copy -= left;
-       skip += copy;
-       from += copy;
-       bytes -= copy;
-       while (unlikely(!left && bytes)) {
-               iov++;
-               buf = iov->iov_base;
-               copy = min(bytes, iov->iov_len);
-               left = __copy_to_user(buf, from, copy);
-               copy -= left;
-               skip = copy;
-               from += copy;
-               bytes -= copy;
-       }
-       kunmap(page);
-done:
-       if (skip == iov->iov_len) {
-               iov++;
-               skip = 0;
-       }
-       i->count -= wanted - bytes;
-       i->nr_segs -= iov - i->iov;
-       i->iov = iov;
-       i->iov_offset = skip;
-       return wanted - bytes;
-}
-
-static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
-                        struct iov_iter *i)
-{
-       size_t skip, copy, left, wanted;
-       const struct iovec *iov;
-       char __user *buf;
-       void *kaddr, *to;
-
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       wanted = bytes;
-       iov = i->iov;
-       skip = i->iov_offset;
-       buf = iov->iov_base + skip;
-       copy = min(bytes, iov->iov_len - skip);
-
-       if (!fault_in_pages_readable(buf, copy)) {
-               kaddr = kmap_atomic(page);
-               to = kaddr + offset;
-
-               /* first chunk, usually the only one */
-               left = __copy_from_user_inatomic(to, buf, copy);
-               copy -= left;
-               skip += copy;
-               to += copy;
-               bytes -= copy;
-
-               while (unlikely(!left && bytes)) {
-                       iov++;
-                       buf = iov->iov_base;
-                       copy = min(bytes, iov->iov_len);
-                       left = __copy_from_user_inatomic(to, buf, copy);
-                       copy -= left;
-                       skip = copy;
-                       to += copy;
-                       bytes -= copy;
-               }
-               if (likely(!bytes)) {
-                       kunmap_atomic(kaddr);
-                       goto done;
-               }
-               offset = to - kaddr;
-               buf += copy;
-               kunmap_atomic(kaddr);
-               copy = min(bytes, iov->iov_len - skip);
-       }
-       /* Too bad - revert to non-atomic kmap */
-       kaddr = kmap(page);
-       to = kaddr + offset;
-       left = __copy_from_user(to, buf, copy);
-       copy -= left;
-       skip += copy;
-       to += copy;
-       bytes -= copy;
-       while (unlikely(!left && bytes)) {
-               iov++;
-               buf = iov->iov_base;
-               copy = min(bytes, iov->iov_len);
-               left = __copy_from_user(to, buf, copy);
-               copy -= left;
-               skip = copy;
-               to += copy;
-               bytes -= copy;
-       }
-       kunmap(page);
-done:
-       if (skip == iov->iov_len) {
-               iov++;
-               skip = 0;
-       }
-       i->count -= wanted - bytes;
-       i->nr_segs -= iov - i->iov;
-       i->iov = iov;
-       i->iov_offset = skip;
-       return wanted - bytes;
-}
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-               return fault_in_pages_readable(buf, bytes);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-void iov_iter_init(struct iov_iter *i, int direction,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       size_t count)
-{
-       /* It will get better.  Eventually... */
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               direction |= ITER_KVEC;
-               i->type = direction;
-               i->kvec = (struct kvec *)iov;
-       } else {
-               i->type = direction;
-               i->iov = iov;
-       }
-       i->nr_segs = nr_segs;
-       i->iov_offset = 0;
-       i->count = count;
-}
-EXPORT_SYMBOL(iov_iter_init);
-
-static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
-{
-       char *from = kmap_atomic(page);
-       memcpy(to, from + offset, len);
-       kunmap_atomic(from);
-}
-
-static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
-{
-       char *to = kmap_atomic(page);
-       memcpy(to + offset, from, len);
-       kunmap_atomic(to);
-}
-
-static void memzero_page(struct page *page, size_t offset, size_t len)
-{
-       char *addr = kmap_atomic(page);
-       memset(addr + offset, 0, len);
-       kunmap_atomic(addr);
-}
-
-size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
-{
-       char *from = addr;
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       iterate_and_advance(i, bytes, v,
-               __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-                              v.iov_len),
-               memcpy_to_page(v.bv_page, v.bv_offset,
-                              (from += v.bv_len) - v.bv_len, v.bv_len),
-               memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
-       )
-
-       return bytes;
-}
-EXPORT_SYMBOL(copy_to_iter);
-
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
-{
-       char *to = addr;
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       iterate_and_advance(i, bytes, v,
-               __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-                                v.iov_len),
-               memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-                                v.bv_offset, v.bv_len),
-               memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
-       )
-
-       return bytes;
-}
-EXPORT_SYMBOL(copy_from_iter);
-
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
-{
-       char *to = addr;
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       iterate_and_advance(i, bytes, v,
-               __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
-                                        v.iov_base, v.iov_len),
-               memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-                                v.bv_offset, v.bv_len),
-               memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
-       )
-
-       return bytes;
-}
-EXPORT_SYMBOL(copy_from_iter_nocache);
-
-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
-                        struct iov_iter *i)
-{
-       if (i->type & (ITER_BVEC|ITER_KVEC)) {
-               void *kaddr = kmap_atomic(page);
-               size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
-               kunmap_atomic(kaddr);
-               return wanted;
-       } else
-               return copy_page_to_iter_iovec(page, offset, bytes, i);
-}
-EXPORT_SYMBOL(copy_page_to_iter);
-
-size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
-                        struct iov_iter *i)
-{
-       if (i->type & (ITER_BVEC|ITER_KVEC)) {
-               void *kaddr = kmap_atomic(page);
-               size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
-               kunmap_atomic(kaddr);
-               return wanted;
-       } else
-               return copy_page_from_iter_iovec(page, offset, bytes, i);
-}
-EXPORT_SYMBOL(copy_page_from_iter);
-
-size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
-{
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       iterate_and_advance(i, bytes, v,
-               __clear_user(v.iov_base, v.iov_len),
-               memzero_page(v.bv_page, v.bv_offset, v.bv_len),
-               memset(v.iov_base, 0, v.iov_len)
-       )
-
-       return bytes;
-}
-EXPORT_SYMBOL(iov_iter_zero);
-
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr = kmap_atomic(page), *p = kaddr + offset;
-       iterate_all_kinds(i, bytes, v,
-               __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-                                         v.iov_base, v.iov_len),
-               memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-                                v.bv_offset, v.bv_len),
-               memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
-       )
-       kunmap_atomic(kaddr);
-       return bytes;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-void iov_iter_advance(struct iov_iter *i, size_t size)
-{
-       iterate_and_advance(i, size, v, 0, 0, 0)
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-       if (i->nr_segs == 1)
-               return i->count;
-       else if (i->type & ITER_BVEC)
-               return min(i->count, i->bvec->bv_len - i->iov_offset);
-       else
-               return min(i->count, i->iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
-void iov_iter_kvec(struct iov_iter *i, int direction,
-                       const struct kvec *kvec, unsigned long nr_segs,
-                       size_t count)
-{
-       BUG_ON(!(direction & ITER_KVEC));
-       i->type = direction;
-       i->kvec = kvec;
-       i->nr_segs = nr_segs;
-       i->iov_offset = 0;
-       i->count = count;
-}
-EXPORT_SYMBOL(iov_iter_kvec);
-
-void iov_iter_bvec(struct iov_iter *i, int direction,
-                       const struct bio_vec *bvec, unsigned long nr_segs,
-                       size_t count)
-{
-       BUG_ON(!(direction & ITER_BVEC));
-       i->type = direction;
-       i->bvec = bvec;
-       i->nr_segs = nr_segs;
-       i->iov_offset = 0;
-       i->count = count;
-}
-EXPORT_SYMBOL(iov_iter_bvec);
-
-unsigned long iov_iter_alignment(const struct iov_iter *i)
-{
-       unsigned long res = 0;
-       size_t size = i->count;
-
-       if (!size)
-               return 0;
-
-       iterate_all_kinds(i, size, v,
-               (res |= (unsigned long)v.iov_base | v.iov_len, 0),
-               res |= v.bv_offset | v.bv_len,
-               res |= (unsigned long)v.iov_base | v.iov_len
-       )
-       return res;
-}
-EXPORT_SYMBOL(iov_iter_alignment);
-
-ssize_t iov_iter_get_pages(struct iov_iter *i,
-                  struct page **pages, size_t maxsize, unsigned maxpages,
-                  size_t *start)
-{
-       if (maxsize > i->count)
-               maxsize = i->count;
-
-       if (!maxsize)
-               return 0;
-
-       iterate_all_kinds(i, maxsize, v, ({
-               unsigned long addr = (unsigned long)v.iov_base;
-               size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
-               int n;
-               int res;
-
-               if (len > maxpages * PAGE_SIZE)
-                       len = maxpages * PAGE_SIZE;
-               addr &= ~(PAGE_SIZE - 1);
-               n = DIV_ROUND_UP(len, PAGE_SIZE);
-               res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
-               if (unlikely(res < 0))
-                       return res;
-               return (res == n ? len : res * PAGE_SIZE) - *start;
-       0;}),({
-               /* can't be more than PAGE_SIZE */
-               *start = v.bv_offset;
-               get_page(*pages = v.bv_page);
-               return v.bv_len;
-       }),({
-               return -EFAULT;
-       })
-       )
-       return 0;
-}
-EXPORT_SYMBOL(iov_iter_get_pages);
-
-static struct page **get_pages_array(size_t n)
-{
-       struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
-       if (!p)
-               p = vmalloc(n * sizeof(struct page *));
-       return p;
-}
-
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
-                  struct page ***pages, size_t maxsize,
-                  size_t *start)
-{
-       struct page **p;
-
-       if (maxsize > i->count)
-               maxsize = i->count;
-
-       if (!maxsize)
-               return 0;
-
-       iterate_all_kinds(i, maxsize, v, ({
-               unsigned long addr = (unsigned long)v.iov_base;
-               size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
-               int n;
-               int res;
-
-               addr &= ~(PAGE_SIZE - 1);
-               n = DIV_ROUND_UP(len, PAGE_SIZE);
-               p = get_pages_array(n);
-               if (!p)
-                       return -ENOMEM;
-               res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
-               if (unlikely(res < 0)) {
-                       kvfree(p);
-                       return res;
-               }
-               *pages = p;
-               return (res == n ? len : res * PAGE_SIZE) - *start;
-       0;}),({
-               /* can't be more than PAGE_SIZE */
-               *start = v.bv_offset;
-               *pages = p = get_pages_array(1);
-               if (!p)
-                       return -ENOMEM;
-               get_page(*p = v.bv_page);
-               return v.bv_len;
-       }),({
-               return -EFAULT;
-       })
-       )
-       return 0;
-}
-EXPORT_SYMBOL(iov_iter_get_pages_alloc);
-
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
-                              struct iov_iter *i)
-{
-       char *to = addr;
-       __wsum sum, next;
-       size_t off = 0;
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       sum = *csum;
-       iterate_and_advance(i, bytes, v, ({
-               int err = 0;
-               next = csum_and_copy_from_user(v.iov_base, 
-                                              (to += v.iov_len) - v.iov_len,
-                                              v.iov_len, 0, &err);
-               if (!err) {
-                       sum = csum_block_add(sum, next, off);
-                       off += v.iov_len;
-               }
-               err ? v.iov_len : 0;
-       }), ({
-               char *p = kmap_atomic(v.bv_page);
-               next = csum_partial_copy_nocheck(p + v.bv_offset,
-                                                (to += v.bv_len) - v.bv_len,
-                                                v.bv_len, 0);
-               kunmap_atomic(p);
-               sum = csum_block_add(sum, next, off);
-               off += v.bv_len;
-       }),({
-               next = csum_partial_copy_nocheck(v.iov_base,
-                                                (to += v.iov_len) - v.iov_len,
-                                                v.iov_len, 0);
-               sum = csum_block_add(sum, next, off);
-               off += v.iov_len;
-       })
-       )
-       *csum = sum;
-       return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
-                            struct iov_iter *i)
-{
-       char *from = addr;
-       __wsum sum, next;
-       size_t off = 0;
-       if (unlikely(bytes > i->count))
-               bytes = i->count;
-
-       if (unlikely(!bytes))
-               return 0;
-
-       sum = *csum;
-       iterate_and_advance(i, bytes, v, ({
-               int err = 0;
-               next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
-                                            v.iov_base, 
-                                            v.iov_len, 0, &err);
-               if (!err) {
-                       sum = csum_block_add(sum, next, off);
-                       off += v.iov_len;
-               }
-               err ? v.iov_len : 0;
-       }), ({
-               char *p = kmap_atomic(v.bv_page);
-               next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
-                                                p + v.bv_offset,
-                                                v.bv_len, 0);
-               kunmap_atomic(p);
-               sum = csum_block_add(sum, next, off);
-               off += v.bv_len;
-       }),({
-               next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
-                                                v.iov_base,
-                                                v.iov_len, 0);
-               sum = csum_block_add(sum, next, off);
-               off += v.iov_len;
-       })
-       )
-       *csum = sum;
-       return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
-int iov_iter_npages(const struct iov_iter *i, int maxpages)
-{
-       size_t size = i->count;
-       int npages = 0;
-
-       if (!size)
-               return 0;
-
-       iterate_all_kinds(i, size, v, ({
-               unsigned long p = (unsigned long)v.iov_base;
-               npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
-                       - p / PAGE_SIZE;
-               if (npages >= maxpages)
-                       return maxpages;
-       0;}),({
-               npages++;
-               if (npages >= maxpages)
-                       return maxpages;
-       }),({
-               unsigned long p = (unsigned long)v.iov_base;
-               npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
-                       - p / PAGE_SIZE;
-               if (npages >= maxpages)
-                       return maxpages;
-       })
-       )
-       return npages;
-}
-EXPORT_SYMBOL(iov_iter_npages);
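
iov_iter_npages(), added to lib/iov_iter.c earlier in this diff and removed here from mm/, counts the pages a segment touches as DIV_ROUND_UP(addr + len, PAGE_SIZE) - addr / PAGE_SIZE. A tiny standalone check of that arithmetic with a made-up address range:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Hypothetical user range crossing one page boundary. */
		unsigned long addr = 0x1ff0, len = 0x30;
		unsigned long npages = DIV_ROUND_UP(addr + len, PAGE_SIZE)
				       - addr / PAGE_SIZE;

		printf("npages=%lu\n", npages);	/* 2: the range spans two pages */
		return 0;
	}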
index 78fee632a7ee9b73d9d0d5498d76aeb58460e711..936d81661c478a89fd797e8b311b4c52291f2414 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/kasan.h>
 
 #include "kasan.h"
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));
-       return ret ? 0 : -ENOMEM;
+
+       if (ret) {
+               find_vm_area(addr)->flags |= VM_KASAN;
+               return 0;
+       }
+
+       return -ENOMEM;
 }
 
-void kasan_module_free(void *addr)
+void kasan_free_shadow(const struct vm_struct *vm)
 {
-       vfree(kasan_mem_to_shadow(addr));
+       if (vm->flags & VM_KASAN)
+               vfree(kasan_mem_to_shadow(vm->addr));
 }
 
 static void register_global(struct kasan_global *global)
index d18d3a6e7337d944a36e1375b1f50fdce483961f..b34ef4a32a3b266768248743849e241183e112ed 100644 (file)
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
         * on for the root memcg is enough.
         */
        if (cgroup_on_dfl(root_css->cgroup))
-               mem_cgroup_from_css(root_css)->use_hierarchy = true;
+               root_mem_cgroup->use_hierarchy = true;
+       else
+               root_mem_cgroup->use_hierarchy = false;
 }
 
 static u64 memory_current_read(struct cgroup_subsys_state *css,
@@ -5247,7 +5249,7 @@ static int memory_low_show(struct seq_file *m, void *v)
        unsigned long low = ACCESS_ONCE(memcg->low);
 
        if (low == PAGE_COUNTER_MAX)
-               seq_puts(m, "infinity\n");
+               seq_puts(m, "max\n");
        else
                seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
 
@@ -5262,7 +5264,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
        int err;
 
        buf = strstrip(buf);
-       err = page_counter_memparse(buf, "infinity", &low);
+       err = page_counter_memparse(buf, "max", &low);
        if (err)
                return err;
 
@@ -5277,7 +5279,7 @@ static int memory_high_show(struct seq_file *m, void *v)
        unsigned long high = ACCESS_ONCE(memcg->high);
 
        if (high == PAGE_COUNTER_MAX)
-               seq_puts(m, "infinity\n");
+               seq_puts(m, "max\n");
        else
                seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
 
@@ -5292,7 +5294,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
        int err;
 
        buf = strstrip(buf);
-       err = page_counter_memparse(buf, "infinity", &high);
+       err = page_counter_memparse(buf, "max", &high);
        if (err)
                return err;
 
@@ -5307,7 +5309,7 @@ static int memory_max_show(struct seq_file *m, void *v)
        unsigned long max = ACCESS_ONCE(memcg->memory.limit);
 
        if (max == PAGE_COUNTER_MAX)
-               seq_puts(m, "infinity\n");
+               seq_puts(m, "max\n");
        else
                seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
 
@@ -5322,7 +5324,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
        int err;
 
        buf = strstrip(buf);
-       err = page_counter_memparse(buf, "infinity", &max);
+       err = page_counter_memparse(buf, "max", &max);
        if (err)
                return err;
 
@@ -5426,7 +5428,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
        if (memcg == root_mem_cgroup)
                return false;
 
-       if (page_counter_read(&memcg->memory) > memcg->low)
+       if (page_counter_read(&memcg->memory) >= memcg->low)
                return false;
 
        while (memcg != root) {
@@ -5435,7 +5437,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
                if (memcg == root_mem_cgroup)
                        break;
 
-               if (page_counter_read(&memcg->memory) > memcg->low)
+               if (page_counter_read(&memcg->memory) >= memcg->low)
                        return false;
        }
        return true;
index 8068893697bbdbb5d64f9d43508658d601a6932e..411144f977b10eab492410728784efe37c4ea54a 100644 (file)
@@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Avoid grouping on DSO/COW pages in specific and RO pages
         * in general, RO pages shouldn't hurt as much anyway since
         * they can be in shared cache state.
+        *
+        * FIXME! This checks "pmd_dirty()" as an approximation of
+        * "is this a read-only page", since checking "pmd_write()"
+        * is even more broken. We haven't actually turned this into
+        * a writable page, so pmd_write() will always be false.
         */
-       if (!pte_write(pte))
+       if (!pte_dirty(pte))
                flags |= TNF_NO_GROUP;
 
        /*
index 73cf0987088c36647fbb805278978bb656ff1fda..8a54cd214925872a66d4d1cc36b69ec6c6047324 100644 (file)
 
 int can_do_mlock(void)
 {
-       if (capable(CAP_IPC_LOCK))
-               return 1;
        if (rlimit(RLIMIT_MEMLOCK) != 0)
                return 1;
+       if (capable(CAP_IPC_LOCK))
+               return 1;
        return 0;
 }
 EXPORT_SYMBOL(can_do_mlock);
index 7296360fc057e5bbf67b9904501fdbe98a97b1b2..3fba2dc97c44bece0d6fb5754afdafb1dba6353e 100644 (file)
@@ -62,6 +62,7 @@ void *high_memory;
 EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
 unsigned long max_mapnr;
+EXPORT_SYMBOL(max_mapnr);
 unsigned long highest_memmap_pfn;
 struct percpu_counter vm_committed_as;
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
@@ -1213,11 +1214,9 @@ static int do_mmap_private(struct vm_area_struct *vma,
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
                total = point;
                kdebug("try to alloc exact %lu pages", total);
-               base = alloc_pages_exact(len, GFP_KERNEL);
-       } else {
-               base = (void *)__get_free_pages(GFP_KERNEL, order);
        }
 
+       base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
        if (!base)
                goto enomem;
 
index a47f0b229a1aca202b15195c88ab93d52e65064f..40e29429e7b0995bd5799bd6263b18d6ce8261cb 100644 (file)
@@ -2353,8 +2353,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                if (ac->high_zoneidx < ZONE_NORMAL)
                        goto out;
                /* The OOM killer does not compensate for light reclaim */
-               if (!(gfp_mask & __GFP_FS))
+               if (!(gfp_mask & __GFP_FS)) {
+                       /*
+                        * XXX: Page reclaim didn't yield anything,
+                        * and the OOM killer can't be invoked, but
+                        * keep looping as per should_alloc_retry().
+                        */
+                       *did_some_progress = 1;
                        goto out;
+               }
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2366,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                        goto out;
        }
        /* Exhausted what can be done so it's blamo time */
-       if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false))
+       if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
+                       || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
                *did_some_progress = 1;
 out:
        oom_zonelist_unlock(ac->zonelist, gfp_mask);
index a63031fa3e0c1e4380e6937aa711df912c9a687f..cf2d0ca010bc52efd5ea86c7f6ba760a5c3ef286 100644 (file)
@@ -1455,6 +1455,9 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 bool shmem_mapping(struct address_space *mapping)
 {
+       if (!mapping->host)
+               return false;
+
        return mapping->host->i_sb->s_op == &shmem_ops;
 }
 
@@ -2319,8 +2322,8 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 
 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
 {
-       bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
-       bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+       bool old_is_dir = d_is_dir(old_dentry);
+       bool new_is_dir = d_is_dir(new_dentry);
 
        if (old_dir != new_dir && old_is_dir != new_is_dir) {
                if (old_is_dir) {
index 35b25e1340ca49cf0a95bbc80c69490ec68b5554..49abccf29a29f65c4748a6decec00c27fec23c6d 100644 (file)
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
                spin_unlock(&vmap_area_lock);
 
                vmap_debug_free_range(va->va_start, va->va_end);
+               kasan_free_shadow(vm);
                free_unmap_vmap_area(va);
                vm->size -= PAGE_SIZE;
 
index d8e376a5f0f13d9eed735f62b81367b7bc685ebe..36a1a739ad68ff57eace5ba4bc4166faf12c485b 100644 (file)
@@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 static void p9_virtio_remove(struct virtio_device *vdev)
 {
        struct virtio_chan *chan = vdev->priv;
-
-       if (chan->inuse)
-               p9_virtio_close(chan->client);
-       vdev->config->del_vqs(vdev);
+       unsigned long warning_time;
 
        mutex_lock(&virtio_9p_lock);
+
+       /* Remove self from list so we don't get new users. */
        list_del(&chan->chan_list);
+       warning_time = jiffies;
+
+       /* Wait for existing users to close. */
+       while (chan->inuse) {
+               mutex_unlock(&virtio_9p_lock);
+               msleep(250);
+               if (time_after(jiffies, warning_time + 10 * HZ)) {
+                       dev_emerg(&vdev->dev,
+                                 "p9_virtio_remove: waiting for device in use.\n");
+                       warning_time = jiffies;
+               }
+               mutex_lock(&virtio_9p_lock);
+       }
+
        mutex_unlock(&virtio_9p_lock);
+
+       vdev->config->del_vqs(vdev);
+
        sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
        kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
        kfree(chan->tag);
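
p9_virtio_remove() above now unlinks the channel first and then polls chan->inuse, dropping virtio_9p_lock around the msleep() and warning every ten seconds while it waits. A userspace sketch of that wait-for-users pattern using pthreads, with a hypothetical inuse flag standing in for the channel state (build with -pthread):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static bool inuse = true;		/* stands in for chan->inuse */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void *user_thread(void *arg)
	{
		(void)arg;
		sleep(1);			/* the last user eventually goes away */
		pthread_mutex_lock(&lock);
		inuse = false;
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		time_t warning_time = time(NULL);

		pthread_create(&t, NULL, user_thread, NULL);

		pthread_mutex_lock(&lock);
		while (inuse) {
			pthread_mutex_unlock(&lock);	/* never sleep holding the lock */
			usleep(250 * 1000);
			if (time(NULL) > warning_time + 10) {
				fprintf(stderr, "still waiting for device in use\n");
				warning_time = time(NULL);
			}
			pthread_mutex_lock(&lock);
		}
		pthread_mutex_unlock(&lock);

		puts("all users gone; safe to tear down");
		pthread_join(t, NULL);
		return 0;
	}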
index fb57ab6b24f9ef8feea780179ad2e8284a9e532f..02c24cf63c344a3b15bcf87369da7f847150fab3 100644 (file)
@@ -190,6 +190,8 @@ static int __init br_init(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+
        err = stp_proto_register(&br_stp_proto);
        if (err < 0) {
                pr_err("bridge: can't register sap for STP\n");
index b087d278c6793f48f413082ff60fd8abbea49ff3..1849d96b3c91d82e20f85ca2579546857343d0f2 100644 (file)
@@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
         */
        del_nbp(p);
 
+       dev_set_mtu(br->dev, br_min_mtu(br));
+
        spin_lock_bh(&br->lock);
        changed_addr = br_stp_recalculate_bridge_id(br);
        spin_unlock_bh(&br->lock);
index 769b185fefbd5f6bb6d4e68c1bcb0caeafa23d25..a6e2da0bc7184501ed5eb2bcef0e5169be2cf276 100644 (file)
@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
        int copylen;
 
        ret = -EOPNOTSUPP;
-       if (m->msg_flags&MSG_OOB)
+       if (flags & MSG_OOB)
                goto read_error;
 
        skb = skb_recv_datagram(sk, flags, 0 , &ret);
index 8bc7caa28e64ddc32d30f0054ac9dee708ba8f3f..434ba8557826ddf160fafd08a04949d546201692 100644 (file)
@@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
        u16 tmp;
        u16 len;
        u16 hdrchks;
-       u16 pktchks;
+       int pktchks;
        struct cffrml *this;
        this = container_obj(layr);
 
index 1be0b521ac490143e60e3b2d5b02d7fd68c24087..f6c3b2137eeaacdc5cf38d1eac03e018d8f2d37b 100644 (file)
@@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt)
        return skb->len;
 }
 
-inline u16 cfpkt_iterate(struct cfpkt *pkt,
-                        u16 (*iter_func)(u16, void *, u16),
-                        u16 data)
+int cfpkt_iterate(struct cfpkt *pkt,
+                 u16 (*iter_func)(u16, void *, u16),
+                 u16 data)
 {
        /*
         * Don't care about the performance hit of linearizing,
index 66e08040ced7557ba19e7535815804bbcffca12a..32d710eaf1fc991b2ef4638fe8ccc95b84352ace 100644 (file)
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
                goto inval_skb;
        }
 
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
 
index 3236b4167a32109b91b1eb28ae8bdf522c8db3f5..94d3d5e978832cba85b7212988f735780ceed9dd 100644 (file)
@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
 
 COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
        return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
                       unsigned int, vlen, unsigned int, flags)
 {
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
        return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                              flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
        return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
        int datagrams;
        struct timespec ktspec;
 
-       if (flags & MSG_CMSG_COMPAT)
-               return -EINVAL;
-
        if (timeout == NULL)
                return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                                      flags | MSG_CMSG_COMPAT, NULL);
index 8f9710c62e20d58bcdcec3d184ca6344fbe5a57c..962ee9d719641291853715f366717bf1626e115c 100644 (file)
@@ -946,7 +946,7 @@ bool dev_valid_name(const char *name)
                return false;
 
        while (*name) {
-               if (*name == '/' || isspace(*name))
+               if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
index 91f74f3eb20475439214d9692f9b17c8124a9c3a..aa378ecef1860d0c1e255c001aef9528b8198d6f 100644 (file)
@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_RXALL_BIT] =            "rx-all",
        [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
        [NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
+       [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
 };
 
 static const char
index 0c08062d1796337f25b9dbc2cbce68d5e3194037..1e2f46a69d50196f71f1fb7ae97e7732c2a8a059 100644 (file)
@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
        return 0;
 
 nla_put_failure:
+       kfree(d->xstats);
+       d->xstats = NULL;
+       d->xstats_len = 0;
        spin_unlock_bh(d->lock);
        return -1;
 }
@@ -305,7 +308,9 @@ int
 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
 {
        if (d->compat_xstats) {
-               d->xstats = st;
+               d->xstats = kmemdup(st, len, GFP_ATOMIC);
+               if (!d->xstats)
+                       goto err_out;
                d->xstats_len = len;
        }
 
@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
                return gnet_stats_copy(d, TCA_STATS_APP, st, len);
 
        return 0;
+
+err_out:
+       d->xstats_len = 0;
+       spin_unlock_bh(d->lock);
+       return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
 
@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
                        return -1;
        }
 
+       kfree(d->xstats);
+       d->xstats = NULL;
+       d->xstats_len = 0;
        spin_unlock_bh(d->lock);
        return 0;
 }
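
The gen_stats changes make the dump own a private copy of the application stats: kmemdup() on entry, kfree() on both the nla_put_failure path and in gnet_stats_finish_copy(), so nothing keeps pointing at caller memory after the caller has moved on. A small userspace sketch of the same copy-and-own pattern (names are illustrative):

#include <stdlib.h>
#include <string.h>

struct dump {
        void *xstats;
        size_t xstats_len;
};

/* Duplicate the caller's buffer so the dump owns it and can free it later. */
static int dump_copy_app(struct dump *d, const void *st, size_t len)
{
        d->xstats = malloc(len);
        if (!d->xstats) {
                d->xstats_len = 0;
                return -1;
        }
        memcpy(d->xstats, st, len);
        d->xstats_len = len;
        return 0;
}

/* Safe on both the success and the failure path: free and reset. */
static void dump_finish(struct dump *d)
{
        free(d->xstats);
        d->xstats = NULL;
        d->xstats_len = 0;
}

int main(void)
{
        struct dump d = { 0 };
        int local[4] = { 1, 2, 3, 4 };  /* caller-owned, may go away */

        if (dump_copy_app(&d, local, sizeof(local)) == 0)
                dump_finish(&d);
        return 0;
}
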
index b4899f5b7388e8f0c825a433a1f633d6b087a0f9..508155b283ddcc73a967a2bc8068e67cb8cada7d 100644 (file)
@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
                        return len;
 
                i += len;
+               if ((value > 1) &&
+                   (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+                       return -ENOTSUPP;
                pkt_dev->burst = value < 1 ? 1 : value;
                sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
                return count;
index ab293a3066b34bc4f6af71701f0c12b9ab6e5a34..ee0608bb3bc087212d12bac729677bf454053560 100644 (file)
@@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        s_h = cb->args[0];
        s_idx = cb->args[1];
 
-       rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
        /* A hack to preserve kernel<->userspace interface.
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
-               hlist_for_each_entry_rcu(dev, head, index_hlist) {
+               hlist_for_each_entry(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                        err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1344,7 +1343,6 @@ cont:
                }
        }
 out:
-       rcu_read_unlock();
        cb->args[1] = idx;
        cb->args[0] = h;
 
@@ -2012,8 +2010,8 @@ replay:
        }
 
        if (1) {
-               struct nlattr *attr[ops ? ops->maxtype + 1 : 0];
-               struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
+               struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
+               struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
                struct nlattr **data = NULL;
                struct nlattr **slave_data = NULL;
                struct net *dest_net, *link_net = NULL;
@@ -2122,6 +2120,10 @@ replay:
                if (IS_ERR(dest_net))
                        return PTR_ERR(dest_net);
 
+               err = -EPERM;
+               if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
+                       goto out;
+
                if (tb[IFLA_LINK_NETNSID]) {
                        int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
 
@@ -2130,6 +2132,9 @@ replay:
                                err =  -EINVAL;
                                goto out;
                        }
+                       err = -EPERM;
+                       if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+                               goto out;
                }
 
                dev = rtnl_create_link(link_net ? : dest_net, ifname,
@@ -2161,28 +2166,28 @@ replay:
                        }
                }
                err = rtnl_configure_link(dev, ifm);
-               if (err < 0) {
-                       if (ops->newlink) {
-                               LIST_HEAD(list_kill);
-
-                               ops->dellink(dev, &list_kill);
-                               unregister_netdevice_many(&list_kill);
-                       } else {
-                               unregister_netdevice(dev);
-                       }
-                       goto out;
-               }
-
+               if (err < 0)
+                       goto out_unregister;
                if (link_net) {
                        err = dev_change_net_namespace(dev, dest_net, ifname);
                        if (err < 0)
-                               unregister_netdevice(dev);
+                               goto out_unregister;
                }
 out:
                if (link_net)
                        put_net(link_net);
                put_net(dest_net);
                return err;
+out_unregister:
+               if (ops->newlink) {
+                       LIST_HEAD(list_kill);
+
+                       ops->dellink(dev, &list_kill);
+                       unregister_netdevice_many(&list_kill);
+               } else {
+                       unregister_netdevice(dev);
+               }
+               goto out;
        }
 }
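
The rtnl_newlink() rework funnels both late failure points (rtnl_configure_link() and dev_change_net_namespace()) through one out_unregister label, the usual goto-unwind shape: a single block undoes the registration and then falls back to the common exit. A compact userspace sketch of that shape (the functions below are stand-ins, not the rtnetlink API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the setup and teardown steps. */
static void *create_dev(void)        { return malloc(1); }
static int   configure_dev(void *d)  { (void)d; return -1; /* force failure */ }
static void  unregister_dev(void *d) { free(d); }

static int newlink(void)
{
        void *dev;
        int err;

        dev = create_dev();
        if (!dev)
                return -1;

        err = configure_dev(dev);
        if (err < 0)
                goto out_unregister;

        /* later steps would also jump to out_unregister on failure */
        return 0;

out_unregister:
        unregister_dev(dev);    /* one place that undoes the registration */
        return err;
}

int main(void)
{
        printf("newlink() = %d\n", newlink());
        return 0;
}
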
 
index 88c613eab142962dc44f2075378fce0b94349e8e..8e4ac97c84775f15c786020ff87a2ef22a8dbac8 100644 (file)
@@ -3621,13 +3621,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 {
        struct sk_buff_head *q = &sk->sk_error_queue;
        struct sk_buff *skb, *skb_next;
+       unsigned long flags;
        int err = 0;
 
-       spin_lock_bh(&q->lock);
+       spin_lock_irqsave(&q->lock, flags);
        skb = __skb_dequeue(q);
        if (skb && (skb_next = skb_peek(q)))
                err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
-       spin_unlock_bh(&q->lock);
+       spin_unlock_irqrestore(&q->lock, flags);
 
        sk->sk_err = err;
        if (err)
@@ -3732,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
                     struct sock *sk, int tstype)
 {
        struct sk_buff *skb;
-       bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+       bool tsonly;
+
+       if (!sk)
+               return;
 
-       if (!sk || !skb_may_tx_timestamp(sk, tsonly))
+       tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+       if (!skb_may_tx_timestamp(sk, tsonly))
                return;
 
        if (tsonly)
@@ -4172,7 +4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
-       skb->sender_cpu = 0;
+       skb_sender_cpu_clear(skb);
        skb_init_secmark(skb);
        secpath_reset(skb);
        nf_reset(skb);
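
The __skb_tstamp_tx() hunk fixes an ordering bug: the old code computed tsonly from sk->sk_tsflags in the declaration, i.e. before the !sk test could reject a NULL socket. A userspace sketch of the fixed ordering (the struct and flag value are placeholders):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct sock { unsigned int sk_tsflags; };       /* placeholder, not the kernel struct */

/* Read the flag only after the NULL check; the old code dereferenced sk
 * in the initializer, before the check could run.
 */
static int tstamp_tx(struct sock *sk)
{
        bool tsonly;

        if (!sk)
                return -1;                      /* bail out before touching sk */

        tsonly = sk->sk_tsflags & 0x1;          /* only reached with a valid sk */
        return tsonly;
}

int main(void)
{
        struct sock s = { .sk_tsflags = 1 };

        printf("NULL sk:  %d\n", tstamp_tx(NULL));
        printf("valid sk: %d\n", tstamp_tx(&s));
        return 0;
}
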
index 93c8b20c91e496648f7f2e5c769c062ca2dd679d..78e89eb7eb705624d3ff63324f5002ae10b51145 100644 (file)
@@ -1655,6 +1655,10 @@ void sock_rfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_rfree);
 
+/*
+ * Buffer destructor for skbs that are not used directly in read or write
+ * path, e.g. for error handler skbs. Automatically called from kfree_skb.
+ */
 void sock_efree(struct sk_buff *skb)
 {
        sock_put(skb->sk);
index 433424804284cad8921a2efb6427a615b740ecc5..8ce351ffceb122568ae05587a0b1dc3544d476d7 100644 (file)
@@ -25,6 +25,8 @@
 static int zero = 0;
 static int one = 1;
 static int ushort_max = USHRT_MAX;
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_max",
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "wmem_default",
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_default",
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "dev_weight",
index 1d7c1256e8458d35e4a9f9daa392aba37672e1bf..3b81092771f8b7beb74349a84819046f7c080df1 100644 (file)
@@ -1062,7 +1062,7 @@ source_ok:
        if (decnet_debug_level & 16)
                printk(KERN_DEBUG
                       "dn_route_output_slow: initial checks complete."
-                      " dst=%o4x src=%04x oif=%d try_hard=%d\n",
+                      " dst=%04x src=%04x oif=%d try_hard=%d\n",
                       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
                       fld.flowidn_oif, try_hard);
 
index a138d75751df2fb46219168c01fd1bf5cce24d43..44d27469ae55982d1895021b79ba76a85c1324a8 100644 (file)
@@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
        struct hsr_port *port;
 
        hsr = netdev_priv(hsr_dev);
+
+       rtnl_lock();
        hsr_for_each_port(hsr, port)
                hsr_del_port(port);
+       rtnl_unlock();
 
        del_timer_sync(&hsr->prune_timer);
        del_timer_sync(&hsr->announce_timer);
index 779d28b65417a6e62b687d8f5ea36d6be285f417..cd37d0011b424824fd113ffd4da59f36c116996a 100644 (file)
@@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                        return NOTIFY_DONE;     /* Not an HSR device */
                hsr = netdev_priv(dev);
                port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+               if (port == NULL) {
+                       /* Resend of notification concerning removed device? */
+                       return NOTIFY_DONE;
+               }
        } else {
                hsr = port->hsr;
        }
index a348dcbcd683e6858248bf17ee73e7e24d08b4ea..7d37366cc695554ae243f940869b46d26f598b65 100644 (file)
@@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port)
        list_del_rcu(&port->port_list);
 
        if (port != master) {
-               netdev_update_features(master->dev);
-               dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               if (master != NULL) {
+                       netdev_update_features(master->dev);
+                       dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               }
                netdev_rx_handler_unregister(port->dev);
                dev_set_promiscuity(port->dev, -1);
        }
@@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port)
         */
 
        synchronize_rcu();
-       dev_put(port->dev);
+
+       if (port != master)
+               dev_put(port->dev);
 }
index 14d02ea905b6bea37240f88054f0cd42db73c4c2..3e44b9b0b78ece392a1f1b0763b5445cadfb2557 100644 (file)
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
+               sched_annotate_sleep();
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
index 81751f12645f6224a7f076dea1da2fc0d638819a..592aff37366bb932c0aeb2bad944b5470374ec21 100644 (file)
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
        mutex_unlock(&inet_diag_table_mutex);
 }
 
+static size_t inet_sk_attr_size(void)
+{
+       return    nla_total_size(sizeof(struct tcp_info))
+               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+               + nla_total_size(1) /* INET_DIAG_TOS */
+               + nla_total_size(1) /* INET_DIAG_TCLASS */
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
+               + nla_total_size(sizeof(struct inet_diag_msg))
+               + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+               + nla_total_size(TCP_CA_NAME_MAX)
+               + nla_total_size(sizeof(struct tcpvegas_info))
+               + 64;
+}
+
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,                   
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
        if (err)
                goto out;
 
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) +
-                       sizeof(struct tcp_info) + 64, GFP_KERNEL);
+       rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
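
inet_sk_attr_size() replaces the hand-counted allocation with one term per attribute the dump can emit; each nla_total_size() is the attribute payload plus the 4-byte netlink attribute header, rounded up to 4-byte alignment. A userspace sketch of that accounting (the payload sizes below are placeholders, not the real struct sizes):

#include <stdio.h>
#include <stddef.h>

#define NLA_HDRLEN   4UL                        /* netlink attribute header */
#define NLA_ALIGN(x) (((x) + 3UL) & ~3UL)       /* pad payloads to 4 bytes  */

/* Space one attribute occupies in the message: header plus padded payload. */
static size_t nla_total(size_t payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
        /* Placeholder payload sizes standing in for tcp_info, meminfo, etc. */
        size_t total = nla_total(192)           /* tcp_info-sized blob */
                     + nla_total(1)             /* shutdown state      */
                     + nla_total(1)             /* TOS                 */
                     + nla_total(1)             /* traffic class       */
                     + nla_total(32)            /* memory info         */
                     + nla_total(16)            /* CA name             */
                     + 64;                      /* slack, as in the patch */

        printf("reply allocation: %zu bytes\n", total);
        return 0;
}
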
index 787b3c294ce672244ce08c5426c03bbd1f71c0f3..d9bc28ac5d1b97340e79aae1eefcbac3f463251a 100644 (file)
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
+       skb_sender_cpu_clear(skb);
        return dst_output(skb);
 }
 
index e5b6d0ddcb5808f662ca0b1fd5863d63e6b54b83..145a50c4d56630a5fc97283d85c3fa29e10ab476 100644 (file)
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
        struct iphdr iph;
+       int netoff;
        u32 len;
 
        if (skb->protocol != htons(ETH_P_IP))
                return skb;
 
-       if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+       netoff = skb_network_offset(skb);
+
+       if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
                return skb;
 
        if (iph.ihl < 5 || iph.version != 4)
                return skb;
 
        len = ntohs(iph.tot_len);
-       if (skb->len < len || len < (iph.ihl * 4))
+       if (skb->len < netoff + len || len < (iph.ihl * 4))
                return skb;
 
        if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
-                       if (!pskb_may_pull(skb, iph.ihl*4))
+                       if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
                                return skb;
-                       if (pskb_trim_rcsum(skb, len))
+                       if (pskb_trim_rcsum(skb, netoff + len))
                                return skb;
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                        if (ip_defrag(skb, user))
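
ip_check_defrag() now reads the IP header at skb_network_offset() rather than at offset 0, and every length comparison and trim gains the same offset, since the buffer may still carry link-layer bytes in front of the network header. A toy userspace sketch of why the offset has to be added on both sides of those checks (the sizes are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
        size_t   netoff  = 14;  /* pretend link-layer bytes before the IP header */
        uint16_t tot_len = 40;  /* total length advertised by the IP header      */
        size_t   have    = 48;  /* bytes actually present in the buffer          */

        /* Old check: ignores the offset, so a truncated packet looks complete. */
        int naive = have >= tot_len;

        /* Fixed check: the buffer must hold the offset plus the datagram. */
        int offset_aware = have >= netoff + tot_len;

        printf("naive: %d, offset-aware: %d\n", naive, offset_aware);
        return 0;
}
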
index d68199d9b2b01faf7a5272862b7cd8658f08c8ae..a7aea2048a0d7a624ceb79923d25e9750ec6fa9a 100644 (file)
@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk,
        cork->length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+           (sk->sk_type == SOCK_DGRAM)) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
index 31d8c71986b40e28e5c84f5e62d474a639d3bf91..5cd99271d3a6a07c17a915fddde7a5a0c8a86618 100644 (file)
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
                kfree_skb(skb);
 }
 
-static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
-                                         const struct sk_buff *skb,
-                                         int ee_origin)
+/* IPv4 supports cmsg on all icmp errors and some timestamps
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+                                      struct sk_buff *skb,
+                                      int ee_origin)
 {
-       struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
+       struct in_pktinfo *info;
+
+       if (ee_origin == SO_EE_ORIGIN_ICMP)
+               return true;
 
-       if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
-           (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+       if (ee_origin == SO_EE_ORIGIN_LOCAL)
+               return false;
+
+       /* Support IP_PKTINFO on tstamp packets if requested, to correlate
+        * timestamp with egress dev. Not possible for packets without dev
+        * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+        */
+       if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
            (!skb->dev))
                return false;
 
+       info = PKTINFO_SKB_CB(skb);
        info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
        info->ipi_ifindex = skb->dev->ifindex;
        return true;
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        serr = SKB_EXT_ERR(skb);
 
-       if (sin && skb->len) {
+       if (sin && serr->port) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
                                                   serr->addr_offset);
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
        sin = &errhdr.offender;
        memset(sin, 0, sizeof(*sin));
 
-       if (skb->len &&
-           (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-            ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
+       if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                if (inet_sk(sk)->cmsg_flags)
index e9f66e1cda507cf2d5cb532958d23a89beeccaba..208d5439e59b2e8c3ccb2da46c292ad4f75b3784 100644 (file)
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
        kgid_t low, high;
        int ret = 0;
 
+       if (sk->sk_family == AF_INET6)
+               sk->sk_ipv6only = 1;
+
        inet_get_ping_group_range_net(net, &low, &high);
        if (gid_lte(low, group) && gid_lte(group, high))
                return 0;
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                if (addr_len < sizeof(*addr))
                        return -EINVAL;
 
+               if (addr->sin_family != AF_INET &&
+                   !(addr->sin_family == AF_UNSPEC &&
+                     addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+                       return -EAFNOSUPPORT;
+
                pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
                         sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                        return -EINVAL;
 
                if (addr->sin6_family != AF_INET6)
-                       return -EINVAL;
+                       return -EAFNOSUPPORT;
 
                pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
                         sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                if (msg->msg_namelen < sizeof(*usin))
                        return -EINVAL;
                if (usin->sin_family != AF_INET)
-                       return -EINVAL;
+                       return -EAFNOSUPPORT;
                daddr = usin->sin_addr.s_addr;
                /* no remote port */
        } else {
index 9d72a0fcd9284425e088cef6e1b8c14e95950ca4..995a2259bcfc80894caec08fe2e7ccd62311e227 100644 (file)
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                                       int large_allowed)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 new_size_goal, size_goal, hlen;
+       u32 new_size_goal, size_goal;
 
        if (!large_allowed || !sk_can_gso(sk))
                return mss_now;
 
-       /* Maybe we should/could use sk->sk_prot->max_header here ? */
-       hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
-              inet_csk(sk)->icsk_ext_hdr_len +
-              tp->tcp_header_len;
-
-       new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+       /* Note : tcp_tso_autosize() will eventually split this later */
+       new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
        new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
 
        /* We try hard to avoid divides here */
index d694088214cd87fa50e413730402499610c546e6..62856e185a935e44deb26de59ff94c8bf7500579 100644 (file)
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
  */
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+       /* If credits accumulated at a higher w, apply them gently now. */
+       if (tp->snd_cwnd_cnt >= w) {
+               tp->snd_cwnd_cnt = 0;
+               tp->snd_cwnd++;
+       }
+
        tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
                u32 delta = tp->snd_cwnd_cnt / w;
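
The tcp_cong_avoid_ai() change drains credit accumulated while w was larger as a single cwnd increment before the normal additive-increase step, instead of letting snd_cwnd_cnt / w turn into one large jump. A userspace sketch of that arithmetic (clamping and the rest of the TCP state are omitted):

#include <stdio.h>

/* Additive increase: w is the number of ACKed packets per cwnd increment. */
static void cong_avoid_ai(unsigned int *cwnd, unsigned int *cnt,
                          unsigned int w, unsigned int acked)
{
        /* Credit accumulated while w was larger: apply it gently. */
        if (*cnt >= w) {
                *cnt = 0;
                *cwnd += 1;
        }

        *cnt += acked;
        if (*cnt >= w) {
                unsigned int delta = *cnt / w;

                *cnt -= delta * w;
                *cwnd += delta;
        }
}

int main(void)
{
        unsigned int cwnd = 10, cnt = 40, w = 4;        /* stale credit: cnt >> w */

        cong_avoid_ai(&cwnd, &cnt, w, 1);
        printf("cwnd=%u cnt=%u\n", cwnd, cnt);          /* grows by 1, not by 10 */
        return 0;
}
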
index 4b276d1ed9807057986bd3b050e2e901bf1afec0..06d3d665a9fd1bfda5688907a284de83697273f6 100644 (file)
@@ -306,8 +306,10 @@ tcp_friendliness:
                }
        }
 
-       if (ca->cnt == 0)                       /* cannot be zero */
-               ca->cnt = 1;
+       /* The maximum rate of cwnd increase CUBIC allows is 1 packet per
+        * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
+        */
+       ca->cnt = max(ca->cnt, 2U);
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
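
In CUBIC, ca->cnt is the number of ACKed packets needed per one-segment cwnd increase, so clamping it to at least 2 caps growth at one segment per two ACKs; with roughly one ACK per segment per RTT that is about cwnd/2 extra segments, i.e. the 1.5x-per-RTT ceiling the new comment mentions. A back-of-the-envelope sketch of that bound:

#include <stdio.h>

int main(void)
{
        unsigned int cwnd = 100;
        unsigned int cnt = 2;           /* ACKed packets needed per increment  */
        unsigned int acks = cwnd;       /* roughly one ACK per segment per RTT */
        unsigned int growth = acks / cnt;

        printf("cwnd after one RTT: %u (%.2fx)\n",
               cwnd + growth, (double)(cwnd + growth) / cwnd);
        return 0;
}
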
index 8fdd27b173061def484663beeace691a8bfa2365..fb4cf8b8e121acd4bffcf2fdfbd7e03c76bad7cc 100644 (file)
@@ -4770,7 +4770,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
                return false;
 
        /* If we filled the congestion window, do not expand.  */
-       if (tp->packets_out >= tp->snd_cwnd)
+       if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
                return false;
 
        return true;
index d5f6bd9a210ab93ed27177ecffc9499259dbe999..dab73813cb9208dafaae1277e281c2255601a771 100644 (file)
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
                return err;
 
        IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+       skb->protocol = htons(ETH_P_IP);
 
        return x->outer_mode->output2(x, skb);
 }
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 int xfrm4_output_finish(struct sk_buff *skb)
 {
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       skb->protocol = htons(ETH_P_IP);
 
 #ifdef CONFIG_NETFILTER
        IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
index 98e4a63d72bb435e1ac1ae7cf2767072eed6db92..b6030025f41197efbcdfd1d8c013e469413550b5 100644 (file)
@@ -4903,6 +4903,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static
+int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct inet6_dev *idev = ctl->extra1;
+       int min_mtu = IPV6_MIN_MTU;
+       struct ctl_table lctl;
+
+       lctl = *ctl;
+       lctl.extra1 = &min_mtu;
+       lctl.extra2 = idev ? &idev->dev->mtu : NULL;
+
+       return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
+}
+
 static void dev_disable_change(struct inet6_dev *idev)
 {
        struct netdev_notifier_info info;
@@ -5054,7 +5069,7 @@ static struct addrconf_sysctl_table
                        .data           = &ipv6_devconf.mtu6,
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
-                       .proc_handler   = proc_dointvec,
+                       .proc_handler   = addrconf_sysctl_mtu,
                },
                {
                        .procname       = "accept_ra",
index c215be70cac08af78953ea860c5d67cf9d3fa642..ace8daca5c8361ad37073a4eeb0f8d55c622d807 100644 (file)
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
        kfree_skb(skb);
 }
 
-static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb)
+/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+ *
+ * At one point, excluding local errors was a quick test to identify icmp/icmp6
+ * errors. This is no longer true, but the test remained, so the v6 stack,
+ * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+                                     struct sock_exterr_skb *serr)
 {
-       int ifindex = skb->dev ? skb->dev->ifindex : -1;
+       if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+           serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
+               return true;
+
+       if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
+               return false;
+
+       if (!skb->dev)
+               return false;
 
        if (skb->protocol == htons(ETH_P_IPV6))
-               IP6CB(skb)->iif = ifindex;
+               IP6CB(skb)->iif = skb->dev->ifindex;
        else
-               PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex;
+               PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
+
+       return true;
 }
 
 /*
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 
        serr = SKB_EXT_ERR(skb);
 
-       if (sin && skb->len) {
+       if (sin && serr->port) {
                const unsigned char *nh = skb_network_header(skb);
                sin->sin6_family = AF_INET6;
                sin->sin6_flowinfo = 0;
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
        memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
        sin = &errhdr.offender;
        memset(sin, 0, sizeof(*sin));
-       if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) {
+
+       if (ip6_datagram_support_cmsg(skb, serr)) {
                sin->sin6_family = AF_INET6;
-               if (np->rxopt.all) {
-                       if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
-                           serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
-                               ip6_datagram_prepare_pktinfo_errqueue(skb);
+               if (np->rxopt.all)
                        ip6_datagram_recv_common_ctl(sk, msg, skb);
-               }
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
index 7deebf102cbafc276f45e4eaffdd8efdb658d842..7e80b61b51ff474db6c188218b70f12709209256 100644 (file)
@@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
 
 static inline int ip6_forward_finish(struct sk_buff *skb)
 {
+       skb_sender_cpu_clear(skb);
        return dst_output(skb);
 }
 
@@ -1298,7 +1299,8 @@ emsgsize:
        if (((length > mtu) ||
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO)) {
+           (rt->dst.dev->features & NETIF_F_UFO) &&
+           (sk->sk_type == SOCK_DGRAM)) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen,
                                          transhdrlen, mtu, flags, rt);
index 266a264ec21273147b32d75ac7bde55afdd1b4cf..ddd94eca19b3986e4fc0b1ff684eee401032815e 100644 (file)
@@ -314,7 +314,7 @@ out:
  *   Create tunnel matching given parameters.
  *
  * Return:
- *   created tunnel or NULL
+ *   created tunnel or error pointer
  **/
 
 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
@@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
        struct net_device *dev;
        struct ip6_tnl *t;
        char name[IFNAMSIZ];
-       int err;
+       int err = -ENOMEM;
 
        if (p->name[0])
                strlcpy(name, p->name, IFNAMSIZ);
@@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 failed_free:
        ip6_dev_free(dev);
 failed:
-       return NULL;
+       return ERR_PTR(err);
 }
 
 /**
@@ -362,7 +362,7 @@ failed:
  *   tunnel device is created and registered for use.
  *
  * Return:
- *   matching tunnel or NULL
+ *   matching tunnel or error pointer
  **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
@@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
                    ipv6_addr_equal(remote, &t->parms.raddr)) {
                        if (create)
-                               return NULL;
+                               return ERR_PTR(-EEXIST);
 
                        return t;
                }
        }
        if (!create)
-               return NULL;
+               return ERR_PTR(-ENODEV);
        return ip6_tnl_create(net, p);
 }
 
@@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        }
                        ip6_tnl_parm_from_user(&p1, &p);
                        t = ip6_tnl_locate(net, &p1, 0);
-                       if (t == NULL)
+                       if (IS_ERR(t))
                                t = netdev_priv(dev);
                } else {
                        memset(&p, 0, sizeof(p));
@@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                ip6_tnl_parm_from_user(&p1, &p);
                t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
                if (cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (!IS_ERR(t)) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
@@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        else
                                err = ip6_tnl_update(t, &p1);
                }
-               if (t) {
+               if (!IS_ERR(t)) {
                        err = 0;
                        ip6_tnl_parm_to_user(&p, &t->parms);
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
 
-               } else
-                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               } else {
+                       err = PTR_ERR(t);
+               }
                break;
        case SIOCDELTUNNEL:
                err = -EPERM;
@@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        err = -ENOENT;
                        ip6_tnl_parm_from_user(&p1, &p);
                        t = ip6_tnl_locate(net, &p1, 0);
-                       if (t == NULL)
+                       if (IS_ERR(t))
                                break;
                        err = -EPERM;
                        if (t->dev == ip6n->fb_tnl_dev)
@@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[])
 {
        struct net *net = dev_net(dev);
-       struct ip6_tnl *nt;
+       struct ip6_tnl *nt, *t;
 
        nt = netdev_priv(dev);
        ip6_tnl_netlink_parms(data, &nt->parms);
 
-       if (ip6_tnl_locate(net, &nt->parms, 0))
+       t = ip6_tnl_locate(net, &nt->parms, 0);
+       if (!IS_ERR(t))
                return -EEXIST;
 
        return ip6_tnl_create2(dev);
@@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
        ip6_tnl_netlink_parms(data, &p);
 
        t = ip6_tnl_locate(net, &p, 0);
-
-       if (t) {
+       if (!IS_ERR(t)) {
                if (t->dev != dev)
                        return -EEXIST;
        } else
index bd46f736f61d74bcb75a4dabef264154f55a9fb0..a2dfff6ff227e09607d1d267265e7635d64a2030 100644 (file)
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
-               if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
-                   u->sin6_family != AF_INET6) {
+               if (msg->msg_namelen < sizeof(*u))
                        return -EINVAL;
+               if (u->sin6_family != AF_INET6) {
+                       return -EAFNOSUPPORT;
                }
                if (sk->sk_bound_dev_if &&
                    sk->sk_bound_dev_if != u->sin6_scope_id) {
index ca3f29b98ae5d76b7e69c38f617362a90fd9fd04..010f8bd2d577f9767d7d44182b246e62f4a118f6 100644 (file)
@@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
                return err;
 
        skb->ignore_df = 1;
+       skb->protocol = htons(ETH_P_IPV6);
 
        return x->outer_mode->output2(x, skb);
 }
@@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 int xfrm6_output_finish(struct sk_buff *skb)
 {
        memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-       skb->protocol = htons(ETH_P_IPV6);
 
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
index 48bf5a06847bd59db7834758b22aa9208d727940..8d2d01b4800a197eaaa64fb184f56c58920ad462 100644 (file)
@@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPPROTO_MH:
+                       offset += ipv6_optlen(exthdr);
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
                                struct ip6_mh *mh;
 
index 40695b9751c10b41e7fd310fa5d15bcef4629549..683346d2d633b4b2ac839e975e2c21d687242075 100644 (file)
@@ -798,7 +798,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
        orig_jiffies = jiffies;
 
        /* Set poll time to 200 ms */
-       poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
+       poll_time = msecs_to_jiffies(200);
+       if (timeout)
+               poll_time = min_t(unsigned long, timeout, poll_time);
 
        spin_lock_irqsave(&self->spinlock, flags);
        while (self->tx_skb && self->tx_skb->len) {
@@ -811,7 +813,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
                        break;
        }
        spin_unlock_irqrestore(&self->spinlock, flags);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
 }
 
 /*
index 3c83a1e5ab0394f0eaa04b4ba5a813d118c17434..1215693fdd22897b5217b878763d56d087a25372 100644 (file)
@@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket *      ap,
 
   /* Put ourselves on the wait queue to be woken up */
   add_wait_queue(&irnet_events.rwait, &wait);
-  current->state = TASK_INTERRUPTIBLE;
+  set_current_state(TASK_INTERRUPTIBLE);
   for(;;)
     {
       /* If there is unread events */
@@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket *      ap,
       /* Yield and wait to be woken up */
       schedule();
     }
-  current->state = TASK_RUNNING;
+  __set_current_state(TASK_RUNNING);
   remove_wait_queue(&irnet_events.rwait, &wait);
 
   /* Did we got it ? */
index ff0d2db09df9db467a5831606971e02f2fe6d410..5bcd4e5589d3294602c4abdeff778497afbc8de1 100644 (file)
@@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
        if (ieee80211_chanctx_refcount(local, ctx) == 0)
                ieee80211_free_chanctx(local, ctx);
 
+       sdata->radar_required = false;
+
        /* Unreserving may ready an in-place reservation. */
        if (use_reserved_switch)
                ieee80211_vif_use_reserved_switch(local);
@@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
  out:
+       if (ret)
+               sdata->radar_required = false;
+
        mutex_unlock(&local->chanctx_mtx);
        return ret;
 }
index 3afe36824703f49dcfe374e103b6372a9851b8aa..8d53d65bd2abc2d993ae5dbc7df37b707fd9ec13 100644 (file)
@@ -58,13 +58,24 @@ struct ieee80211_local;
 #define IEEE80211_UNSET_POWER_LEVEL    INT_MIN
 
 /*
- * Some APs experience problems when working with U-APSD. Decrease the
- * probability of that happening by using legacy mode for all ACs but VO.
- * The AP that caused us trouble was a Cisco 4410N. It ignores our
- * setting, and always treats non-VO ACs as legacy.
+ * Some APs experience problems when working with U-APSD. Decreasing the
+ * probability of that happening by using legacy mode for all ACs but VO isn't
+ * enough.
+ *
+ * Cisco 4410N originally forced us to enable VO by default only because it
+ * treated non-VO ACs as legacy.
+ *
+ * However some APs (notably Netgear R7000) silently reclassify packets to
+ * different ACs. Since u-APSD ACs require trigger frames for frame retrieval
+ * clients would never see some frames (e.g. ARP responses) or would fetch them
+ * accidentally after a long time.
+ *
+ * It makes little sense to enable u-APSD queues by default because it needs
+ * userspace applications to be aware of it to actually take advantage of the
+ * possible additional powersavings. Implicitly depending on driver autotrigger
+ * frame support doesn't make much sense.
  */
-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
-       IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
+#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
 
 #define IEEE80211_DEFAULT_MAX_SP_LEN           \
        IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -453,6 +464,7 @@ struct ieee80211_if_managed {
        unsigned int flags;
 
        bool csa_waiting_bcn;
+       bool csa_ignored_same_chan;
 
        bool beacon_crc_valid;
        u32 beacon_crc;
index 10ac6324c1d014c708749748ce89ef31055561cf..142f66aece18a8789205fc12b0e1d7c7816d0863 100644 (file)
@@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                return;
        }
 
+       if (cfg80211_chandef_identical(&csa_ie.chandef,
+                                      &sdata->vif.bss_conf.chandef)) {
+               if (ifmgd->csa_ignored_same_chan)
+                       return;
+               sdata_info(sdata,
+                          "AP %pM tries to chanswitch to same channel, ignore\n",
+                          ifmgd->associated->bssid);
+               ifmgd->csa_ignored_same_chan = true;
+               return;
+       }
+
        mutex_lock(&local->mtx);
        mutex_lock(&local->chanctx_mtx);
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        sdata->vif.csa_active = true;
        sdata->csa_chandef = csa_ie.chandef;
        sdata->csa_block_tx = csa_ie.mode;
+       ifmgd->csa_ignored_same_chan = false;
 
        if (sdata->csa_block_tx)
                ieee80211_stop_vif_queues(local, sdata,
@@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        sdata->vif.csa_active = false;
        ifmgd->csa_waiting_bcn = false;
+       ifmgd->csa_ignored_same_chan = false;
        if (sdata->csa_block_tx) {
                ieee80211_wake_vif_queues(local, sdata,
                                          IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -3204,7 +3217,8 @@ static const u64 care_about_ies =
        (1ULL << WLAN_EID_CHANNEL_SWITCH) |
        (1ULL << WLAN_EID_PWR_CONSTRAINT) |
        (1ULL << WLAN_EID_HT_CAPABILITY) |
-       (1ULL << WLAN_EID_HT_OPERATION);
+       (1ULL << WLAN_EID_HT_OPERATION) |
+       (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
 
 static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_mgmt *mgmt, size_t len,
index 7c86a002df95fee46be8e7dfdb0d691fff0e9e7f..ef6e8a6c4253c72f6f2398b733e8c427f5b59953 100644 (file)
@@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
                rate++;
                mi->sample_deferred++;
        } else {
-               if (!msr->sample_limit != 0)
+               if (!msr->sample_limit)
                        return;
 
                mi->sample_packets++;
index 1101563357eae365f1e1a1df926ecf36fdc0570b..944bdc04e913d2f599b6c5845ee2549abba20a1c 100644 (file)
@@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        hdr = (struct ieee80211_hdr *) skb->data;
        mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
 
+       if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
+               return RX_DROP_MONITOR;
+
        /* frame is in RMC, don't forward */
        if (ieee80211_is_data(hdr->frame_control) &&
            is_multicast_ether_addr(hdr->addr1) &&
index 88a18ffe2975520edbcc80733bc1bbc9b2655f11..07bd8db00af84b820139c644da95eaf29e474b5f 100644 (file)
@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
                if (tx->sdata->control_port_no_encrypt)
                        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
                info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+               info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
        }
 
        return TX_CONTINUE;
index 8428f4a954795657a32a24f77a0f9c9ae6591b7e..747bdcf72e92788574ec3e2e635b5e6b96cb75f0 100644 (file)
@@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
                wdev_iter = &sdata_iter->wdev;
 
                if (sdata_iter == sdata ||
-                   rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
+                   !ieee80211_sdata_running(sdata_iter) ||
                    local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
                        continue;
 
index e55759056361c47ed1fcfa5c656541ba39bfd260..ed99448671c3003374fc947bee6e91ab0f0d3fce 100644 (file)
@@ -3402,7 +3402,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
                if (udest.af == 0)
                        udest.af = svc->af;
 
-               if (udest.af != svc->af) {
+               if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
                        /* The synchronization protocol is incompatible
                         * with mixed family services
                         */
index c47ffd7a0a709cb73834c84652f251960f25db79..d93ceeb3ef04822427004ef0a70549f389d17354 100644 (file)
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
                        IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
                        return;
                }
+               if (!(flags & IP_VS_CONN_F_TEMPLATE))
+                       kfree(param->pe_data);
        }
 
        if (opt)
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
                                (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
                                );
 #endif
+       ip_vs_pe_put(param.pe);
        return 0;
        /* Error exit */
 out:
index 199fd0f27b0e128cfb8674ca331c2dae240e1b1c..6ab777912237976ed72cdd34f18a7bea336f44ae 100644 (file)
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
 
 static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
 {
-       rule->genmask = 0;
+       rule->genmask &= ~(1 << gencursor_next(net));
 }
 
 static int
@@ -1711,9 +1711,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
        }
        nla_nest_end(skb, list);
 
-       if (rule->ulen &&
-           nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
-               goto nla_put_failure;
+       if (rule->udata) {
+               struct nft_userdata *udata = nft_userdata(rule);
+               if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
+                           udata->data) < 0)
+                       goto nla_put_failure;
+       }
 
        nlmsg_end(skb, nlh);
        return 0;
@@ -1896,11 +1899,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
        struct nft_table *table;
        struct nft_chain *chain;
        struct nft_rule *rule, *old_rule = NULL;
+       struct nft_userdata *udata;
        struct nft_trans *trans = NULL;
        struct nft_expr *expr;
        struct nft_ctx ctx;
        struct nlattr *tmp;
-       unsigned int size, i, n, ulen = 0;
+       unsigned int size, i, n, ulen = 0, usize = 0;
        int err, rem;
        bool create;
        u64 handle, pos_handle;
@@ -1968,12 +1972,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                        n++;
                }
        }
+       /* Check for overflow of dlen field */
+       err = -EFBIG;
+       if (size >= 1 << 12)
+               goto err1;
 
-       if (nla[NFTA_RULE_USERDATA])
+       if (nla[NFTA_RULE_USERDATA]) {
                ulen = nla_len(nla[NFTA_RULE_USERDATA]);
+               if (ulen > 0)
+                       usize = sizeof(struct nft_userdata) + ulen;
+       }
 
        err = -ENOMEM;
-       rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL);
+       rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
        if (rule == NULL)
                goto err1;
 
@@ -1981,10 +1992,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        rule->handle = handle;
        rule->dlen   = size;
-       rule->ulen   = ulen;
+       rule->udata  = ulen ? 1 : 0;
 
-       if (ulen)
-               nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen);
+       if (ulen) {
+               udata = nft_userdata(rule);
+               udata->len = ulen - 1;
+               nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
+       }
 
        expr = nft_expr_first(rule);
        for (i = 0; i < n; i++) {
@@ -2031,12 +2045,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 err3:
        list_del_rcu(&rule->list);
-       if (trans) {
-               list_del_rcu(&nft_trans_rule(trans)->list);
-               nft_rule_clear(net, nft_trans_rule(trans));
-               nft_trans_destroy(trans);
-               chain->use++;
-       }
 err2:
        nf_tables_rule_destroy(&ctx, rule);
 err1:
@@ -3612,12 +3620,11 @@ static int nf_tables_commit(struct sk_buff *skb)
                                                 &te->elem,
                                                 NFT_MSG_DELSETELEM, 0);
                        te->set->ops->get(te->set, &te->elem);
-                       te->set->ops->remove(te->set, &te->elem);
                        nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
-                       if (te->elem.flags & NFT_SET_MAP) {
-                               nft_data_uninit(&te->elem.data,
-                                               te->set->dtype);
-                       }
+                       if (te->set->flags & NFT_SET_MAP &&
+                           !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+                               nft_data_uninit(&te->elem.data, te->set->dtype);
+                       te->set->ops->remove(te->set, &te->elem);
                        nft_trans_destroy(trans);
                        break;
                }
@@ -3658,7 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb)
 {
        struct net *net = sock_net(skb->sk);
        struct nft_trans *trans, *next;
-       struct nft_set *set;
+       struct nft_trans_elem *te;
 
        list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
                switch (trans->msg_type) {
@@ -3719,9 +3726,13 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWSETELEM:
                        nft_trans_elem_set(trans)->nelems--;
-                       set = nft_trans_elem_set(trans);
-                       set->ops->get(set, &nft_trans_elem(trans));
-                       set->ops->remove(set, &nft_trans_elem(trans));
+                       te = (struct nft_trans_elem *)trans->data;
+                       te->set->ops->get(te->set, &te->elem);
+                       nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
+                       if (te->set->flags & NFT_SET_MAP &&
+                           !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+                               nft_data_uninit(&te->elem.data, te->set->dtype);
+                       te->set->ops->remove(te->set, &te->elem);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_DELSETELEM:
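
The nf_tables_newrule() hunks above replace the old ulen byte count with a single rule->udata presence bit plus a struct nft_userdata trailer whose u8 length field stores len - 1, so the full 1..256 byte range fits, and the explicit size >= 1 << 12 test keeps size within the now-narrower dlen bitfield. A minimal sketch of the len - 1 encoding (the struct below is illustrative, not the kernel layout):

#include <stdio.h>
#include <stdint.h>

/* Illustrative trailer: a u8 field storing length - 1 covers 1..256 bytes. */
struct userdata {
        uint8_t len;                    /* real length minus one */
        uint8_t data[256];
};

static unsigned int userdata_len(const struct userdata *u)
{
        return u->len + 1u;
}

int main(void)
{
        struct userdata u = { .len = 256 - 1 };

        printf("encoded %u, decoded length %u\n", u.len, userdata_len(&u));
        return 0;
}
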
index c598f74063a19ebd51ea786530c0669d6f92b8c3..213584cf04b34858164a07f99f1fc0b623c1ab44 100644 (file)
@@ -123,7 +123,7 @@ static void
 nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                           const struct nft_ctx *ctx,
                           struct xt_target *target, void *info,
-                          union nft_entry *entry, u8 proto, bool inv)
+                          union nft_entry *entry, u16 proto, bool inv)
 {
        par->net        = ctx->net;
        par->table      = ctx->table->name;
@@ -137,7 +137,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
                break;
        case NFPROTO_BRIDGE:
-               entry->ebt.ethproto = proto;
+               entry->ebt.ethproto = (__force __be16)proto;
                entry->ebt.invflags = inv ? EBT_IPROTO : 0;
                break;
        }
@@ -171,7 +171,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
        [NFTA_RULE_COMPAT_FLAGS]        = { .type = NLA_U32 },
 };
 
-static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
+static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
 {
        struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
        u32 flags;
@@ -203,7 +203,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        struct xt_target *target = expr->ops->data;
        struct xt_tgchk_param par;
        size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
-       u8 proto = 0;
+       u16 proto = 0;
        bool inv = false;
        union nft_entry e = {};
        int ret;
@@ -334,7 +334,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
 static void
 nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                          struct xt_match *match, void *info,
-                         union nft_entry *entry, u8 proto, bool inv)
+                         union nft_entry *entry, u16 proto, bool inv)
 {
        par->net        = ctx->net;
        par->table      = ctx->table->name;
@@ -348,7 +348,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
                break;
        case NFPROTO_BRIDGE:
-               entry->ebt.ethproto = proto;
+               entry->ebt.ethproto = (__force __be16)proto;
                entry->ebt.invflags = inv ? EBT_IPROTO : 0;
                break;
        }
@@ -385,7 +385,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        struct xt_match *match = expr->ops->data;
        struct xt_mtchk_param par;
        size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
-       u8 proto = 0;
+       u16 proto = 0;
        bool inv = false;
        union nft_entry e = {};
        int ret;
@@ -625,8 +625,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
                struct xt_match *match = nft_match->ops.data;
 
                if (strcmp(match->name, mt_name) == 0 &&
-                   match->revision == rev && match->family == family)
+                   match->revision == rev && match->family == family) {
+                       if (!try_module_get(match->me))
+                               return ERR_PTR(-ENOENT);
+
                        return &nft_match->ops;
+               }
        }
 
        match = xt_request_find_match(family, mt_name, rev);
@@ -695,8 +699,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
                struct xt_target *target = nft_target->ops.data;
 
                if (strcmp(target->name, tg_name) == 0 &&
-                   target->revision == rev && target->family == family)
+                   target->revision == rev && target->family == family) {
+                       if (!try_module_get(target->me))
+                               return ERR_PTR(-ENOENT);
+
                        return &nft_target->ops;
+               }
        }
 
        target = xt_request_find_target(family, tg_name, rev);
index 61e6c407476a618df386c2f14839033398aae14b..c82df0a48fcd8a649921b3fcb6b5d1edd39c6295 100644 (file)
@@ -192,8 +192,6 @@ static int nft_hash_init(const struct nft_set *set,
                .key_offset = offsetof(struct nft_hash_elem, key),
                .key_len = set->klen,
                .hashfn = jhash,
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
 
        return rhashtable_init(priv, &params);
index 30dbe34915ae2b1fcf4d0ca3149369bdf3913f0f..45e1b30e4fb214f850af2590425ab2a9748c6476 100644 (file)
@@ -378,12 +378,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        mutex_lock(&recent_mutex);
        t = recent_table_lookup(recent_net, info->name);
        if (t != NULL) {
-               if (info->hit_count > t->nstamps_max_mask) {
-                       pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
-                               info->hit_count, t->nstamps_max_mask + 1,
-                               info->name);
-                       ret = -EINVAL;
-                       goto out;
+               if (nstamp_mask > t->nstamps_max_mask) {
+                       spin_lock_bh(&recent_lock);
+                       recent_table_flush(t);
+                       t->nstamps_max_mask = nstamp_mask;
+                       spin_unlock_bh(&recent_lock);
                }
 
                t->refcnt++;
index 1ba67931eb1b168fabfa78790f5ed53713188f9d..13332dbf291d6e530b77c3c8a7d155a07788ebc3 100644 (file)
@@ -243,12 +243,13 @@ static int
 extract_icmp6_fields(const struct sk_buff *skb,
                     unsigned int outside_hdrlen,
                     int *protocol,
-                    struct in6_addr **raddr,
-                    struct in6_addr **laddr,
+                    const struct in6_addr **raddr,
+                    const struct in6_addr **laddr,
                     __be16 *rport,
-                    __be16 *lport)
+                    __be16 *lport,
+                    struct ipv6hdr *ipv6_var)
 {
-       struct ipv6hdr *inside_iph, _inside_iph;
+       const struct ipv6hdr *inside_iph;
        struct icmp6hdr *icmph, _icmph;
        __be16 *ports, _ports[2];
        u8 inside_nexthdr;
@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
        if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
                return 1;
 
-       inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph);
+       inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
+                                       sizeof(*ipv6_var), ipv6_var);
        if (inside_iph == NULL)
                return 1;
        inside_nexthdr = inside_iph->nexthdr;
 
-       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
+                                             sizeof(*ipv6_var),
                                         &inside_nexthdr, &inside_fragoff);
        if (inside_hdrlen < 0)
                return 1; /* hjm: Packet has no/incomplete transport layer headers. */
@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
 static bool
 socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 {
-       struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
        struct sock *sk = skb->sk;
-       struct in6_addr *daddr = NULL, *saddr = NULL;
+       const struct in6_addr *daddr = NULL, *saddr = NULL;
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        int thoff = 0, uninitialized_var(tproto);
        const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 
        } else if (tproto == IPPROTO_ICMPV6) {
                if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
-                                        &sport, &dport))
+                                        &sport, &dport, &ipv6_var))
                        return false;
        } else {
                return false;
index 2702673f0f237d7fa43e4649ccc0672f67c895bc..05919bf3f670ed1267e01f14c1de61e78e4d80c4 100644 (file)
@@ -3126,8 +3126,6 @@ static int __init netlink_proto_init(void)
                .key_len = sizeof(u32), /* portid */
                .hashfn = jhash,
                .max_shift = 16, /* 64K */
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
 
        if (err != 0)
index ae5e77cdc0ca1f34ff7f9c99d65ba0c8bda9ace6..5bae7243c5777e38df7be95454b8164724c769cf 100644 (file)
@@ -2194,14 +2194,55 @@ static int __net_init ovs_init_net(struct net *net)
        return 0;
 }
 
-static void __net_exit ovs_exit_net(struct net *net)
+static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
+                                           struct list_head *head)
 {
-       struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+       struct datapath *dp;
+
+       list_for_each_entry(dp, &ovs_net->dps, list_node) {
+               int i;
+
+               for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+                       struct vport *vport;
+
+                       hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
+                               struct netdev_vport *netdev_vport;
+
+                               if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
+                                       continue;
+
+                               netdev_vport = netdev_vport_priv(vport);
+                               if (dev_net(netdev_vport->dev) == dnet)
+                                       list_add(&vport->detach_list, head);
+                       }
+               }
+       }
+}
+
+static void __net_exit ovs_exit_net(struct net *dnet)
+{
+       struct datapath *dp, *dp_next;
+       struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
+       struct vport *vport, *vport_next;
+       struct net *net;
+       LIST_HEAD(head);
 
        ovs_lock();
        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);
+
+       rtnl_lock();
+       for_each_net(net)
+               list_vports_from_net(net, dnet, &head);
+       rtnl_unlock();
+
+       /* Detach all vports from given namespace. */
+       list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
+               list_del(&vport->detach_list);
+               ovs_dp_detach_port(vport);
+       }
+
        ovs_unlock();
 
        cancel_work_sync(&ovs_net->dp_notify_work);
index 216f20b90aa596b49592beee89a996cbe868d8ba..22b18c145c9221675e031de2617fcdd800405170 100644 (file)
@@ -2253,14 +2253,20 @@ static int masked_set_action_to_set_action_attr(const struct nlattr *a,
                                                struct sk_buff *skb)
 {
        const struct nlattr *ovs_key = nla_data(a);
+       struct nlattr *nla;
        size_t key_len = nla_len(ovs_key) / 2;
 
        /* Revert the conversion we did from a non-masked set action to
         * masked set action.
         */
-       if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
+       nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
+       if (!nla)
                return -EMSGSIZE;
 
+       if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
+               return -EMSGSIZE;
+
+       nla_nest_end(skb, nla);
        return 0;
 }
 
index f8ae295fb0011f7cc5dea75737833b7086641c77..bc85331a6c60cae9182bd1348d35d81117cf2943 100644 (file)
@@ -103,6 +103,7 @@ struct vport_portids {
  * @ops: Class structure.
  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
  * @err_stats: Points to error statistics used and maintained by vport
+ * @detach_list: list used for detaching vport in net-exit call.
  */
 struct vport {
        struct rcu_head rcu;
@@ -117,6 +118,7 @@ struct vport {
        struct pcpu_sw_netstats __percpu *percpu_stats;
 
        struct vport_err_stats err_stats;
+       struct list_head detach_list;
 };
 
 /**
index 9c28cec1a0838ecf8ea03ceff77fb301c5a425a7..f8db7064d81c770cda356633153230eb905ccb39 100644 (file)
@@ -698,6 +698,10 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
 
        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
                if (!frozen) {
+                       if (!BLOCK_NUM_PKTS(pbd)) {
+                               /* An empty block. Just refresh the timer. */
+                               goto refresh_timer;
+                       }
                        prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
                        if (!prb_dispatch_next_block(pkc, po))
                                goto refresh_timer;
@@ -798,7 +802,11 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
                h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
                h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
        } else {
-               /* Ok, we tmo'd - so get the current time */
+               /* Ok, we tmo'd - so get the current time.
+                *
+                * It shouldn't really happen as we don't close empty
+                * blocks. See prb_retire_rx_blk_timer_expired().
+                */
                struct timespec ts;
                getnstimeofday(&ts);
                h1->ts_last_pkt.ts_sec = ts.tv_sec;
@@ -1349,14 +1357,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
                return 0;
        }
 
+       if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
+               skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
+               if (!skb)
+                       return 0;
+       }
        switch (f->type) {
        case PACKET_FANOUT_HASH:
        default:
-               if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
-                       skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
-                       if (!skb)
-                               return 0;
-               }
                idx = fanout_demux_hash(f, skb, num);
                break;
        case PACKET_FANOUT_LB:
@@ -3115,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
        return 0;
 }
 
-static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
+static void packet_dev_mclist_delete(struct net_device *dev,
+                                    struct packet_mclist **mlp)
 {
-       for ( ; i; i = i->next) {
-               if (i->ifindex == dev->ifindex)
-                       packet_dev_mc(dev, i, what);
+       struct packet_mclist *ml;
+
+       while ((ml = *mlp) != NULL) {
+               if (ml->ifindex == dev->ifindex) {
+                       packet_dev_mc(dev, ml, -1);
+                       *mlp = ml->next;
+                       kfree(ml);
+               } else
+                       mlp = &ml->next;
        }
 }
 
@@ -3196,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
                                        packet_dev_mc(dev, ml, -1);
                                kfree(ml);
                        }
-                       rtnl_unlock();
-                       return 0;
+                       break;
                }
        }
        rtnl_unlock();
-       return -EADDRNOTAVAIL;
+       return 0;
 }
 
 static void packet_flush_mclist(struct sock *sk)
@@ -3551,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this,
                switch (msg) {
                case NETDEV_UNREGISTER:
                        if (po->mclist)
-                               packet_dev_mclist(dev, po->mclist, -1);
+                               packet_dev_mclist_delete(dev, &po->mclist);
                        /* fallthrough */
 
                case NETDEV_DOWN:
index a817705ce2d0e9246388c2c65d77b4c486a8feba..dba8d0864f18046ee87a168d49cc159518fa2916 100644 (file)
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                        int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
+static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
+                            struct rds_iw_device **rds_iwdev,
+                            struct rdma_cm_id **cm_id)
 {
        struct rds_iw_device *iwdev;
        struct rds_iw_cm_id *i_cm_id;
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
                                src_addr->sin_port,
                                dst_addr->sin_addr.s_addr,
                                dst_addr->sin_port,
-                               rs->rs_bound_addr,
-                               rs->rs_bound_port,
-                               rs->rs_conn_addr,
-                               rs->rs_conn_port);
+                               src->sin_addr.s_addr,
+                               src->sin_port,
+                               dst->sin_addr.s_addr,
+                               dst->sin_port);
 #ifdef WORKING_TUPLE_DETECTION
-                       if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
-                           src_addr->sin_port == rs->rs_bound_port &&
-                           dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
-                           dst_addr->sin_port == rs->rs_conn_port) {
+                       if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
+                           src_addr->sin_port == src->sin_port &&
+                           dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
+                           dst_addr->sin_port == dst->sin_port) {
 #else
                        /* FIXME - needs to compare the local and remote
                         * ipaddr/port tuple, but the ipaddr is the only
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
                         * zero'ed.  It doesn't appear to be properly populated
                         * during connection setup...
                         */
-                       if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
+                       if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
 #endif
                                spin_unlock_irq(&iwdev->spinlock);
                                *rds_iwdev = iwdev;
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
 {
        struct sockaddr_in *src_addr, *dst_addr;
        struct rds_iw_device *rds_iwdev_old;
-       struct rds_sock rs;
        struct rdma_cm_id *pcm_id;
        int rc;
 
        src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
        dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
 
-       rs.rs_bound_addr = src_addr->sin_addr.s_addr;
-       rs.rs_bound_port = src_addr->sin_port;
-       rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
-       rs.rs_conn_port = dst_addr->sin_port;
-
-       rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
+       rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
        if (rc)
                rds_iw_remove_cm_id(rds_iwdev, cm_id);
 
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
        struct rds_iw_device *rds_iwdev;
        struct rds_iw_mr *ibmr = NULL;
        struct rdma_cm_id *cm_id;
+       struct sockaddr_in src = {
+               .sin_addr.s_addr = rs->rs_bound_addr,
+               .sin_port = rs->rs_bound_port,
+       };
+       struct sockaddr_in dst = {
+               .sin_addr.s_addr = rs->rs_conn_addr,
+               .sin_port = rs->rs_conn_port,
+       };
        int ret;
 
-       ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
+       ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
        if (ret || !cm_id) {
                ret = -ENODEV;
                goto out;
index c6be17a959a6e4981ecfff38af85805df6d8b26e..e0547f521f20d79c688c773286d609066c990a1d 100644 (file)
@@ -218,7 +218,8 @@ static void rxrpc_resend(struct rxrpc_call *call)
        struct rxrpc_header *hdr;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
-       int loop, stop;
+       bool stop;
+       int loop;
        u8 resend;
 
        _enter("{%d,%d,%d,%d},",
@@ -226,7 +227,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
               atomic_read(&call->sequence),
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
 
-       stop = 0;
+       stop = false;
        resend = 0;
        resend_at = 0;
 
@@ -255,11 +256,11 @@ static void rxrpc_resend(struct rxrpc_call *call)
                        _proto("Tx DATA %%%u { #%d }",
                               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
                        if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
-                               stop = 0;
+                               stop = true;
                                sp->resend_at = jiffies + 3;
                        } else {
                                sp->resend_at =
-                                       jiffies + rxrpc_resend_timeout * HZ;
+                                       jiffies + rxrpc_resend_timeout;
                        }
                }
 
index 5394b6be46ecd5ebb6c677b745a4848977433f70..0610efa83d721389fc0f2c3597f3e284fee341d7 100644 (file)
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk)
                _leave("UDP socket errqueue empty");
                return;
        }
-       if (!skb->len) {
+       serr = SKB_EXT_ERR(skb);
+       if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
                _leave("UDP empty message");
                kfree_skb(skb);
                return;
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
 
        rxrpc_new_skb(skb);
 
-       serr = SKB_EXT_ERR(skb);
        addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
        port = serr->port;
 
index 4575485ad1b4d02796717879e09ad9078e232617..19a560626dc4f4232592e7d34381a859a04b6f4c 100644 (file)
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                if (!skb) {
                        /* nothing remains on the queue */
                        if (copied &&
-                           (msg->msg_flags & MSG_PEEK || timeo == 0))
+                           (flags & MSG_PEEK || timeo == 0))
                                goto out;
 
                        /* wait for a message to turn up */
index 82c5d7fc19881577d888fad3d5b0369e4698172b..5f6288fa3f1247462897cd747364dfbc9e0da843 100644 (file)
@@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a,
                   struct tcf_result *res)
 {
        struct tcf_bpf *b = a->priv;
-       int action;
-       int filter_res;
+       int action, filter_res;
 
        spin_lock(&b->tcf_lock);
+
        b->tcf_tm.lastuse = jiffies;
        bstats_update(&b->tcf_bstats, skb);
-       action = b->tcf_action;
 
        filter_res = BPF_PROG_RUN(b->filter, skb);
-       if (filter_res == 0) {
-               /* Return code 0 from the BPF program
-                * is being interpreted as a drop here.
-                */
-               action = TC_ACT_SHOT;
+
+       /* A BPF program may overwrite the default action opcode.
+        * As in cls_bpf, if filter_res == -1 we use the default
+        * action specified by tc.
+        *
+        * In case a different well-known TC_ACT opcode has been
+        * returned, it will overwrite the default one.
+        *
+        * For everything else that is unknown, TC_ACT_UNSPEC is
+        * returned.
+        */
+       switch (filter_res) {
+       case TC_ACT_PIPE:
+       case TC_ACT_RECLASSIFY:
+       case TC_ACT_OK:
+               action = filter_res;
+               break;
+       case TC_ACT_SHOT:
+               action = filter_res;
                b->tcf_qstats.drops++;
+               break;
+       case TC_ACT_UNSPEC:
+               action = b->tcf_action;
+               break;
+       default:
+               action = TC_ACT_UNSPEC;
+               break;
        }
 
        spin_unlock(&b->tcf_lock);
index 09487afbfd5187a312ab155df5da43548d0c326b..95fdf4e4005190704dcd405e6b9422f8f56ba796 100644 (file)
@@ -78,8 +78,11 @@ struct tc_u_hnode {
        struct tc_u_common      *tp_c;
        int                     refcnt;
        unsigned int            divisor;
-       struct tc_u_knode __rcu *ht[1];
        struct rcu_head         rcu;
+       /* The 'ht' field MUST be the last field in the structure to allow
+        * for more entries to be allocated at the end of the structure.
+        */
+       struct tc_u_knode __rcu *ht[1];
 };
 
 struct tc_u_common {
index 6742200b13071b6e63c200767a77a653d2e10c06..fbb7ebfc58c6761f6afb58e62646908b10e2bf09 100644 (file)
@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
                                 * to replay the request.
                                 */
                                module_put(em->ops->owner);
+                               em->ops = NULL;
                                err = -EAGAIN;
                        }
 #endif
index abbb7dcd16897125863098cb48f6a6411488225c..59eeed43eda2d2651916dcc8698c12c7d7249e4e 100644 (file)
@@ -217,6 +217,8 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
 
        for (i = 0; i < arg->npages && arg->pages[i]; i++)
                __free_page(arg->pages[i]);
+
+       kfree(arg->pages);
 }
 
 static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
index 224a82f24d3c75e60c702bd89215b7934ded1ea8..1095be9c80ab809900d2bf0afbde9c63b6034a9d 100644 (file)
@@ -463,6 +463,8 @@ static int rsc_parse(struct cache_detail *cd,
                /* number of additional gid's */
                if (get_int(&mesg, &N))
                        goto out;
+               if (N < 0 || N > NGROUPS_MAX)
+                       goto out;
                status = -ENOMEM;
                rsci.cred.cr_group_info = groups_alloc(N);
                if (rsci.cred.cr_group_info == NULL)
index 651f49ab601fbdd75eed30ddc0a29c9cb4369e54..9dd0ea8db463acc9daba0c51be89b1f17ec8f17d 100644 (file)
@@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;
 
+       spin_lock(&xprt->bc_pa_lock);
+       list_del(&req->rq_bc_pa_list);
+       spin_unlock(&xprt->bc_pa_lock);
+
        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
        dprintk("RPC:       add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
-       list_del(&req->rq_bc_pa_list);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
index 33fb105d4352627319c604bdd03b7c9493f24adf..5199bb1a017e47b1b7503caf8075191dac1ade57 100644 (file)
@@ -921,7 +921,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
        poll_wait(filp, &queue_wait, wait);
 
        /* always allow write */
-       mask = POLL_OUT | POLLWRNORM;
+       mask = POLLOUT | POLLWRNORM;
 
        if (!rp)
                return mask;
index 7e9acd9361c55bae557fcc51681585434b1755ef..91ffde82fa0c49eba3e11385aec9e334eb6b699a 100644 (file)
@@ -738,8 +738,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
        struct rpc_xprt *xprt = rep->rr_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        __be32 *iptr;
-       int credits, rdmalen, status;
+       int rdmalen, status;
        unsigned long cwnd;
+       u32 credits;
 
        /* Check status. If bad, signal disconnect and return rep to pool */
        if (rep->rr_len == ~0U) {
index d1b70397c60f0d8cb2ceb31c2feed19f7ca8f99a..0a16fb6f088590c142232ad99b2ac3b29304c169 100644 (file)
@@ -285,7 +285,7 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
  */
 struct rpcrdma_buffer {
        spinlock_t      rb_lock;        /* protects indexes */
-       int             rb_max_requests;/* client max requests */
+       u32             rb_max_requests;/* client max requests */
        struct list_head rb_mws;        /* optional memory windows/fmrs/frmrs */
        struct list_head rb_all;
        int             rb_send_index;
index a4cf364316de64a2a8df01350d31edf4af0866ea..14f09b3cb87c2fd9c87c67dfb67ce5e8df7d9f0f 100644 (file)
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        /* Clean up all queues, except inputq: */
        __skb_queue_purge(&l_ptr->outqueue);
        __skb_queue_purge(&l_ptr->deferred_queue);
-       skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
-       if (!skb_queue_empty(&l_ptr->inputq))
+       if (!owner->inputq)
+               owner->inputq = &l_ptr->inputq;
+       skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
+       if (!skb_queue_empty(owner->inputq))
                owner->action_flags |= TIPC_MSG_EVT;
-       owner->inputq = &l_ptr->inputq;
        l_ptr->next_out = NULL;
        l_ptr->unacked_window = 0;
        l_ptr->checkpoint = 1;
index f73e975af80b622c48d5ba92ec1e9504af4755e2..b4d4467d0bb051b09243c62aae2b3b2dca6d90a6 100644 (file)
@@ -2364,8 +2364,6 @@ int tipc_sk_rht_init(struct net *net)
                .hashfn = jhash,
                .max_shift = 20, /* 1M */
                .min_shift = 8,  /* 256 */
-               .grow_decision = rht_grow_above_75,
-               .shrink_decision = rht_shrink_below_30,
        };
 
        return rhashtable_init(&tn->sk_rht, &rht_params);
index 3af0ecf1cc16859abecb7451df5cabe35d6dbf08..2a0bbd22854bd97b377139200f9e6b5e0ec2662f 100644 (file)
@@ -1199,6 +1199,7 @@ out_fail_wq:
        regulatory_exit();
 out_fail_reg:
        debugfs_remove(ieee80211_debugfs_dir);
+       nl80211_exit();
 out_fail_nl80211:
        unregister_netdevice_notifier(&cfg80211_netdev_notifier);
 out_fail_notifier:
index d78fd8b54515e630b67bf38d710b2b698f703c4c..b6f84f6a2a095ef0c94891f796ef6cf7cf9af65f 100644 (file)
@@ -2654,10 +2654,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                        return err;
        }
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
-
        err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
                                  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
                                  &flags);
@@ -2666,6 +2662,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
        wdev = rdev_add_virtual_intf(rdev,
                                nla_data(info->attrs[NL80211_ATTR_IFNAME]),
                                type, err ? NULL : &flags, &params);
@@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
        if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
                return -EINVAL;
 
+       /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
+        * as userspace might just pass through the capabilities from the IEs
+        * directly, rather than enforcing this restriction and returning an
+        * error in this case.
+        */
+       if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
+               params.ht_capa = NULL;
+               params.vht_capa = NULL;
+       }
+
        /* When you run into this, adjust the code below for the new flag */
        BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
 
@@ -12528,9 +12538,7 @@ static int cfg80211_net_detect_results(struct sk_buff *msg,
                        }
 
                        for (j = 0; j < match->n_channels; j++) {
-                               if (nla_put_u32(msg,
-                                               NL80211_ATTR_WIPHY_FREQ,
-                                               match->channels[j])) {
+                               if (nla_put_u32(msg, j, match->channels[j])) {
                                        nla_nest_cancel(msg, nl_freqs);
                                        nla_nest_cancel(msg, nl_match);
                                        goto out;
index b586d0dcb09ebc9382fa0bd22016264e5bdd21c2..48dfc7b4e98130e4d8d5b265fceadd9004ed4f5e 100644 (file)
@@ -228,7 +228,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
 
 /* We keep a static world regulatory domain in case of the absence of CRDA */
 static const struct ieee80211_regdomain world_regdom = {
-       .n_reg_rules = 6,
+       .n_reg_rules = 8,
        .alpha2 =  "00",
        .reg_rules = {
                /* IEEE 802.11b/g, channels 1..11 */
index cee479bc655c4f317edb4e90cbcb06ec3b424b81..638af0655aaf8ec600ae5f6b201e252ca229d89d 100644 (file)
@@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                 * have the xfrm_state's. We need to wait for KM to
                 * negotiate new SA's or bail out with error.*/
                if (net->xfrm.sysctl_larval_drop) {
-                       dst_release(dst);
-                       xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-
-                       return ERR_PTR(-EREMOTE);
+                       err = -EREMOTE;
+                       goto error;
                }
 
                err = -EAGAIN;
@@ -2324,7 +2322,8 @@ nopol:
 error:
        dst_release(dst);
 dropdst:
-       dst_release(dst_orig);
+       if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
+               dst_release(dst_orig);
        xfrm_pols_put(pols, drop_pols);
        return ERR_PTR(err);
 }
@@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
                                    struct sock *sk, int flags)
 {
        struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
-                                           flags | XFRM_LOOKUP_QUEUE);
+                                           flags | XFRM_LOOKUP_QUEUE |
+                                           XFRM_LOOKUP_KEEP_DST_REF);
 
        if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
                return make_blackhole(net, dst_orig->ops->family, dst_orig);
diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py
new file mode 100644 (file)
index 0000000..4680fb1
--- /dev/null
@@ -0,0 +1 @@
+# nothing to do for the initialization of this package
index 97130f88838bc2ad385b5ccd69bf0dfc51acae95..e4ea6266386662c2c88445743ff2e107b665230a 100644 (file)
@@ -112,9 +112,9 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
        return aa_dfa_next(dfa, start, 0);
 }
 
-static inline bool mediated_filesystem(struct inode *inode)
+static inline bool mediated_filesystem(struct dentry *dentry)
 {
-       return !(inode->i_sb->s_flags & MS_NOUSER);
+       return !(dentry->d_sb->s_flags & MS_NOUSER);
 }
 
 #endif /* __APPARMOR_H */
index 65ca451a764db1a38db4c8a68be1acd015f14d4f..107db88b1d5f9d1d5dda20c0636f229738fec8bd 100644 (file)
@@ -226,7 +226,7 @@ static int common_perm_rm(int op, struct path *dir,
        struct inode *inode = dentry->d_inode;
        struct path_cond cond = { };
 
-       if (!inode || !dir->mnt || !mediated_filesystem(inode))
+       if (!inode || !dir->mnt || !mediated_filesystem(dentry))
                return 0;
 
        cond.uid = inode->i_uid;
@@ -250,7 +250,7 @@ static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
 {
        struct path_cond cond = { current_fsuid(), mode };
 
-       if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
+       if (!dir->mnt || !mediated_filesystem(dir->dentry))
                return 0;
 
        return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
@@ -285,7 +285,7 @@ static int apparmor_path_truncate(struct path *path)
                                  path->dentry->d_inode->i_mode
        };
 
-       if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
+       if (!path->mnt || !mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
@@ -305,7 +305,7 @@ static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(old_dentry->d_inode))
+       if (!mediated_filesystem(old_dentry))
                return 0;
 
        profile = aa_current_profile();
@@ -320,7 +320,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(old_dentry->d_inode))
+       if (!mediated_filesystem(old_dentry))
                return 0;
 
        profile = aa_current_profile();
@@ -346,7 +346,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
 
 static int apparmor_path_chmod(struct path *path, umode_t mode)
 {
-       if (!mediated_filesystem(path->dentry->d_inode))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
@@ -358,7 +358,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
                                   path->dentry->d_inode->i_mode
        };
 
-       if (!mediated_filesystem(path->dentry->d_inode))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
@@ -366,7 +366,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 
 static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
-       if (!mediated_filesystem(dentry->d_inode))
+       if (!mediated_filesystem(dentry))
                return 0;
 
        return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
@@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(file_inode(file)))
+       if (!mediated_filesystem(file->f_path.dentry))
                return 0;
 
        /* If in exec, permission is handled by bprm hooks.
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
        BUG_ON(!fprofile);
 
        if (!file->f_path.mnt ||
-           !mediated_filesystem(file_inode(file)))
+           !mediated_filesystem(file->f_path.dentry))
                return 0;
 
        profile = __aa_current_profile();
index 35b394a75d762dd6a4e935f3ffe1d5b4566a2885..71e0e3a15b9dc3bbae6b73cd1d8134768f67d2c5 100644 (file)
@@ -114,7 +114,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *    security_path hooks as a deleted dentry except without an inode
         *    allocated.
         */
-       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+       if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
            !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
index 8e7ca62078abe85f988a90b796f265fc2172d526..131a3c49f766444f167f88d19b712ef80ee47a66 100644 (file)
@@ -203,7 +203,7 @@ void securityfs_remove(struct dentry *dentry)
        mutex_lock(&parent->d_inode->i_mutex);
        if (positive(dentry)) {
                if (dentry->d_inode) {
-                       if (S_ISDIR(dentry->d_inode->i_mode))
+                       if (d_is_dir(dentry))
                                simple_rmdir(parent->d_inode, dentry);
                        else
                                simple_unlink(parent->d_inode, dentry);
index 29c39e0b03ed7e3048d5fed858f31d40dc2f8d3d..4d1a54190388df96dddb7ff951c681dc28bab866 100644 (file)
@@ -1799,7 +1799,7 @@ static inline int may_rename(struct inode *old_dir,
 
        old_dsec = old_dir->i_security;
        old_isec = old_dentry->d_inode->i_security;
-       old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+       old_is_dir = d_is_dir(old_dentry);
        new_dsec = new_dir->i_security;
 
        ad.type = LSM_AUDIT_DATA_DENTRY;
@@ -1822,14 +1822,14 @@ static inline int may_rename(struct inode *old_dir,
 
        ad.u.dentry = new_dentry;
        av = DIR__ADD_NAME | DIR__SEARCH;
-       if (new_dentry->d_inode)
+       if (d_is_positive(new_dentry))
                av |= DIR__REMOVE_NAME;
        rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
        if (rc)
                return rc;
-       if (new_dentry->d_inode) {
+       if (d_is_positive(new_dentry)) {
                new_isec = new_dentry->d_inode->i_security;
-               new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+               new_is_dir = d_is_dir(new_dentry);
                rc = avc_has_perm(sid, new_isec->sid,
                                  new_isec->sclass,
                                  (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
index ed94f6f836e75baf9086f553c78a2d325ea99285..c934311812f1a777093c44a89543dcae924b8568 100644 (file)
@@ -855,7 +855,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
        rc = smk_curacc(isp, MAY_WRITE, &ad);
        rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
 
-       if (rc == 0 && new_dentry->d_inode != NULL) {
+       if (rc == 0 && d_is_positive(new_dentry)) {
                isp = smk_of_inode(new_dentry->d_inode);
                smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
                rc = smk_curacc(isp, MAY_WRITE, &ad);
@@ -961,7 +961,7 @@ static int smack_inode_rename(struct inode *old_inode,
        rc = smk_curacc(isp, MAY_READWRITE, &ad);
        rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
 
-       if (rc == 0 && new_dentry->d_inode != NULL) {
+       if (rc == 0 && d_is_positive(new_dentry)) {
                isp = smk_of_inode(new_dentry->d_inode);
                smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
                rc = smk_curacc(isp, MAY_READWRITE, &ad);
index 400390790745212764bd99c9178dceea031382ef..c151a1869597f8155a0296f89fafa61cc65f447d 100644 (file)
@@ -905,11 +905,9 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
            !tomoyo_get_realpath(&buf2, path2))
                goto out;
        switch (operation) {
-               struct dentry *dentry;
        case TOMOYO_TYPE_RENAME:
        case TOMOYO_TYPE_LINK:
-               dentry = path1->dentry;
-               if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
+               if (!d_is_dir(path1->dentry))
                        break;
                /* fall through */
        case TOMOYO_TYPE_PIVOT_ROOT:
index 35324a8e83c867f126256f67b493b5320b9652c7..eeb691d1911f5716bb09b8eecacd24215a31564f 100644 (file)
@@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
 
        if (info->count < 1)
                return -EINVAL;
+       if (!*info->id.name)
+               return -EINVAL;
+       if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
+               return -EINVAL;
        access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
                (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
                                 SNDRV_CTL_ELEM_ACCESS_INACTIVE|
index b03a638b420c18243776c45fda5884b422392c46..279e24f613051fddb8ca16375ab9031e6a703b03 100644 (file)
@@ -1552,6 +1552,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
                        if (! snd_pcm_playback_empty(substream)) {
                                snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
                                snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
+                       } else {
+                               runtime->status->state = SNDRV_PCM_STATE_SETUP;
                        }
                        break;
                case SNDRV_PCM_STATE_RUNNING:
index 9b6470cdcf246f6e89ecf428d84dee0a25ad361e..7ba937399ac783c5cbf3c92f7497227d2feaa3cc 100644 (file)
@@ -269,6 +269,9 @@ do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chse
 {
        int  i;
 
+       if (control >= ARRAY_SIZE(chan->control))
+               return;
+
        /* Switches */
        if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) {
                /* These are all switches; either off or on so set to 0 or 127 */
index f62780ed64adcc85465447838ee3b5d794690937..7821b07415a785c70982e03803ac34601770e3a9 100644 (file)
@@ -105,6 +105,8 @@ static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum,
                int pitchbend = chan->midi_pitchbend;
                int segment;
 
+               if (pitchbend < -0x2000)
+                       pitchbend = -0x2000;
                if (pitchbend > 0x1FFF)
                        pitchbend = 0x1FFF;
 
index 0d580186ef1ac379bcd2cb699ac2f33baeac9029..5cc356db5351d903a7199b233fdfffc2bd8a674e 100644 (file)
@@ -33,7 +33,7 @@
  */
 #define MAX_MIDI_RX_BLOCKS     8
 
-#define TRANSFER_DELAY_TICKS   0x2e00 /* 479.17 µs */
+#define TRANSFER_DELAY_TICKS   0x2e00 /* 479.17 microseconds */
 
 /* isochronous header parameters */
 #define ISO_DATA_LENGTH_SHIFT  16
@@ -78,7 +78,7 @@ static void pcm_period_tasklet(unsigned long data);
 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
                      enum amdtp_stream_direction dir, enum cip_flags flags)
 {
-       s->unit = fw_unit_get(unit);
+       s->unit = unit;
        s->direction = dir;
        s->flags = flags;
        s->context = ERR_PTR(-1);
@@ -102,7 +102,6 @@ void amdtp_stream_destroy(struct amdtp_stream *s)
 {
        WARN_ON(amdtp_stream_running(s));
        mutex_destroy(&s->mutex);
-       fw_unit_put(s->unit);
 }
 EXPORT_SYMBOL(amdtp_stream_destroy);
 
index fc19c99654aa0284d400e58c51abc1a0c35495ee..611b7dae7ee54c932394c713022fd4501c84f7ca 100644 (file)
@@ -116,11 +116,22 @@ end:
        return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when .remove() returns, this module still keeps
+ * references to the unit.
+ */
 static void
 bebob_card_free(struct snd_card *card)
 {
        struct snd_bebob *bebob = card->private_data;
 
+       snd_bebob_stream_destroy_duplex(bebob);
+       fw_unit_put(bebob->unit);
+
+       kfree(bebob->maudio_special_quirk);
+
        if (bebob->card_index >= 0) {
                mutex_lock(&devices_mutex);
                clear_bit(bebob->card_index, devices_used);
@@ -205,7 +216,7 @@ bebob_probe(struct fw_unit *unit,
        card->private_free = bebob_card_free;
 
        bebob->card = card;
-       bebob->unit = unit;
+       bebob->unit = fw_unit_get(unit);
        bebob->spec = spec;
        mutex_init(&bebob->mutex);
        spin_lock_init(&bebob->lock);
@@ -306,10 +317,11 @@ static void bebob_remove(struct fw_unit *unit)
        if (bebob == NULL)
                return;
 
-       kfree(bebob->maudio_special_quirk);
+       /* Wake up bus-reset waiters. */
+       if (!completion_done(&bebob->bus_reset))
+               complete_all(&bebob->bus_reset);
 
-       snd_bebob_stream_destroy_duplex(bebob);
-       snd_card_disconnect(bebob->card);
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(bebob->card);
 }
 
index 0ebcabfdc7ce0162c9a77ed30ca038e6588cae63..98e4fc8121a1f4bdad82d892d79fa5c8241086af 100644 (file)
@@ -410,8 +410,6 @@ break_both_connections(struct snd_bebob *bebob)
 static void
 destroy_both_connections(struct snd_bebob *bebob)
 {
-       break_both_connections(bebob);
-
        cmp_connection_destroy(&bebob->in_conn);
        cmp_connection_destroy(&bebob->out_conn);
 }
@@ -712,22 +710,16 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob)
        mutex_unlock(&bebob->mutex);
 }
 
+/*
+ * This function should be called before starting streams or after stopping
+ * streams.
+ */
 void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob)
 {
-       mutex_lock(&bebob->mutex);
-
-       amdtp_stream_pcm_abort(&bebob->rx_stream);
-       amdtp_stream_pcm_abort(&bebob->tx_stream);
-
-       amdtp_stream_stop(&bebob->rx_stream);
-       amdtp_stream_stop(&bebob->tx_stream);
-
        amdtp_stream_destroy(&bebob->rx_stream);
        amdtp_stream_destroy(&bebob->tx_stream);
 
        destroy_both_connections(bebob);
-
-       mutex_unlock(&bebob->mutex);
 }
 
 /*
index fa9cf761b610ad81e537634590f6f33cff7e674e..07dbd01d7a6bd336d901fa78b83365b44307b1a0 100644 (file)
@@ -311,14 +311,21 @@ end:
        return err;
 }
 
+/*
+ * This function should be called before starting streams or after stopping
+ * streams.
+ */
 static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream)
 {
-       amdtp_stream_destroy(stream);
+       struct fw_iso_resources *resources;
 
        if (stream == &dice->tx_stream)
-               fw_iso_resources_destroy(&dice->tx_resources);
+               resources = &dice->tx_resources;
        else
-               fw_iso_resources_destroy(&dice->rx_resources);
+               resources = &dice->rx_resources;
+
+       amdtp_stream_destroy(stream);
+       fw_iso_resources_destroy(resources);
 }
 
 int snd_dice_stream_init_duplex(struct snd_dice *dice)
@@ -332,6 +339,8 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
                goto end;
 
        err = init_stream(dice, &dice->rx_stream);
+       if (err < 0)
+               destroy_stream(dice, &dice->tx_stream);
 end:
        return err;
 }
@@ -340,10 +349,7 @@ void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
 {
        snd_dice_transaction_clear_enable(dice);
 
-       stop_stream(dice, &dice->tx_stream);
        destroy_stream(dice, &dice->tx_stream);
-
-       stop_stream(dice, &dice->rx_stream);
        destroy_stream(dice, &dice->rx_stream);
 
        dice->substreams_counter = 0;
index 90d8f40ff72712f2ac67dc8f978855d59fd5f8d6..70a111d7f428af4a0487f80cf350767935c8cac0 100644 (file)
@@ -226,11 +226,20 @@ static void dice_card_strings(struct snd_dice *dice)
        strcpy(card->mixername, "DICE");
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when .remove() returns, this module still keeps
+ * references to the unit.
+ */
 static void dice_card_free(struct snd_card *card)
 {
        struct snd_dice *dice = card->private_data;
 
+       snd_dice_stream_destroy_duplex(dice);
        snd_dice_transaction_destroy(dice);
+       fw_unit_put(dice->unit);
+
        mutex_destroy(&dice->mutex);
 }
 
@@ -251,7 +260,7 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 
        dice = card->private_data;
        dice->card = card;
-       dice->unit = unit;
+       dice->unit = fw_unit_get(unit);
        card->private_free = dice_card_free;
 
        spin_lock_init(&dice->lock);
@@ -305,10 +314,7 @@ static void dice_remove(struct fw_unit *unit)
 {
        struct snd_dice *dice = dev_get_drvdata(&unit->device);
 
-       snd_card_disconnect(dice->card);
-
-       snd_dice_stream_destroy_duplex(dice);
-
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(dice->card);
 }
 
index 3e2ed8e82cbc49b4699c82ad30776ef8a4300253..2682e7e3e5c98511e8bd26fddf452427f1e83a6a 100644 (file)
@@ -173,11 +173,23 @@ end:
        return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when .remove() returns, this module still keeps
+ * references to the unit.
+ */
 static void
 efw_card_free(struct snd_card *card)
 {
        struct snd_efw *efw = card->private_data;
 
+       snd_efw_stream_destroy_duplex(efw);
+       snd_efw_transaction_remove_instance(efw);
+       fw_unit_put(efw->unit);
+
+       kfree(efw->resp_buf);
+
        if (efw->card_index >= 0) {
                mutex_lock(&devices_mutex);
                clear_bit(efw->card_index, devices_used);
@@ -185,7 +197,6 @@ efw_card_free(struct snd_card *card)
        }
 
        mutex_destroy(&efw->mutex);
-       kfree(efw->resp_buf);
 }
 
 static int
@@ -218,7 +229,7 @@ efw_probe(struct fw_unit *unit,
        card->private_free = efw_card_free;
 
        efw->card = card;
-       efw->unit = unit;
+       efw->unit = fw_unit_get(unit);
        mutex_init(&efw->mutex);
        spin_lock_init(&efw->lock);
        init_waitqueue_head(&efw->hwdep_wait);
@@ -289,10 +300,7 @@ static void efw_remove(struct fw_unit *unit)
 {
        struct snd_efw *efw = dev_get_drvdata(&unit->device);
 
-       snd_efw_stream_destroy_duplex(efw);
-       snd_efw_transaction_remove_instance(efw);
-
-       snd_card_disconnect(efw->card);
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(efw->card);
 }
 
index 4f440e16366780f097d8c03daeb45e01fb5e7eb0..c55db1bddc80a0ceab4997279643f73840943cef 100644 (file)
@@ -100,17 +100,22 @@ end:
        return err;
 }
 
+/*
+ * This function should be called before starting the stream or after stopping
+ * the streams.
+ */
 static void
 destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
 {
-       stop_stream(efw, stream);
-
-       amdtp_stream_destroy(stream);
+       struct cmp_connection *conn;
 
        if (stream == &efw->tx_stream)
-               cmp_connection_destroy(&efw->out_conn);
+               conn = &efw->out_conn;
        else
-               cmp_connection_destroy(&efw->in_conn);
+               conn = &efw->in_conn;
+
+       amdtp_stream_destroy(stream);
+       cmp_connection_destroy(conn);
 }
 
 static int
@@ -319,12 +324,8 @@ void snd_efw_stream_update_duplex(struct snd_efw *efw)
 
 void snd_efw_stream_destroy_duplex(struct snd_efw *efw)
 {
-       mutex_lock(&efw->mutex);
-
        destroy_stream(efw, &efw->rx_stream);
        destroy_stream(efw, &efw->tx_stream);
-
-       mutex_unlock(&efw->mutex);
 }
 
 void snd_efw_stream_lock_changed(struct snd_efw *efw)
index 5f17b77ee15222ad0e43c3c6b5b13688b386417f..f0e4d502d60482ae8374cf9f830b739eb9e38753 100644 (file)
@@ -26,7 +26,7 @@
 int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
 {
        r->channels_mask = ~0uLL;
-       r->unit = fw_unit_get(unit);
+       r->unit = unit;
        mutex_init(&r->mutex);
        r->allocated = false;
 
@@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r)
 {
        WARN_ON(r->allocated);
        mutex_destroy(&r->mutex);
-       fw_unit_put(r->unit);
 }
 EXPORT_SYMBOL(fw_iso_resources_destroy);
 
index bda845afb470703ff0c05bd69f1caac5bfeabbd6..e6757cd8572422813b1627f3c5718ec4c53f2aac 100644 (file)
@@ -171,9 +171,10 @@ static int start_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream,
        }
 
        /* Wait first packet */
-       err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT);
-       if (err < 0)
+       if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) {
                stop_stream(oxfw, stream);
+               err = -ETIMEDOUT;
+       }
 end:
        return err;
 }
@@ -337,6 +338,10 @@ void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw,
        stop_stream(oxfw, stream);
 }
 
+/*
+ * This function should be called before starting the stream or after stopping
+ * the streams.
+ */
 void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
                                     struct amdtp_stream *stream)
 {
@@ -347,8 +352,6 @@ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
        else
                conn = &oxfw->in_conn;
 
-       stop_stream(oxfw, stream);
-
        amdtp_stream_destroy(stream);
        cmp_connection_destroy(conn);
 }
index 60e5cad0531aeb181d4cc6558f44b4cd6896f070..8c6ce019f437c310043a3686b634def68c92e424 100644 (file)
@@ -104,11 +104,23 @@ end:
        return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when .remove() returns, this module still keeps
+ * references to the unit.
+ */
 static void oxfw_card_free(struct snd_card *card)
 {
        struct snd_oxfw *oxfw = card->private_data;
        unsigned int i;
 
+       snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+       if (oxfw->has_output)
+               snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+
+       fw_unit_put(oxfw->unit);
+
        for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
                kfree(oxfw->tx_stream_formats[i]);
                kfree(oxfw->rx_stream_formats[i]);
@@ -136,7 +148,7 @@ static int oxfw_probe(struct fw_unit *unit,
        oxfw = card->private_data;
        oxfw->card = card;
        mutex_init(&oxfw->mutex);
-       oxfw->unit = unit;
+       oxfw->unit = fw_unit_get(unit);
        oxfw->device_info = (const struct device_info *)id->driver_data;
        spin_lock_init(&oxfw->lock);
        init_waitqueue_head(&oxfw->hwdep_wait);
@@ -212,12 +224,7 @@ static void oxfw_remove(struct fw_unit *unit)
 {
        struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
 
-       snd_card_disconnect(oxfw->card);
-
-       snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
-       if (oxfw->has_output)
-               snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
-
+       /* No need to wait for releasing card object in this context. */
        snd_card_free_when_closed(oxfw->card);
 }
 
index 17e49a071af4497e5f39e0341cdee1db229cbc59..b408540798c1648d04a4ec1c36086844326affe1 100644 (file)
@@ -306,11 +306,12 @@ int snd_msndmix_new(struct snd_card *card)
        spin_lock_init(&chip->mixer_lock);
        strcpy(card->mixername, "MSND Pinnacle Mixer");
 
-       for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++)
+       for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) {
                err = snd_ctl_add(card,
                                  snd_ctl_new1(snd_msnd_controls + idx, chip));
                if (err < 0)
                        return err;
+       }
 
        return 0;
 }
index dfcb5e929f9fc9643701eb33f04aa8efd7f101d3..17c2637d842c1c366275683f30d2a3cb743560b6 100644 (file)
@@ -961,7 +961,6 @@ static int azx_alloc_cmd_io(struct azx *chip)
                dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
        return err;
 }
-EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
 
 static void azx_init_cmd_io(struct azx *chip)
 {
@@ -1026,7 +1025,6 @@ static void azx_init_cmd_io(struct azx *chip)
        azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
        spin_unlock_irq(&chip->reg_lock);
 }
-EXPORT_SYMBOL_GPL(azx_init_cmd_io);
 
 static void azx_free_cmd_io(struct azx *chip)
 {
@@ -1036,7 +1034,6 @@ static void azx_free_cmd_io(struct azx *chip)
        azx_writeb(chip, CORBCTL, 0);
        spin_unlock_irq(&chip->reg_lock);
 }
-EXPORT_SYMBOL_GPL(azx_free_cmd_io);
 
 static unsigned int azx_command_addr(u32 cmd)
 {
@@ -1167,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
                }
        }
 
-       if (!bus->no_response_fallback)
+       if (bus->no_response_fallback)
                return -1;
 
        if (!chip->polling_mode && chip->poll_count < 2) {
@@ -1316,7 +1313,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
        else
                return azx_corb_send_cmd(bus, val);
 }
-EXPORT_SYMBOL_GPL(azx_send_cmd);
 
 /* get a response */
 static unsigned int azx_get_response(struct hda_bus *bus,
@@ -1330,7 +1326,6 @@ static unsigned int azx_get_response(struct hda_bus *bus,
        else
                return azx_rirb_get_response(bus, addr);
 }
-EXPORT_SYMBOL_GPL(azx_get_response);
 
 #ifdef CONFIG_SND_HDA_DSP_LOADER
 /*
index b680b4ec63313c8b1152390dbbf602018a212952..8ec5289f8e058538892aa04c402a4db9967c94c7 100644 (file)
@@ -687,12 +687,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
        return val;
 }
 
+/* is this a stereo widget or a stereo-to-mono mix? */
+static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
+{
+       unsigned int wcaps = get_wcaps(codec, nid);
+       hda_nid_t conn;
+
+       if (wcaps & AC_WCAP_STEREO)
+               return true;
+       if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
+               return false;
+       if (snd_hda_get_num_conns(codec, nid) != 1)
+               return false;
+       if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
+               return false;
+       return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
+}
+
 /* initialize the amp value (only at the first time) */
 static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
 {
        unsigned int caps = query_amp_caps(codec, nid, dir);
        int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
-       snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+
+       if (is_stereo_amps(codec, nid, dir))
+               snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+       else
+               snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
+}
+
+/* update the amp, doing in stereo or mono depending on NID */
+static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
+                     unsigned int mask, unsigned int val)
+{
+       if (is_stereo_amps(codec, nid, dir))
+               return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
+                                               mask, val);
+       else
+               return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
+                                               mask, val);
 }
 
 /* calculate amp value mask we can modify;
@@ -732,7 +765,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
                return;
 
        val &= mask;
-       snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
+       update_amp(codec, nid, dir, idx, mask, val);
 }
 
 static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
@@ -4424,13 +4457,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
        has_amp = nid_has_mute(codec, mix, HDA_INPUT);
        for (i = 0; i < nums; i++) {
                if (has_amp)
-                       snd_hda_codec_amp_stereo(codec, mix,
-                                                HDA_INPUT, i,
-                                                0xff, HDA_AMP_MUTE);
+                       update_amp(codec, mix, HDA_INPUT, i,
+                                  0xff, HDA_AMP_MUTE);
                else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
-                       snd_hda_codec_amp_stereo(codec, conn[i],
-                                                HDA_OUTPUT, 0,
-                                                0xff, HDA_AMP_MUTE);
+                       update_amp(codec, conn[i], HDA_OUTPUT, 0,
+                                  0xff, HDA_AMP_MUTE);
        }
 }
 
index 36d2f20db7a4201b999155e759e3e22b6fe3f753..4ca3d5d02436daf0ec7ec7274c675c4beae74423 100644 (file)
@@ -1966,7 +1966,7 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Panther Point */
        { PCI_DEVICE(0x8086, 0x1e20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
index ce5a6da834199bd2acd7e6ee18fd123b4f078820..05e19f78b4cb8689ff8ac1b0b4f699bdbf5759da 100644 (file)
@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
                    (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
 }
 
+/* is this a stereo widget or a stereo-to-mono mix? */
+static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
+                          int dir, unsigned int wcaps, int indices)
+{
+       hda_nid_t conn;
+
+       if (wcaps & AC_WCAP_STEREO)
+               return true;
+       /* check for a stereo-to-mono mix; it must be:
+        * only a single connection, only for input, and only a mixer widget
+        */
+       if (indices != 1 || dir != HDA_INPUT ||
+           get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
+               return false;
+
+       if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
+               return false;
+       /* is the connection source a stereo widget? */
+       wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
+       return !!(wcaps & AC_WCAP_STEREO);
+}
+
 static void print_amp_vals(struct snd_info_buffer *buffer,
                           struct hda_codec *codec, hda_nid_t nid,
-                          int dir, int stereo, int indices)
+                          int dir, unsigned int wcaps, int indices)
 {
        unsigned int val;
+       bool stereo;
        int i;
 
+       stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
+
        dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
        for (i = 0; i < indices; i++) {
                snd_iprintf(buffer, " [");
@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry,
                            (codec->single_adc_amp &&
                             wid_type == AC_WID_AUD_IN))
                                print_amp_vals(buffer, codec, nid, HDA_INPUT,
-                                              wid_caps & AC_WCAP_STEREO,
-                                              1);
+                                              wid_caps, 1);
                        else
                                print_amp_vals(buffer, codec, nid, HDA_INPUT,
-                                              wid_caps & AC_WCAP_STEREO,
-                                              conn_len);
+                                              wid_caps, conn_len);
                }
                if (wid_caps & AC_WCAP_OUT_AMP) {
                        snd_iprintf(buffer, "  Amp-Out caps: ");
@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry,
                        if (wid_type == AC_WID_PIN &&
                            codec->pin_amp_workaround)
                                print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
-                                              wid_caps & AC_WCAP_STEREO,
-                                              conn_len);
+                                              wid_caps, conn_len);
                        else
                                print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
-                                              wid_caps & AC_WCAP_STEREO, 1);
+                                              wid_caps, 1);
                }
 
                switch (wid_type) {
index 227990bc02e38cb459921ea5f08d7945eb540b51..375e94f4cf5265ba19378f96998658a5e4fed91c 100644 (file)
@@ -329,8 +329,8 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hda->regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(chip->remap_addr))
-               return PTR_ERR(chip->remap_addr);
+       if (IS_ERR(hda->regs))
+               return PTR_ERR(hda->regs);
 
        chip->remap_addr = hda->regs + HDA_BAR0;
        chip->addr = res->start + HDA_BAR0;
index 1589c9bcce3e15a230f87d352f2ae165252c358d..dd2b3d92071f698f41a75d2d7b7877eb6b357c00 100644 (file)
@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
        SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
        SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+       SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
        SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
        SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
        {} /* terminator */
@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec)
                return -ENOMEM;
 
        spec->gen.automute_hook = cs_automute;
+       codec->single_adc_amp = 1;
 
        snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
                           cs420x_fixups);
index fd3ed18670e9c4005d115a26f6410c7efd4faff8..da67ea8645a6e8462f23b8f8b1e96044e66812f7 100644 (file)
@@ -223,6 +223,7 @@ enum {
        CXT_PINCFG_LENOVO_TP410,
        CXT_PINCFG_LEMOTE_A1004,
        CXT_PINCFG_LEMOTE_A1205,
+       CXT_PINCFG_COMPAQ_CQ60,
        CXT_FIXUP_STEREO_DMIC,
        CXT_FIXUP_INC_MIC_BOOST,
        CXT_FIXUP_HEADPHONE_MIC_PIN,
@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = cxt_pincfg_lemote,
        },
+       [CXT_PINCFG_COMPAQ_CQ60] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       /* 0x17 was wrongly set up as a mic; it should be 0x1d */
+                       { 0x17, 0x400001f0 },
+                       { 0x1d, 0x97a70120 },
+                       { }
+               }
+       },
        [CXT_FIXUP_STEREO_DMIC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_stereo_dmic,
@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = {
 };
 
 static const struct snd_pci_quirk cxt5051_fixups[] = {
+       SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
        {}
 };
index ddb93083a2af1ea15ffe2374a81193481a2f0da1..526398a4a4428da11d5792c9c7f2a299cac9bd43 100644 (file)
@@ -4937,6 +4937,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
        /* ALC282 */
+       SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
@@ -5208,6 +5209,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC255_STANDARD_PINS,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170140},
+               {0x17, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x21, 0x02211050}),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
                {0x12, 0x90a60130},
                {0x13, 0x40000000},
index 6d36c5b7880504457430718a8f23f2dbc0e9a9ba..87eff3173ce924ae89596068b5bf8dcd38e7482e 100644 (file)
@@ -79,6 +79,7 @@ enum {
        STAC_ALIENWARE_M17X,
        STAC_92HD89XX_HP_FRONT_JACK,
        STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+       STAC_92HD73XX_ASUS_MOBO,
        STAC_92HD73XX_MODELS
 };
 
@@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
        [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
-       }
+       },
+       [STAC_92HD73XX_ASUS_MOBO] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       /* enable 5.1 and SPDIF out */
+                       { 0x0c, 0x01014411 },
+                       { 0x0d, 0x01014410 },
+                       { 0x0e, 0x01014412 },
+                       { 0x22, 0x014b1180 },
+                       { }
+               }
+       },
 };
 
 static const struct hda_model_fixup stac92hd73xx_models[] = {
@@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
        { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
        { .id = STAC_DELL_EQ, .name = "dell-eq" },
        { .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+       { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
        {}
 };
 
@@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
                                "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
                                "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
+                     STAC_92HD73XX_ASUS_MOBO),
        {} /* terminator */
 };
 
index 2c363fdca9fd00e0b82b99d029a4bd53a9728467..ca67f896d11757bc05be43804d367175b282dad0 100644 (file)
@@ -6082,6 +6082,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
                snd_pcm_hw_constraint_minmax(runtime,
                                             SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                             64, 8192);
+               snd_pcm_hw_constraint_minmax(runtime,
+                                            SNDRV_PCM_HW_PARAM_PERIODS,
+                                            2, 2);
                break;
        }
 
@@ -6156,6 +6159,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
                snd_pcm_hw_constraint_minmax(runtime,
                                             SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                             64, 8192);
+               snd_pcm_hw_constraint_minmax(runtime,
+                                            SNDRV_PCM_HW_PARAM_PERIODS,
+                                            2, 2);
                break;
        }
 
index f5ad214663f98b4dbbdc3c2a4180d6142d09688c..8de836165cf2ed1978c4846ac56e84bcbae4e6d1 100644 (file)
@@ -46,8 +46,6 @@
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 
-#include <asm/mach-types.h>
-
 #include "../codecs/wm8731.h"
 #include "atmel-pcm.h"
 #include "atmel_ssc_dai.h"
@@ -171,9 +169,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
        int ret;
 
        if (!np) {
-               if (!(machine_is_at91sam9g20ek() ||
-                       machine_is_at91sam9g20ek_2mmc()))
-                       return -ENODEV;
+               return -ENODEV;
        }
 
        ret = atmel_ssc_set_audio(0);
@@ -210,39 +206,37 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
        card->dev = &pdev->dev;
 
        /* Parse device node info */
-       if (np) {
-               ret = snd_soc_of_parse_card_name(card, "atmel,model");
-               if (ret)
-                       goto err;
-
-               ret = snd_soc_of_parse_audio_routing(card,
-                       "atmel,audio-routing");
-               if (ret)
-                       goto err;
-
-               /* Parse codec info */
-               at91sam9g20ek_dai.codec_name = NULL;
-               codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
-               if (!codec_np) {
-                       dev_err(&pdev->dev, "codec info missing\n");
-                       return -EINVAL;
-               }
-               at91sam9g20ek_dai.codec_of_node = codec_np;
-
-               /* Parse dai and platform info */
-               at91sam9g20ek_dai.cpu_dai_name = NULL;
-               at91sam9g20ek_dai.platform_name = NULL;
-               cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
-               if (!cpu_np) {
-                       dev_err(&pdev->dev, "dai and pcm info missing\n");
-                       return -EINVAL;
-               }
-               at91sam9g20ek_dai.cpu_of_node = cpu_np;
-               at91sam9g20ek_dai.platform_of_node = cpu_np;
-
-               of_node_put(codec_np);
-               of_node_put(cpu_np);
+       ret = snd_soc_of_parse_card_name(card, "atmel,model");
+       if (ret)
+               goto err;
+
+       ret = snd_soc_of_parse_audio_routing(card,
+               "atmel,audio-routing");
+       if (ret)
+               goto err;
+
+       /* Parse codec info */
+       at91sam9g20ek_dai.codec_name = NULL;
+       codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+       if (!codec_np) {
+               dev_err(&pdev->dev, "codec info missing\n");
+               return -EINVAL;
+       }
+       at91sam9g20ek_dai.codec_of_node = codec_np;
+
+       /* Parse dai and platform info */
+       at91sam9g20ek_dai.cpu_dai_name = NULL;
+       at91sam9g20ek_dai.platform_name = NULL;
+       cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+       if (!cpu_np) {
+               dev_err(&pdev->dev, "dai and pcm info missing\n");
+               return -EINVAL;
        }
+       at91sam9g20ek_dai.cpu_of_node = cpu_np;
+       at91sam9g20ek_dai.platform_of_node = cpu_np;
+
+       of_node_put(codec_np);
+       of_node_put(cpu_np);
 
        ret = snd_soc_register_card(card);
        if (ret) {
index 7b7fbcd49e5e424c25e47d9e6a1aee77065358d5..c7cd60f009e93e69ef2220eec3eb6ec97a9e6500 100644 (file)
@@ -16,7 +16,7 @@ config SND_EP93XX_SOC_AC97
 
 config SND_EP93XX_SOC_SNAPPERCL15
         tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
-        depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15
+        depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
         select SND_EP93XX_SOC_I2S
         select SND_SOC_TLV320AIC23_I2C
         help
index 064e6c18e10923fd75609b750405dbc33f9db6af..ea9f0e31f9d40d4cf4d8e626371698cb857fcb3a 100644 (file)
@@ -69,7 +69,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_MAX98088 if I2C
        select SND_SOC_MAX98090 if I2C
        select SND_SOC_MAX98095 if I2C
-       select SND_SOC_MAX98357A
+       select SND_SOC_MAX98357A if GPIOLIB
        select SND_SOC_MAX9850 if I2C
        select SND_SOC_MAX9768 if I2C
        select SND_SOC_MAX9877 if I2C
index b67480f1b1aa4a7dcfb6c03b10b2ecd826875989..4373ada95648e6890f013582b97c355b45e76dd1 100644 (file)
@@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
-       unsigned int deemph = ucontrol->value.enumerated.item[0];
+       unsigned int deemph = ucontrol->value.integer.value[0];
 
        if (deemph > 1)
                return -EINVAL;
@@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = adav80x->deemph;
+       ucontrol->value.integer.value[0] = adav80x->deemph;
        return 0;
 };
 
index 70861c7b1631ab577acf506f8af18f2386a0852a..81b54a270bd8f800637108d027f9edfffb464d04 100644 (file)
@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
 
        if (deemph > 1)
                return -EINVAL;
@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = ak4641->deemph;
+       ucontrol->value.integer.value[0] = ak4641->deemph;
        return 0;
 };
 
index 632e89f793a78d2691dfb2e33b3516f0f483aeee..2a58b1dccd2f18cb41221c45ac4d026e731240c9 100644 (file)
@@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route ak4671_intercon[] = {
-       {"DAC Left", "NULL", "PMPLL"},
-       {"DAC Right", "NULL", "PMPLL"},
-       {"ADC Left", "NULL", "PMPLL"},
-       {"ADC Right", "NULL", "PMPLL"},
+       {"DAC Left", NULL, "PMPLL"},
+       {"DAC Right", NULL, "PMPLL"},
+       {"ADC Left", NULL, "PMPLL"},
+       {"ADC Right", NULL, "PMPLL"},
 
        /* Outputs */
-       {"LOUT1", "NULL", "LOUT1 Mixer"},
-       {"ROUT1", "NULL", "ROUT1 Mixer"},
-       {"LOUT2", "NULL", "LOUT2 Mix Amp"},
-       {"ROUT2", "NULL", "ROUT2 Mix Amp"},
-       {"LOUT3", "NULL", "LOUT3 Mixer"},
-       {"ROUT3", "NULL", "ROUT3 Mixer"},
+       {"LOUT1", NULL, "LOUT1 Mixer"},
+       {"ROUT1", NULL, "ROUT1 Mixer"},
+       {"LOUT2", NULL, "LOUT2 Mix Amp"},
+       {"ROUT2", NULL, "ROUT2 Mix Amp"},
+       {"LOUT3", NULL, "LOUT3 Mixer"},
+       {"ROUT3", NULL, "ROUT3 Mixer"},
 
        {"LOUT1 Mixer", "DACL", "DAC Left"},
        {"ROUT1 Mixer", "DACR", "DAC Right"},
        {"LOUT2 Mixer", "DACHL", "DAC Left"},
        {"ROUT2 Mixer", "DACHR", "DAC Right"},
-       {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"},
-       {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"},
+       {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"},
+       {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"},
        {"LOUT3 Mixer", "DACSL", "DAC Left"},
        {"ROUT3 Mixer", "DACSR", "DAC Right"},
 
@@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = {
        {"LIN2", NULL, "Mic Bias"},
        {"RIN2", NULL, "Mic Bias"},
 
-       {"ADC Left", "NULL", "LIN MUX"},
-       {"ADC Right", "NULL", "RIN MUX"},
+       {"ADC Left", NULL, "LIN MUX"},
+       {"ADC Right", NULL, "RIN MUX"},
 
        /* Analog Loops */
-       {"LIN1 Mixing Circuit", "NULL", "LIN1"},
-       {"RIN1 Mixing Circuit", "NULL", "RIN1"},
-       {"LIN2 Mixing Circuit", "NULL", "LIN2"},
-       {"RIN2 Mixing Circuit", "NULL", "RIN2"},
-       {"LIN3 Mixing Circuit", "NULL", "LIN3"},
-       {"RIN3 Mixing Circuit", "NULL", "RIN3"},
-       {"LIN4 Mixing Circuit", "NULL", "LIN4"},
-       {"RIN4 Mixing Circuit", "NULL", "RIN4"},
+       {"LIN1 Mixing Circuit", NULL, "LIN1"},
+       {"RIN1 Mixing Circuit", NULL, "RIN1"},
+       {"LIN2 Mixing Circuit", NULL, "LIN2"},
+       {"RIN2 Mixing Circuit", NULL, "RIN2"},
+       {"LIN3 Mixing Circuit", NULL, "LIN3"},
+       {"RIN3 Mixing Circuit", NULL, "RIN3"},
+       {"LIN4 Mixing Circuit", NULL, "LIN4"},
+       {"RIN4 Mixing Circuit", NULL, "RIN4"},
 
        {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"},
        {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"},
index 79a4efcb894c192c763c649083081e4c75fbece3..7d3a6accaf9a4a583dcacbb934e275092933c026 100644 (file)
@@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = cs4271->deemph;
+       ucontrol->value.integer.value[0] = cs4271->deemph;
        return 0;
 }
 
@@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
-       cs4271->deemph = ucontrol->value.enumerated.item[0];
+       cs4271->deemph = ucontrol->value.integer.value[0];
        return cs4271_set_deemph(codec);
 }
 
index ffe96175a8a5ac62ee4721c1f17a54c305d447c7..911c26c705fc8d226e819d469c669b72f989517a 100644 (file)
@@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = {
 
 static const struct snd_soc_dapm_route da732x_dapm_routes[] = {
        /* Inputs */
-       {"AUX1L PGA", "NULL", "AUX1L"},
-       {"AUX1R PGA", "NULL", "AUX1R"},
+       {"AUX1L PGA", NULL, "AUX1L"},
+       {"AUX1R PGA", NULL, "AUX1R"},
        {"MIC1 PGA", NULL, "MIC1"},
-       {"MIC2 PGA", "NULL", "MIC2"},
-       {"MIC3 PGA", "NULL", "MIC3"},
+       {"MIC2 PGA", NULL, "MIC2"},
+       {"MIC3 PGA", NULL, "MIC3"},
 
        /* Capture Path */
        {"ADC1 Left MUX", "MIC1", "MIC1 PGA"},
index f27325155acef31e1f4a7fff4ea970b9405ca8a7..c5f35a07e8e48106335df49153acd81e104f7aa9 100644 (file)
@@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = es8328->deemph;
+       ucontrol->value.integer.value[0] = es8328->deemph;
        return 0;
 }
 
@@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
        int ret;
 
        if (deemph > 1)
index 1806333ea29e5a6b49e2a0adb8c58257d34b7508..e9e6efbc21dd527299fb459ec01903fa8c24dae2 100644 (file)
  * max98357a.c -- MAX98357A ALSA SoC Codec driver
  */
 
-#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
 #include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/soc-dapm.h>
 
 #define DRV_NAME "max98357a"
 
index a722a023c26280a3c89f7a198e95c818f0c2fe6a..477e13d309713e56a5a4416d054350185fea0007 100644 (file)
@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = priv->deemph;
+       ucontrol->value.integer.value[0] = priv->deemph;
 
        return 0;
 }
@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 
-       priv->deemph = ucontrol->value.enumerated.item[0];
+       priv->deemph = ucontrol->value.integer.value[0];
 
        return pcm1681_set_deemph(codec);
 }
index f374840a5a7ce376272d8f7e3290f91c099cb19f..9b541e52da8c77da4134fdf8e0c551d35a2a402b 100644 (file)
@@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = {
                .ident = "Dell Dino",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_BOARD_NAME, "0144P8")
+                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
                }
        },
        { }
index e1a4a45c57e229b12dcd174ac8453f8772b6e3b1..fd102613d20d8747c7554f5a3882c027fd0024c8 100644 (file)
@@ -225,7 +225,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
        case RT5670_ADC_EQ_CTRL1:
        case RT5670_EQ_CTRL1:
        case RT5670_ALC_CTRL_1:
-       case RT5670_IRQ_CTRL1:
        case RT5670_IRQ_CTRL2:
        case RT5670_INT_IRQ_ST:
        case RT5670_IL_CMD:
@@ -2703,6 +2702,12 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
 
        regmap_write(rt5670->regmap, RT5670_RESET, 0);
 
+       regmap_read(rt5670->regmap, RT5670_VENDOR_ID, &val);
+       if (val >= 4)
+               regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0980);
+       else
+               regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0d00);
+
        ret = regmap_register_patch(rt5670->regmap, init_list,
                                    ARRAY_SIZE(init_list));
        if (ret != 0)
index 5d0bb8748dd1df5cd6a262d7e3a925fe4b4b5efd..fb9c20eace3fbe9d7bb009862a17c0e8644ec296 100644 (file)
@@ -3284,8 +3284,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
        { "IB45 Bypass Mux", "Bypass", "IB45 Mux" },
        { "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" },
 
-       { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6" },
-       { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6" },
+       { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6 Mux" },
+       { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6 Mux" },
        { "IB6 Mux", "SLB DAC 6", "SLB DAC6" },
        { "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" },
        { "IB6 Mux", "IF4 DAC L", "IF4 DAC L" },
@@ -3293,8 +3293,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
        { "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" },
        { "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" },
 
-       { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7" },
-       { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7" },
+       { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7 Mux" },
+       { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7 Mux" },
        { "IB7 Mux", "SLB DAC 7", "SLB DAC7" },
        { "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" },
        { "IB7 Mux", "IF4 DAC R", "IF4 DAC R" },
@@ -3635,15 +3635,15 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
        { "DAC1 FS", NULL, "DAC1 MIXL" },
        { "DAC1 FS", NULL, "DAC1 MIXR" },
 
-       { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2" },
-       { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2" },
+       { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2 Mux" },
+       { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2 Mux" },
        { "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" },
        { "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" },
        { "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" },
        { "DAC2 L Mux", "OB 2", "OutBound2" },
 
-       { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3" },
-       { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3" },
+       { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3 Mux" },
+       { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3 Mux" },
        { "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" },
        { "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" },
        { "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" },
@@ -3651,29 +3651,29 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
        { "DAC2 R Mux", "Haptic Generator", "Haptic Generator" },
        { "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" },
 
-       { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4" },
-       { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4" },
+       { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4 Mux" },
+       { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4 Mux" },
        { "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" },
        { "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" },
        { "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" },
        { "DAC3 L Mux", "OB 4", "OutBound4" },
 
-       { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC4" },
-       { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC4" },
+       { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC5 Mux" },
+       { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC5 Mux" },
        { "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" },
        { "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" },
        { "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" },
        { "DAC3 R Mux", "OB 5", "OutBound5" },
 
-       { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6" },
-       { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6" },
+       { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6 Mux" },
+       { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6 Mux" },
        { "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" },
        { "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" },
        { "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" },
        { "DAC4 L Mux", "OB 6", "OutBound6" },
 
-       { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7" },
-       { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7" },
+       { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7 Mux" },
+       { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7 Mux" },
        { "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" },
        { "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" },
        { "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" },
index e182e6569bbd138713ec0047de606136254d81c4..3593a1496056d2aa7648142c67bd7952d5b589f6 100644 (file)
@@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
                /* Enable VDDC charge pump */
                ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
        } else if (vddio >= 3100 && vdda >= 3100) {
-               /*
-                * if vddio and vddd > 3.1v,
-                * charge pump should be clean before set ana_pwr
-                */
-               snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
-                               SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
-
+               ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
                /* VDDC use VDDIO rail */
                lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
                lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
index 47b257e4180965f38be5c393cab204945f444b7f..82095d6cd07036d0ad81bf63cdc9e41746c630c3 100644 (file)
@@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = {
        /* speaker map */
        { "IHFOUTL", NULL, "Speaker Rail"},
        { "IHFOUTR", NULL, "Speaker Rail"},
-       { "IHFOUTL", "NULL", "Speaker Left Playback"},
-       { "IHFOUTR", "NULL", "Speaker Right Playback"},
+       { "IHFOUTL", NULL, "Speaker Left Playback"},
+       { "IHFOUTR", NULL, "Speaker Right Playback"},
        { "Speaker Left Playback", NULL, "Speaker Left Filter"},
        { "Speaker Right Playback", NULL, "Speaker Right Filter"},
        { "Speaker Left Filter", NULL, "IHFDAC Left"},
index 3a1343fa109b482860811baea1566d354e574399..007a0e3bc2735c0c6f9d8325f15445657fcfd374 100644 (file)
@@ -106,13 +106,11 @@ static const struct reg_default sta32x_regs[] = {
 };
 
 static const struct regmap_range sta32x_write_regs_range[] = {
-       regmap_reg_range(STA32X_CONFA,  STA32X_AUTO2),
-       regmap_reg_range(STA32X_C1CFG,  STA32X_FDRC2),
+       regmap_reg_range(STA32X_CONFA,  STA32X_FDRC2),
 };
 
 static const struct regmap_range sta32x_read_regs_range[] = {
-       regmap_reg_range(STA32X_CONFA,  STA32X_AUTO2),
-       regmap_reg_range(STA32X_C1CFG,  STA32X_FDRC2),
+       regmap_reg_range(STA32X_CONFA,  STA32X_FDRC2),
 };
 
 static const struct regmap_range sta32x_volatile_regs_range[] = {
index 249ef5c4c762795bfb19f3545ce8398868f4dba3..32942bed34b1e9bb67227adddd5c662b92aa2a9b 100644 (file)
@@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = priv->deemph;
+       ucontrol->value.integer.value[0] = priv->deemph;
 
        return 0;
 }
@@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 
-       priv->deemph = ucontrol->value.enumerated.item[0];
+       priv->deemph = ucontrol->value.integer.value[0];
 
        return tas5086_set_deemph(codec);
 }
index 8d9de49a50524bb4e88ebd62cb580e17239b07d6..21d5402e343fbcdd7a433931bc9a78d0edac45e9 100644 (file)
@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
 
-       ucontrol->value.enumerated.item[0] = wm2000->anc_active;
+       ucontrol->value.integer.value[0] = wm2000->anc_active;
 
        return 0;
 }
@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
-       int anc_active = ucontrol->value.enumerated.item[0];
+       int anc_active = ucontrol->value.integer.value[0];
        int ret;
 
        if (anc_active > 1)
@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
 
-       ucontrol->value.enumerated.item[0] = wm2000->spk_ena;
+       ucontrol->value.integer.value[0] = wm2000->spk_ena;
 
        return 0;
 }
@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
-       int val = ucontrol->value.enumerated.item[0];
+       int val = ucontrol->value.integer.value[0];
        int ret;
 
        if (val > 1)
index 098c143f44d653d99c33f36e02edde9eca6600d8..c6d10533e2bde5170f8ac963333b52c62782c27f 100644 (file)
@@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = wm8731->deemph;
+       ucontrol->value.integer.value[0] = wm8731->deemph;
 
        return 0;
 }
@@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
        int ret = 0;
 
        if (deemph > 1)
index dde462c082be0eb6124e0f881b9a1ed936e34c77..04b04f8e147c6bdeada6decdca981a026ce57afa 100644 (file)
@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = wm8903->deemph;
+       ucontrol->value.integer.value[0] = wm8903->deemph;
 
        return 0;
 }
@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
        int ret = 0;
 
        if (deemph > 1)
index d3b3f57668ccae412f0a75d61f91f9f34f1f0fb0..215e93c1ddf0358f42dc5581fd01b1d9a3d7e1e0 100644 (file)
@@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = wm8904->deemph;
+       ucontrol->value.integer.value[0] = wm8904->deemph;
        return 0;
 }
 
@@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
 
        if (deemph > 1)
                return -EINVAL;
index 1ab2d462afadfb24a7205638a779bb0152eb702a..00bec915d6522152a5d40dcc3d2df52ef16424eb 100644 (file)
@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = wm8955->deemph;
+       ucontrol->value.integer.value[0] = wm8955->deemph;
        return 0;
 }
 
@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
 
        if (deemph > 1)
                return -EINVAL;
index cf8fecf97f2c7ee06fff2741bea074bd32ceb7fc..3035d98564156746bc4e814f744e1ea0f236564a 100644 (file)
@@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.enumerated.item[0] = wm8960->deemph;
+       ucontrol->value.integer.value[0] = wm8960->deemph;
        return 0;
 }
 
@@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
-       int deemph = ucontrol->value.enumerated.item[0];
+       int deemph = ucontrol->value.integer.value[0];
 
        if (deemph > 1)
                return -EINVAL;
index 9517571e820d9576b9505be863a73f3c02116cb8..98c9525bd751fbacb0eec07f6465869cbc3e2aa5 100644 (file)
@@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
        struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
        struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-       unsigned int val = ucontrol->value.enumerated.item[0];
+       unsigned int val = ucontrol->value.integer.value[0];
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        unsigned int mixer, mask, shift, old;
@@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
 
        mutex_lock(&wm9712->lock);
        old = wm9712->hp_mixer[mixer];
-       if (ucontrol->value.enumerated.item[0])
+       if (ucontrol->value.integer.value[0])
                wm9712->hp_mixer[mixer] |= mask;
        else
                wm9712->hp_mixer[mixer] &= ~mask;
@@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol,
        mixer = mc->shift >> 8;
        shift = mc->shift & 0xff;
 
-       ucontrol->value.enumerated.item[0] =
+       ucontrol->value.integer.value[0] =
                (wm9712->hp_mixer[mixer] >> shift) & 1;
 
        return 0;
index 68222917b396666b975cc2aa96e13d983bf52fc2..79552953e1bdc607e4d25c95aa5ab473637be6cc 100644 (file)
@@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
        struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
        struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-       unsigned int val = ucontrol->value.enumerated.item[0];
+       unsigned int val = ucontrol->value.integer.value[0];
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        unsigned int mixer, mask, shift, old;
@@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
 
        mutex_lock(&wm9713->lock);
        old = wm9713->hp_mixer[mixer];
-       if (ucontrol->value.enumerated.item[0])
+       if (ucontrol->value.integer.value[0])
                wm9713->hp_mixer[mixer] |= mask;
        else
                wm9713->hp_mixer[mixer] &= ~mask;
@@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol,
        mixer = mc->shift >> 8;
        shift = mc->shift & 0xff;
 
-       ucontrol->value.enumerated.item[0] =
+       ucontrol->value.integer.value[0] =
                (wm9713->hp_mixer[mixer] >> shift) & 1;
 
        return 0;
index 75870c0ea2c9f613d9b4ebe3e10efd53430e5363..91eb3aef7f02f84d4dd2ec92cb46a9ce2ad67e55 100644 (file)
@@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
                                enum spdif_txrate index, bool round)
 {
        const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
-       bool is_sysclk = clk == spdif_priv->sysclk;
+       bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
        u64 rate_ideal, rate_actual, sub;
        u32 sysclk_dfmin, sysclk_dfmax;
        u32 txclk_df, sysclk_df, arate;
@@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
                        spdif_priv->txclk_src[index], rate[index]);
        dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n",
                        spdif_priv->txclk_df[index], rate[index]);
-       if (spdif_priv->txclk[index] == spdif_priv->sysclk)
+       if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk))
                dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n",
                                spdif_priv->sysclk_df[index], rate[index]);
        dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n",
index 2595611e8a6ded3876345767bde2ce51e079c9ee..6b0c8f717ec2c0781af066cfb1bc3bcce8567791 100644 (file)
@@ -603,17 +603,20 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        factor = (div2 + 1) * (7 * psr + 1) * 2;
 
        for (i = 0; i < 255; i++) {
-               /* The bclk rate must be smaller than 1/5 sysclk rate */
-               if (factor * (i + 1) < 5)
-                       continue;
-
-               tmprate = freq * factor * (i + 2);
+               tmprate = freq * factor * (i + 1);
 
                if (baudclk_is_used)
                        clkrate = clk_get_rate(ssi_private->baudclk);
                else
                        clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
 
+               /*
+                * Hardware limitation: the bclk rate must never be
+                * greater than 1/5 of the IPG clock rate
+                */
+               if (clkrate * 5 > clk_get_rate(ssi_private->clk))
+                       continue;
+
                clkrate /= factor;
                afreq = clkrate / (i + 1);
 
@@ -1224,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
        ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
        ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
 
-       ret = !of_property_read_u32_array(np, "dmas", dmas, 4);
+       ret = of_property_read_u32_array(np, "dmas", dmas, 4);
        if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
                ssi_private->use_dual_fifo = true;
                /* When using dual fifo mode, we need to keep watermark
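
Aside on the fsl_ssi hunk above: the relocated check rejects a candidate baud clock whenever its rate exceeds one fifth of the IPG clock. A small stand-alone sketch of that arithmetic in plain C, with assumed clock rates standing in for clk_get_rate()/clk_round_rate():

/*
 * Minimal sketch of the "bclk source must not exceed 1/5 of the IPG clock"
 * check added above, with made-up clock rates instead of the clk framework.
 */
#include <stdbool.h>
#include <stdio.h>

static bool baudclk_rate_ok(unsigned long baudclk_hz, unsigned long ipg_hz)
{
	/* reject any candidate whose rate is more than one fifth of IPG */
	return baudclk_hz * 5 <= ipg_hz;
}

int main(void)
{
	const unsigned long ipg_hz = 66000000;  /* assumed IPG clock rate */

	printf("12.288 MHz ok? %d\n", baudclk_rate_ok(12288000, ipg_hz)); /* 1 */
	printf("24.576 MHz ok? %d\n", baudclk_rate_ok(24576000, ipg_hz)); /* 0 */
	return 0;
}
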
index f7c6734bd5daee1dafd76ba8b6feafa05b2a6c52..fb550b5869d21f73307d36d2edc88ebcffa61f0c 100644 (file)
@@ -372,6 +372,11 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
                            strlen(dai_link->cpu_dai_name)   +
                            strlen(dai_link->codec_dai_name) + 2,
                            GFP_KERNEL);
+       if (!name) {
+               ret = -ENOMEM;
+               goto dai_link_of_err;
+       }
+
        sprintf(name, "%s-%s", dai_link->cpu_dai_name,
                                dai_link->codec_dai_name);
        dai_link->name = dai_link->stream_name = name;
index dfebfdd5eb2aaa65e1f21aa5d65d2c9052486b75..daecc58f28afafb28c8172649355ff9b30749783 100644 (file)
@@ -150,7 +150,7 @@ enum sst_cmd_type {
 
 enum sst_task {
        SST_TASK_SBA = 1,
-       SST_TASK_MMX,
+       SST_TASK_MMX = 3,
 };
 
 enum sst_type {
index c42ffae5fe9f6acab9ebe5723862c4e3a3b4bf8d..402b728c0a06479213425c2daf88eafe4caa2877 100644 (file)
@@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw)
                module = (void *)module + sizeof(*module) + module->mod_size;
        }
 
-       /* allocate scratch mem regions */
-       sst_block_alloc_scratch(dsp);
-
        return 0;
 }
 
index 394af5684c05e37403b50b67ebaa7ead69d14f1f..863a9ca34b8e37fee393930812845e672e717b8f 100644 (file)
@@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw)
 int sst_hsw_dsp_load(struct sst_hsw *hsw)
 {
        struct sst_dsp *dsp = hsw->dsp;
+       struct sst_fw *sst_fw, *t;
        int ret;
 
        dev_dbg(hsw->dev, "loading audio DSP....");
@@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw)
                return ret;
        }
 
-       ret = sst_fw_reload(hsw->sst_fw);
-       if (ret < 0) {
-               dev_err(hsw->dev, "error: SST FW reload failed\n");
-               sst_dsp_dma_put_channel(dsp);
-               return -ENOMEM;
+       list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) {
+               ret = sst_fw_reload(sst_fw);
+               if (ret < 0) {
+                       dev_err(hsw->dev, "error: SST FW reload failed\n");
+                       sst_dsp_dma_put_channel(dsp);
+                       return -ENOMEM;
+               }
        }
+       ret = sst_block_alloc_scratch(hsw->dsp);
+       if (ret < 0)
+               return -EINVAL;
 
        sst_dsp_dma_put_channel(dsp);
        return 0;
@@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw)
 
 int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw)
 {
-       sst_fw_unload(hsw->sst_fw);
-       sst_block_free_scratch(hsw->dsp);
+       struct sst_fw *sst_fw, *t;
+       struct sst_dsp *dsp = hsw->dsp;
+
+       list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
+               sst_fw_unload(sst_fw);
+       }
+       sst_block_free_scratch(dsp);
 
        hsw->boot_complete = false;
 
-       sst_dsp_sleep(hsw->dsp);
+       sst_dsp_sleep(dsp);
 
        return 0;
 }
@@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
                goto fw_err;
        }
 
+       /* allocate scratch mem regions */
+       ret = sst_block_alloc_scratch(hsw->dsp);
+       if (ret < 0)
+               goto boot_err;
+
        /* wait for DSP boot completion */
        sst_dsp_boot(hsw->dsp);
        ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
index d6fa9d5514e1923997066ec1e14c7dafac281387..7e21e8f85885e436cb896a806dde544d3cd8ec4d 100644 (file)
@@ -91,7 +91,8 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
                                  SNDRV_PCM_INFO_INTERLEAVED |
                                  SNDRV_PCM_INFO_PAUSE |
                                  SNDRV_PCM_INFO_RESUME |
-                                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+                                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+                                 SNDRV_PCM_INFO_DRAIN_TRIGGER,
        .formats                = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
                                  SNDRV_PCM_FMTBIT_S32_LE,
        .period_bytes_min       = PAGE_SIZE,
index 8a8d56a146e75a19d4587e169cb967b730b3eb36..11c578651c1c8cc778904ebb7665587cde44beef 100644 (file)
@@ -350,7 +350,9 @@ static inline void sst_save_shim64(struct intel_sst_drv *ctx,
 
        spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
 
-       shim_regs->imrx = sst_shim_read64(shim, SST_IMRX),
+       shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
+       shim_regs->csr = sst_shim_read64(shim, SST_CSR);
+
 
        spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
 }
@@ -367,6 +369,7 @@ static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
         */
        spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
        sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
+       sst_shim_write64(shim, SST_CSR, shim_regs->csr),
        spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
 }
 
@@ -379,6 +382,10 @@ void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
         * initially active. So change the state to active before
         * enabling the pm
         */
+
+       if (!acpi_disabled)
+               pm_runtime_set_active(ctx->dev);
+
        pm_runtime_enable(ctx->dev);
 
        if (acpi_disabled)
@@ -409,6 +416,7 @@ static int intel_sst_runtime_suspend(struct device *dev)
        synchronize_irq(ctx->irq_num);
        flush_workqueue(ctx->post_msg_wq);
 
+       ctx->ops->reset(ctx);
        /* save the shim registers because PMC doesn't save state */
        sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
 
index def7d8260c4e579c06eeba8ba45f422e1fb482c5..d19483081f9bb8ffce6c9fdf8bc3a3d70b40faf3 100644 (file)
@@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
                if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
        } else {
-               if (priv->extclk == priv->clk) {
+               if (clk_is_match(priv->extclk, priv->clk)) {
                        devm_clk_put(&pdev->dev, priv->extclk);
                        priv->extclk = ERR_PTR(-EINVAL);
                } else {
index ccfb41c22e53b11cedbf5d8272a56a757ad283d1..f7eb42aa3f3893ca7cfd76fd2f8a96ff986d68b2 100644 (file)
@@ -352,6 +352,9 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
                return ret;
 
        card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
+
        card->name = devm_kasprintf(dev, GFP_KERNEL,
                                    "HDMI %s", dev_name(ad->dssdev));
        card->owner = THIS_MODULE;
index c7eb9dd67f608c47ffa93a97c1824e3dae3c867b..fd99d89de6a854606357b50c44deeb43bcfb04ab 100644 (file)
@@ -530,8 +530,19 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
 
        case OMAP_MCBSP_SYSCLK_CLKX_EXT:
                regs->srgr2     |= CLKSM;
+               regs->pcr0      |= SCLKME;
+               /*
+                * If McBSP is the master and yet the CLKX/CLKR pin drives the SRG,
+                * disable output on those pins. This makes it possible to inject the
+                * reference clock through CLKX/CLKR. For this to work
+                * set_dai_sysclk() _needs_ to be called after set_dai_fmt().
+                */
+               regs->pcr0      &= ~CLKXM;
+               break;
        case OMAP_MCBSP_SYSCLK_CLKR_EXT:
                regs->pcr0      |= SCLKME;
+               /* Disable output on CLKR pin in master mode */
+               regs->pcr0      &= ~CLKRM;
                break;
        default:
                err = -ENODEV;
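
Aside on the omap-mcbsp hunk above: the ordering requirement in the new comment exists because set_dai_fmt() programs CLKX/CLKR as outputs in master mode, and the CLKX_EXT/CLKR_EXT cases then turn those output drivers back off. A toy, self-contained sketch of why the call order matters; the bit values and helper names are assumptions for illustration, not the real McBSP register layout:

/*
 * Toy model: fmt configures the CLKX pin as an output, sysclk(CLKX_EXT)
 * turns that output driver back off so an external reference can be fed in.
 */
#include <stdio.h>

#define CLKXM  (1u << 1)   /* CLKX pin driven as an output (master) */
#define SCLKME (1u << 0)   /* sample-rate generator uses external reference */

static unsigned int pcr0;

static void set_dai_fmt_master(void)      { pcr0 |= CLKXM; }
static void set_dai_sysclk_clkx_ext(void) { pcr0 |= SCLKME; pcr0 &= ~CLKXM; }

int main(void)
{
	/* correct order: fmt first, then sysclk -> CLKX output stays disabled */
	pcr0 = 0;
	set_dai_fmt_master();
	set_dai_sysclk_clkx_ext();
	printf("fmt then sysclk: pcr0=0x%x (CLKXM %s)\n", pcr0,
	       (pcr0 & CLKXM) ? "set" : "clear");

	/* wrong order: sysclk first -> fmt re-enables the CLKX output driver */
	pcr0 = 0;
	set_dai_sysclk_clkx_ext();
	set_dai_fmt_master();
	printf("sysclk then fmt: pcr0=0x%x (CLKXM %s)\n", pcr0,
	       (pcr0 & CLKXM) ? "set" : "clear");
	return 0;
}
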
index f4b05bc23e4bfb4d27adf8dea4702539c2696609..1343ecbf0bd5ee307d78d05f8a2e44abadc6c877 100644 (file)
@@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
 
index 3cebf6ca03dfb91891cd6e6d5eb51649b504c0b1..0632a36852c8476416f8fcb78d625abb98d658cf 100644 (file)
@@ -174,7 +174,7 @@ config SND_SOC_SMDK_WM8994_PCM
 
 config SND_SOC_SPEYSIDE
        tristate "Audio support for Wolfson Speyside"
-       depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410
+       depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C && SPI_MASTER
        select SND_SAMSUNG_I2S
        select SND_SOC_WM8996
        select SND_SOC_WM9081
@@ -189,7 +189,7 @@ config SND_SOC_TOBERMORY
 
 config SND_SOC_BELLS
        tristate "Audio support for Wolfson Bells"
-       depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA
+       depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA && I2C && SPI_MASTER
        select SND_SAMSUNG_I2S
        select SND_SOC_WM5102
        select SND_SOC_WM5110
@@ -206,7 +206,7 @@ config SND_SOC_LOWLAND
 
 config SND_SOC_LITTLEMILL
        tristate "Audio support for Wolfson Littlemill"
-       depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410
+       depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C
        select SND_SAMSUNG_I2S
        select MFD_WM8994
        select SND_SOC_WM8994
@@ -223,7 +223,7 @@ config SND_SOC_SNOW
 
 config SND_SOC_ODROIDX2
        tristate "Audio support for Odroid-X2 and Odroid-U3"
-       depends on SND_SOC_SAMSUNG
+       depends on SND_SOC_SAMSUNG && I2C
        select SND_SOC_MAX98090
        select SND_SAMSUNG_I2S
        help
@@ -231,6 +231,6 @@ config SND_SOC_ODROIDX2
 
 config SND_SOC_ARNDALE_RT5631_ALC5631
         tristate "Audio support for RT5631(ALC5631) on Arndale Board"
-        depends on SND_SOC_SAMSUNG
+        depends on SND_SOC_SAMSUNG && I2C
         select SND_SAMSUNG_I2S
         select SND_SOC_RT5631
index 1b53605f7154394d1746cb0a692953e296848c2a..110577c523179c3974a2ebe45ef577ea651aa5ce 100644 (file)
@@ -1252,6 +1252,8 @@ static int rsnd_probe(struct platform_device *pdev)
                        goto exit_snd_probe;
        }
 
+       dev_set_drvdata(dev, priv);
+
        /*
         *      asoc register
         */
@@ -1268,8 +1270,6 @@ static int rsnd_probe(struct platform_device *pdev)
                goto exit_snd_soc;
        }
 
-       dev_set_drvdata(dev, priv);
-
        pm_runtime_enable(dev);
 
        dev_info(dev, "probed\n");
index 30579ca5bacb985c7dfa9b9ad30f58cee45d6e1a..e5c990889dcc5e6d9a1ecca2682acf4384ab5ae5 100644 (file)
@@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&client_mutex);
+
        list_for_each_entry(codec, &codec_list, list) {
                len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
                               codec->component.name);
@@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
                }
        }
 
+       mutex_unlock(&client_mutex);
+
        if (ret >= 0)
                ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
@@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&client_mutex);
+
        list_for_each_entry(component, &component_list, list) {
                list_for_each_entry(dai, &component->dai_list, list) {
                        len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
@@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
                }
        }
 
+       mutex_unlock(&client_mutex);
+
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
        kfree(buf);
@@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&client_mutex);
+
        list_for_each_entry(platform, &platform_list, list) {
                len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
                               platform->component.name);
@@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file,
                }
        }
 
+       mutex_unlock(&client_mutex);
+
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
        kfree(buf);
@@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component(
 {
        struct snd_soc_component *component;
 
+       lockdep_assert_held(&client_mutex);
+
        list_for_each_entry(component, &component_list, list) {
                if (of_node) {
                        if (component->dev->of_node == of_node)
@@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai(
        struct snd_soc_component *component;
        struct snd_soc_dai *dai;
 
+       lockdep_assert_held(&client_mutex);
+
        /* Find CPU DAI from registered DAIs*/
        list_for_each_entry(component, &component_list, list) {
                if (dlc->of_node && component->dev->of_node != dlc->of_node)
@@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
        struct snd_soc_codec *codec;
        int ret, i, order;
 
+       mutex_lock(&client_mutex);
        mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
 
        /* bind DAIs */
@@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
        card->instantiated = 1;
        snd_soc_dapm_sync(&card->dapm);
        mutex_unlock(&card->mutex);
+       mutex_unlock(&client_mutex);
 
        return 0;
 
@@ -1680,6 +1698,7 @@ card_probe_error:
 
 base_error:
        mutex_unlock(&card->mutex);
+       mutex_unlock(&client_mutex);
 
        return ret;
 }
@@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
        list_del(&component->list);
 }
 
-static void snd_soc_component_del(struct snd_soc_component *component)
-{
-       mutex_lock(&client_mutex);
-       snd_soc_component_del_unlocked(component);
-       mutex_unlock(&client_mutex);
-}
-
 int snd_soc_register_component(struct device *dev,
                               const struct snd_soc_component_driver *cmpnt_drv,
                               struct snd_soc_dai_driver *dai_drv,
@@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev)
 {
        struct snd_soc_component *cmpnt;
 
+       mutex_lock(&client_mutex);
        list_for_each_entry(cmpnt, &component_list, list) {
                if (dev == cmpnt->dev && cmpnt->registered_as_component)
                        goto found;
        }
+       mutex_unlock(&client_mutex);
        return;
 
 found:
-       snd_soc_component_del(cmpnt);
+       snd_soc_component_del_unlocked(cmpnt);
+       mutex_unlock(&client_mutex);
        snd_soc_component_cleanup(cmpnt);
        kfree(cmpnt);
 }
@@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev)
 {
        struct snd_soc_platform *platform;
 
+       mutex_lock(&client_mutex);
        list_for_each_entry(platform, &platform_list, list) {
-               if (dev == platform->dev)
+               if (dev == platform->dev) {
+                       mutex_unlock(&client_mutex);
                        return platform;
+               }
        }
+       mutex_unlock(&client_mutex);
 
        return NULL;
 }
@@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev)
 {
        struct snd_soc_codec *codec;
 
+       mutex_lock(&client_mutex);
        list_for_each_entry(codec, &codec_list, list) {
                if (dev == codec->dev)
                        goto found;
        }
+       mutex_unlock(&client_mutex);
        return;
 
 found:
-
-       mutex_lock(&client_mutex);
        list_del(&codec->list);
        snd_soc_component_del_unlocked(&codec->component);
        mutex_unlock(&client_mutex);
index 03fed6611d9e83d489e2ab66d94320c84eed4c18..2ed260b10f6dc02cd129550ba1067c878034cb07 100644 (file)
@@ -303,6 +303,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
                return err;
        }
 
+       /* Don't check the sample rate for devices which we know don't
+        * support reading */
+       if (snd_usb_get_sample_rate_quirk(chip))
+               return 0;
+
        if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
                                   USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
                                   UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
index 99b63a7902f302f4a432425d7a6c3d23dbd571f7..81b7da8e56d39e352a5b629bbdf73aa460889b4d 100644 (file)
@@ -302,14 +302,17 @@ static void line6_data_received(struct urb *urb)
 /*
        Read data from device.
 */
-int line6_read_data(struct usb_line6 *line6, int address, void *data,
-                   size_t datalen)
+int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+                   unsigned datalen)
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
        unsigned char len;
        unsigned count;
 
+       if (address > 0xffff || datalen > 0xff)
+               return -EINVAL;
+
        /* query the serial number: */
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -370,14 +373,17 @@ EXPORT_SYMBOL_GPL(line6_read_data);
 /*
        Write data to device.
 */
-int line6_write_data(struct usb_line6 *line6, int address, void *data,
-                    size_t datalen)
+int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+                    unsigned datalen)
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
        unsigned char status;
        int count;
 
+       if (address > 0xffff || datalen > 0xffff)
+               return -EINVAL;
+
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
                              0x0022, address, data, datalen,
index 5d20294d64f43be80334bb0a902deb7ff7e63da0..7da643e79e3b50426c3cbaf4d46184d7283f3848 100644 (file)
@@ -147,8 +147,8 @@ struct usb_line6 {
 
 extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
                                      int code2, int size);
-extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
-                          size_t datalen);
+extern int line6_read_data(struct usb_line6 *line6, unsigned address,
+                          void *data, unsigned datalen);
 extern int line6_read_serial_number(struct usb_line6 *line6,
                                    u32 *serial_number);
 extern int line6_send_raw_message_async(struct usb_line6 *line6,
@@ -161,8 +161,8 @@ extern void line6_start_timer(struct timer_list *timer, unsigned long msecs,
                              void (*function)(unsigned long),
                              unsigned long data);
 extern int line6_version_request_async(struct usb_line6 *line6);
-extern int line6_write_data(struct usb_line6 *line6, int address, void *data,
-                           size_t datalen);
+extern int line6_write_data(struct usb_line6 *line6, unsigned address,
+                           void *data, unsigned datalen);
 
 int line6_probe(struct usb_interface *interface,
                const struct usb_device_id *id,
index 05dee690f4876167ace542fe500bb077bec26762..97ed593f6010f6dccf4e5eb5db39e4a2381a7692 100644 (file)
@@ -39,7 +39,7 @@ static void change_volume(struct urb *urb_out, int volume[],
                for (; p < buf_end; ++p) {
                        short pv = le16_to_cpu(*p);
                        int val = (pv * volume[chn & 1]) >> 8;
-                       pv = clamp(val, 0x7fff, -0x8000);
+                       pv = clamp(val, -0x8000, 0x7fff);
                        *p = cpu_to_le16(pv);
                        ++chn;
                }
@@ -54,7 +54,7 @@ static void change_volume(struct urb *urb_out, int volume[],
 
                        val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16);
                        val = (val * volume[chn & 1]) >> 8;
-                       val = clamp(val, 0x7fffff, -0x800000);
+                       val = clamp(val, -0x800000, 0x7fffff);
                        p[0] = val;
                        p[1] = val >> 8;
                        p[2] = val >> 16;
@@ -126,7 +126,7 @@ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal,
                        short pov = le16_to_cpu(*po);
                        short piv = le16_to_cpu(*pi);
                        int val = pov + ((piv * volume) >> 8);
-                       pov = clamp(val, 0x7fff, -0x8000);
+                       pov = clamp(val, -0x8000, 0x7fff);
                        *po = cpu_to_le16(pov);
                }
        }
index 67d476548dcf9094acd61e0ff74f0f320af1556a..07f984d5f5162809ee6124f651585df5c3a4496f 100644 (file)
@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       USB_DEVICE(0x0582, 0x0159),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               /* .vendor_name = "Roland", */
+               /* .product_name = "UA-22", */
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = & (const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 /* this catches most recent vendor-specific Roland devices */
 {
        .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
index a7398412310bd53e00b84c2aa6c567ac451a4e7a..753a47de8459b7a0b505e72d2f660793d9ede885 100644 (file)
@@ -1111,6 +1111,11 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        }
 }
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+{
+       /* MS Lifecam HD-5000 doesn't support reading the sample rate. */
+       return chip->usb_id == USB_ID(0x045E, 0x076D);
+}
 
 /* Marantz/Denon USB DACs need a vendor cmd to switch
  * between PCM and native DSD mode
@@ -1122,6 +1127,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
        int err;
 
        switch (subs->stream->chip->usb_id) {
+       case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
 
@@ -1201,6 +1207,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
 
                switch (le16_to_cpu(dev->descriptor.idProduct)) {
+               case 0x1003: /* Denon DA300-USB */
                case 0x3005: /* Marantz HD-DAC1 */
                case 0x3006: /* Marantz SA-14S1 */
                        mdelay(20);
@@ -1262,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 
        /* Denon/Marantz devices with USB DAC functionality */
        switch (chip->usb_id) {
+       case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
                if (fp->altsetting == 2)
index 1b862386577d036d262a2d760f4f3551337ac8be..2cd71ed1201f93ea8e6b54e1d487a84751e09885 100644 (file)
@@ -21,6 +21,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              struct audioformat *fmt);
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip);
+
 int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
                                 struct audioformat *fp);
 
index 6c14afe8c1b18ea1ab4a50aa4b6dc46925d304a1..db1d3a29d97fec67c47a4dd84eca2f7779ec1865 100644 (file)
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
        memcpy_t fn = r->fn.memcpy;
        int i;
 
-       memcpy_alloc_mem(&src, &dst, len);
+       memcpy_alloc_mem(&dst, &src, len);
 
        if (prefault)
                fn(dst, src, len);
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
        void *src = NULL, *dst = NULL;
        int i;
 
-       memcpy_alloc_mem(&src, &dst, len);
+       memcpy_alloc_mem(&dst, &src, len);
 
        if (prefault)
                fn(dst, src, len);
index ff95a68741d1ccdb54e54d929f2292e88963a5d1..ac8721ffa6c8c681ccdb563607d5b80917062438 100644 (file)
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64)
   endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
index 42ac05aaf8ac1a8bf004c1e664aa10a1853111bc..b32ff3372514da7da33da49cfc2d52755f1db935 100644 (file)
@@ -49,7 +49,7 @@ test-hello.bin:
        $(BUILD)
 
 test-pthread-attr-setaffinity-np.bin:
-       $(BUILD) -Werror -lpthread
+       $(BUILD) -D_GNU_SOURCE -Werror -lpthread
 
 test-stackprotector-all.bin:
        $(BUILD) -Werror -fstack-protector-all
index 0a0d3ecb4e8af81b77222c29f92efcbf4485313b..2b81b72eca23726a03b263fb21efa649c13c9495 100644 (file)
@@ -5,10 +5,11 @@ int main(void)
 {
        int ret = 0;
        pthread_attr_t thread_attr;
+       cpu_set_t cs;
 
        pthread_attr_init(&thread_attr);
        /* don't care abt exact args, just the API itself in libpthread */
-       ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+       ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
 
        return ret;
 }
index 61bf9128e1f28ce40d3d694ef5d9de8bb9cda1f8..9d9db3b296dd6b50ed27e581c0d78aab65dfec49 100644 (file)
@@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp);
 
 static void ins__delete(struct ins_operands *ops)
 {
+       if (ops == NULL)
+               return;
        zfree(&ops->source.raw);
        zfree(&ops->source.name);
        zfree(&ops->target.raw);
index 47b78b3f03257385023629696dda95e57d2b5ab7..6da965bdbc2caf8762b5018c6837422a00aac456 100644 (file)
@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
        if (cpu < 0)
                cpu = 0;
 
+       /*
+        * Using -1 for the pid is a workaround to avoid gratuitous jump label
+        * changes.
+        */
        while (1) {
                /* check cloexec flag */
                fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
                  err, strerror_r(err, sbuf, sizeof(sbuf)));
 
        /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
-       fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+       while (1) {
+               fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+               if (fd < 0 && pid == -1 && errno == EACCES) {
+                       pid = 0;
+                       continue;
+               }
+               break;
+       }
        err = errno;
 
+       if (fd >= 0)
+               close(fd);
+
        if (WARN_ONCE(fd < 0 && err != EBUSY,
                      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
                      err, strerror_r(err, sbuf, sizeof(sbuf))))
                return -1;
 
-       close(fd);
-
        return 0;
 }
 
index c94a9e03ecf15744800d4a6bc68cca28ca70259e..e99a67632831a8e6548fae8a40b654f01b009d1e 100644 (file)
@@ -28,7 +28,7 @@ struct perf_mmap {
        int              mask;
        int              refcnt;
        unsigned int     prev;
-       char             event_copy[PERF_SAMPLE_MAX_SIZE];
+       char             event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
 struct perf_evlist {
index b24f9d8727a894ccae13353abad0b951ec1b0916..33b7a2aef71322ab88b93e675c070bfc4a7c7da7 100644 (file)
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
+#ifndef EM_AARCH64
+#define EM_AARCH64     183  /* ARM 64 bit */
+#endif
+
+
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);
 
index 3ed7c0476d486d21dcb1c735478191ffa88c2fc7..2e2ba2efa0d9f97629ec5af9fb1136b3dee8962d 100644 (file)
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c
 
 $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
        $(ECHO) "  CC      " $@
-       $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@
+       $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
        $(QUIET) $(STRIPCMD) $@
 
 $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
index e238c9559caf9a7757d2d389e0bda57cd73229a8..8d5d1d2ee7c1d793405685e8266045363bcce960 100644 (file)
@@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp,
 #ifdef __NR_execveat
        return syscall(__NR_execveat, fd, path, argv, envp, flags);
 #else
-       errno = -ENOSYS;
+       errno = ENOSYS;
        return -1;
 #endif
 }
@@ -234,6 +234,14 @@ static int run_tests(void)
        int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC);
        int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC);
 
+       /* Check if we have execveat at all, and bail early if not */
+       errno = 0;
+       execveat_(-1, NULL, NULL, NULL, 0);
+       if (errno == ENOSYS) {
+               printf("[FAIL] ENOSYS calling execveat - no kernel support?\n");
+               return 1;
+       }
+
        /* Change file position to confirm it doesn't affect anything */
        lseek(fd, 10, SEEK_SET);
 
diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore
new file mode 100644 (file)
index 0000000..06e96be
--- /dev/null
@@ -0,0 +1 @@
+/tmon
index e775adcbd29fdd3c0fccb66a09efc86e1fd95216..0788621c8d760f01f714d377a18c6a390a990fde 100644 (file)
@@ -2,8 +2,8 @@ VERSION = 1.0
 
 BINDIR=usr/bin
 WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
-CFLAGS= -O1 ${WARNFLAGS} -fstack-protector
-CC=gcc
+CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector
+CC=$(CROSS_COMPILE)gcc
 
 CFLAGS+=-D VERSION=\"$(VERSION)\"
 LDFLAGS+=
@@ -16,12 +16,21 @@ INSTALL_CONFIGFILE=install -m 644 -p
 CONFIG_FILE=
 CONFIG_PATH=
 
+# Static builds might require -ltinfo, for instance
+ifneq ($(findstring -static, $(LDFLAGS)),)
+STATIC := --static
+endif
+
+TMON_LIBS=-lm -lpthread
+TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \
+                    pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \
+                    echo -lpanel -lncurses)
 
 OBJS = tmon.o tui.o sysfs.o pid.o
 OBJS +=
 
 tmon: $(OBJS) Makefile tmon.h
-       $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
+       $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS)  -o $(TARGET) $(TMON_LIBS)
 
 valgrind: tmon
         sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET)  1> /dev/null
index 0be727cb9892ccc50e412a8a8cdb28f3f2476c68..02d5179803aae1cdfa43ecf23e25bc8e1c9b9e43 100644 (file)
 The \fB-l --log\fP option writes data to /var/tmp/tmon.log
 .PP
 The \fB-t --time-interval\fP option sets the polling interval in seconds
 .PP
+The \fB-T --target-temp\fP option sets the initial target temperature
+.PP
 The \fB-v --version\fP option shows the version of \fBtmon \fP
 .PP
 The \fB-z --zone\fP option sets the target thermal zone instance to be controlled
index 09b7c3218334ba29dad1192d5dbd5a963dd51553..9aa19652e8e859a34a5db1edbae0c02170499967 100644 (file)
@@ -64,6 +64,7 @@ void usage()
        printf("  -h, --help            show this help message\n");
        printf("  -l, --log             log data to /var/tmp/tmon.log\n");
        printf("  -t, --time-interval   sampling time interval, > 1 sec.\n");
+       printf("  -T, --target-temp     initial target temperature\n");
        printf("  -v, --version         show version\n");
        printf("  -z, --zone            target thermal zone id\n");
 
@@ -219,6 +220,7 @@ static struct option opts[] = {
        { "control", 1, NULL, 'c' },
        { "daemon", 0, NULL, 'd' },
        { "time-interval", 1, NULL, 't' },
+       { "target-temp", 1, NULL, 'T' },
        { "log", 0, NULL, 'l' },
        { "help", 0, NULL, 'h' },
        { "version", 0, NULL, 'v' },
@@ -231,7 +233,7 @@ int main(int argc, char **argv)
 {
        int err = 0;
        int id2 = 0, c;
-       double yk = 0.0; /* controller output */
+       double yk = 0.0, temp; /* controller output */
        int target_tz_index;
 
        if (geteuid() != 0) {
@@ -239,7 +241,7 @@ int main(int argc, char **argv)
                exit(EXIT_FAILURE);
        }
 
-       while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) {
+       while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) {
                switch (c) {
                case 'c':
                        no_control = 0;
@@ -254,6 +256,14 @@ int main(int argc, char **argv)
                        if (ticktime < 1)
                                ticktime = 1;
                        break;
+               case 'T':
+                       temp = strtod(optarg, NULL);
+                       if (temp < 0) {
+                               fprintf(stderr, "error: temperature must be positive\n");
+                               return 1;
+                       }
+                       target_temp_user = temp;
+                       break;
                case 'l':
                        printf("Logging data to /var/tmp/tmon.log\n");
                        logging = 1;
index 89f8ef0e15c810936737a51f49f1a87f1f2c5648..b5d1c6b22dd3c8781c7cadd590f2121dfb8328cc 100644 (file)
 
 #include "tmon.h"
 
+#define min(x, y) ({                           \
+       typeof(x) _min1 = (x);                  \
+       typeof(y) _min2 = (y);                  \
+       (void) (&_min1 == &_min2);              \
+       _min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({                           \
+       typeof(x) _max1 = (x);                  \
+       typeof(y) _max2 = (y);                  \
+       (void) (&_max1 == &_max2);              \
+       _max1 > _max2 ? _max1 : _max2; })
+
 static PANEL *data_panel;
 static PANEL *dialogue_panel;
 static PANEL *top;
@@ -98,6 +110,18 @@ void write_status_bar(int x, char *line)
        wrefresh(status_bar_window);
 }
 
+/* wrap at 5 */
+#define DIAG_DEV_ROWS  5
+/*
+ * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit
+ */
+static int diag_dev_rows(void)
+{
+       int entries = ptdata.nr_cooling_dev + 1;
+       int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2);
+       return min(rows, entries);
+}
+
 void setup_windows(void)
 {
        int y_begin = 1;
@@ -122,7 +146,7 @@ void setup_windows(void)
         * dialogue window is a pop-up, when needed it lays on top of cdev win
         */
 
-       dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50,
+       dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50,
                                DIAG_Y, DIAG_X);
 
        thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor *
@@ -258,21 +282,26 @@ void show_cooling_device(void)
 }
 
 const char DIAG_TITLE[] = "[ TUNABLES ]";
-#define DIAG_DEV_ROWS  5
 void show_dialogue(void)
 {
        int j, x = 0, y = 0;
+       int rows, cols;
        WINDOW *w = dialogue_window;
 
        if (tui_disabled || !w)
                return;
 
+       getmaxyx(w, rows, cols);
+
+       /* Silence compiler 'unused' warnings */
+       (void)cols;
+
        werase(w);
        box(w, 0, 0);
        mvwprintw(w, 0, maxx/4, DIAG_TITLE);
        /* list all the available tunables */
        for (j = 0; j <= ptdata.nr_cooling_dev; j++) {
-               y = j % DIAG_DEV_ROWS;
+               y = j % diag_dev_rows();
                if (y == 0 && j != 0)
                        x += 20;
                if (j == ptdata.nr_cooling_dev)
@@ -283,12 +312,10 @@ void show_dialogue(void)
                                ptdata.cdi[j].type, ptdata.cdi[j].instance);
        }
        wattron(w, A_BOLD);
-       mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?");
+       mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?");
        wattroff(w, A_BOLD);
-       /* y size of dialogue win is nr cdev + 5, so print legend
-        * at the bottom line
-        */
-       mvwprintw(w, ptdata.nr_cooling_dev+3, 1,
+       /* print legend at the bottom line */
+       mvwprintw(w, rows - 2, 1,
                "Legend: A=Active, P=Passive, C=Critical");
 
        wrefresh(dialogue_window);
@@ -437,7 +464,7 @@ static void handle_input_choice(int ch)
                        snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ",
                                ptdata.cdi[cdev_id].type,
                                ptdata.cdi[cdev_id].instance);
-               write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2);
+               write_dialogue_win(buf, diag_dev_rows() + 2, 2);
                handle_input_val(cdev_id);
        } else {
                snprintf(buf, sizeof(buf), "Invalid selection %d", ch);
index a0a7b5d1a0703a00f81c9421856c0fdf1b1f6aaa..f9b9c7c5137214cb56bcc0cf7455d2d712dc6848 100644 (file)
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 {
        if (!(lr_desc.state & LR_STATE_MASK))
                vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
+       else
+               vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
        return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
+static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
+}
+
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
        u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
        .sync_lr_elrsr          = vgic_v2_sync_lr_elrsr,
        .get_elrsr              = vgic_v2_get_elrsr,
        .get_eisr               = vgic_v2_get_eisr,
+       .clear_eisr             = vgic_v2_clear_eisr,
        .get_interrupt_status   = vgic_v2_get_interrupt_status,
        .enable_underflow       = vgic_v2_enable_underflow,
        .disable_underflow      = vgic_v2_disable_underflow,
index 3a62d8a9a2c6fce0cc2f94a6e558115785c03811..dff06021e74855a2d6cb9b8830fd30818a63c927 100644 (file)
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 {
        if (!(lr_desc.state & LR_STATE_MASK))
                vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+       else
+               vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
 }
 
 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
        return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
 }
 
+static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
+}
+
 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
        u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = {
        .sync_lr_elrsr          = vgic_v3_sync_lr_elrsr,
        .get_elrsr              = vgic_v3_get_elrsr,
        .get_eisr               = vgic_v3_get_eisr,
+       .clear_eisr             = vgic_v3_clear_eisr,
        .get_interrupt_status   = vgic_v3_get_interrupt_status,
        .enable_underflow       = vgic_v3_enable_underflow,
        .disable_underflow      = vgic_v3_disable_underflow,
index 0cc6ab6005a07bb2e9453106a672e9efd4e237a6..c9f60f52458802f4a66a3912732d8665bc4a3e32 100644 (file)
@@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
        return vgic_ops->get_eisr(vcpu);
 }
 
+static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
+{
+       vgic_ops->clear_eisr(vcpu);
+}
+
 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
 {
        return vgic_ops->get_interrupt_status(vcpu);
@@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
        vgic_set_lr(vcpu, lr_nr, vlr);
        clear_bit(lr_nr, vgic_cpu->lr_used);
        vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+       vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
                        BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                        vlr.state |= LR_STATE_PENDING;
                        vgic_set_lr(vcpu, lr, vlr);
+                       vgic_sync_lr_elrsr(vcpu, lr, vlr);
                        return true;
                }
        }
@@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
                vlr.state |= LR_EOI_INT;
 
        vgic_set_lr(vcpu, lr, vlr);
+       vgic_sync_lr_elrsr(vcpu, lr, vlr);
 
        return true;
 }
@@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
        if (status & INT_STATUS_UNDERFLOW)
                vgic_disable_underflow(vcpu);
 
+       /*
+        * In the next iterations of the vcpu loop, if we sync the vgic state
+        * after flushing it, but before entering the guest (this happens for
+        * pending signals and vmid rollovers), then make sure we don't pick
+        * up any old maintenance interrupts here.
+        */
+       vgic_clear_eisr(vcpu);
+
        return level_pending;
 }
 
@@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
         * emulation. So check this here again. KVM_CREATE_DEVICE does
         * the proper checks already.
         */
-       if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2)
-               return -ENODEV;
+       if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
+               ret = -ENODEV;
+               goto out;
+       }
 
        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
index a1093700f3a41b84fc71591a7465940fdd4a017f..a2214d9609bda59c8e48e841823da4f459081d52 100644 (file)
@@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
        case KVM_CAP_SIGNAL_MSI:
 #endif
 #ifdef CONFIG_HAVE_KVM_IRQFD
+       case KVM_CAP_IRQFD:
        case KVM_CAP_IRQFD_RESAMPLE:
 #endif
        case KVM_CAP_CHECK_EXTENSION_VM: