Merge branch 'gpio-irq-validmask' of /home/linus/linux-pinctrl into devel
author Linus Walleij <linus.walleij@linaro.org>
Fri, 23 Sep 2016 12:51:18 +0000 (14:51 +0200)
committer Linus Walleij <linus.walleij@linaro.org>
Fri, 23 Sep 2016 12:51:18 +0000 (14:51 +0200)
345 files changed:
Documentation/block/queue-sysfs.txt
Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-aspeed.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-axp209.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-tps65086.txt [deleted file]
Documentation/devicetree/bindings/gpio/gpio-ts4900.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
Documentation/devicetree/bindings/mfd/stmpe.txt
Documentation/gpio/board.txt
Documentation/gpio/gpio-legacy.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/dts/arm-realview-pbx-a9.dts
arch/arm/boot/dts/integratorap.dts
arch/arm/boot/dts/integratorcp.dts
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/configs/aspeed_g4_defconfig
arch/arm/configs/aspeed_g5_defconfig
arch/arm/include/asm/uaccess.h
arch/arm/kernel/sys_oabi-compat.c
arch/arm/kvm/arm.c
arch/arm/mach-clps711x/Kconfig
arch/arm/mach-mvebu/Makefile
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-oxnas/Kconfig
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/spitz.c
arch/arm/mach-realview/Makefile
arch/arm/mach-s5pv210/Makefile
arch/arm/mach-shmobile/platsmp.c
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/exynos/exynos7-espresso.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/kprobes.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/smp.c
arch/arm64/mm/fault.c
arch/blackfin/Kconfig
arch/h8300/include/asm/io.h
arch/ia64/Kconfig
arch/ia64/include/asm/uaccess.h
arch/m68k/kernel/signal.c
arch/metag/mm/init.c
arch/mips/kvm/emulate.c
arch/mips/kvm/mmu.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/crypto/crc32c-vpmsum_glue.c
arch/powerpc/include/asm/cpuidle.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/include/asm/xics.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/Makefile
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/lib/checksum_32.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/pasemi/iommu.c
arch/powerpc/platforms/powernv/opal-irqchip.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/xics/Kconfig
arch/powerpc/sysdev/xics/ics-opal.c
arch/powerpc/sysdev/xics/ics-rtas.c
arch/powerpc/sysdev/xics/xics-common.c
arch/s390/Kconfig
arch/s390/kvm/kvm-s390.c
arch/s390/lib/uaccess.c
arch/sparc/Kconfig
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/unicore32/include/asm/mmu_context.h
arch/x86/Kconfig
arch/x86/entry/Makefile
arch/x86/entry/entry_64.S
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/init.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/realmode.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/uv/bios.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq.c
arch/x86/kernel/setup.c
arch/x86/kernel/tsc.c
arch/x86/kernel/uprobes.c
arch/x86/lib/hweight.S
arch/x86/lib/kaslr.c
arch/x86/mm/ident_map.c
arch/x86/mm/init.c
arch/x86/mm/kaslr.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/uv/bios_uv.c
arch/x86/power/hibernate_64.c
arch/x86/realmode/init.c
drivers/acpi/nfit/core.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/clocksource/arm_arch_timer.c
drivers/cpufreq/powernv-cpufreq.c
drivers/firmware/efi/capsule-loader.c
drivers/firmware/efi/capsule.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-altera.c
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-aspeed.c [new file with mode: 0644]
drivers/gpio/gpio-ath79.c
drivers/gpio/gpio-axp209.c [new file with mode: 0644]
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-da9055.c
drivers/gpio/gpio-f7188x.c
drivers/gpio/gpio-gpio-mm.c [new file with mode: 0644]
drivers/gpio/gpio-iop.c
drivers/gpio/gpio-it87.c
drivers/gpio/gpio-loongson1.c
drivers/gpio/gpio-lp873x.c [new file with mode: 0644]
drivers/gpio/gpio-lpc18xx.c
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpio-mmio.c
drivers/gpio/gpio-msic.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-palmas.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pisosr.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-spear-spics.c
drivers/gpio/gpio-stmpe.c
drivers/gpio/gpio-sx150x.c
drivers/gpio/gpio-tc3589x.c
drivers/gpio/gpio-tpic2810.c
drivers/gpio/gpio-tps65086.c
drivers/gpio/gpio-tps65218.c
drivers/gpio/gpio-tps65912.c
drivers/gpio/gpio-ts4800.c
drivers/gpio/gpio-ts4900.c [new file with mode: 0644]
drivers/gpio/gpio-twl4030.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpio-wcove.c [new file with mode: 0644]
drivers/gpio/gpio-wm831x.c
drivers/gpio/gpio-wm8350.c
drivers/gpio/gpio-wm8994.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/lp873x.c [new file with mode: 0644]
drivers/mfd/stmpe-i2c.c
drivers/mfd/stmpe.c
drivers/mfd/stmpe.h
drivers/misc/Makefile
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/native.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/vphb.c
drivers/misc/lkdtm_usercopy.c
drivers/nvdimm/btt.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/nd.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/pci/msi.c
drivers/perf/arm_pmu.c
drivers/platform/x86/dell-wmi.c
drivers/rapidio/rio_cm.c
drivers/regulator/Kconfig
drivers/s390/virtio/Makefile
drivers/s390/virtio/kvm_virtio.c
drivers/scsi/ipr.c
drivers/thermal/clock_cooling.c
drivers/thermal/fair_share.c
drivers/thermal/gov_bang_bang.c
drivers/thermal/intel_pch_thermal.c
drivers/thermal/intel_powerclamp.c
drivers/thermal/power_allocator.c
drivers/thermal/step_wise.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_hwmon.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vhost/vsock.c
drivers/virtio/virtio_ring.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/ceph/caps.c
fs/ceph/mds_client.c
fs/fs-writeback.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4state.c
fs/nfsd/vfs.c
fs/pipe.c
fs/proc/meminfo.c
include/asm-generic/qrwlock.h
include/drm/ttm/ttm_bo_driver.h
include/linux/bvec.h
include/linux/gpio/driver.h
include/linux/kvm_host.h
include/linux/mfd/lp873x.h [new file with mode: 0644]
include/linux/mfd/stmpe.h
include/linux/mmzone.h
include/linux/msi.h
include/linux/perf_event.h
include/linux/platform_data/gpio-lpc32xx.h [deleted file]
include/linux/printk.h
include/linux/slab.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/xprt.h
include/linux/thread_info.h
include/linux/uaccess.h
include/trace/events/timer.h
include/uapi/linux/virtio_vsock.h
include/uapi/misc/cxl.h
init/Kconfig
kernel/events/core.c
kernel/futex.c
kernel/irq/msi.c
kernel/locking/qspinlock_paravirt.h
kernel/locking/qspinlock_stat.h
kernel/power/hibernate.c
kernel/printk/internal.h
kernel/printk/nmi.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/time/timer.c
lib/strncpy_from_user.c
lib/strnlen_user.c
mm/Makefile
mm/hugetlb.c
mm/kasan/quarantine.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/oom_kill.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slub.c
mm/usercopy.c [new file with mode: 0644]
net/9p/trans_virtio.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/string_table.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
scripts/Kbuild.include
scripts/Makefile.gcc-plugins
scripts/gcc-plugin.sh
scripts/gcc-plugins/Makefile
scripts/get_maintainer.pl
security/Kconfig
sound/pci/hda/hda_intel.c
sound/usb/quirks.c
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/required-features.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/include/uapi/linux/bpf.h
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-script.txt
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-finder.c
tools/perf/util/sort.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/powerpc/Makefile
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/kvm_main.c

diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index d515d58962b9df66b39bdcf0fc6f981090ed4630..2a3904030dea5d287f6e1dc2f8f93461707ae018 100644 (file)
@@ -14,6 +14,12 @@ add_random (RW)
 This file allows to turn off the disk entropy contribution. Default
 value of this file is '1'(on).
 
+dax (RO)
+--------
+This file indicates whether the device supports Direct Access (DAX),
+used by CPU-addressable storage to bypass the pagecache.  It shows '1'
+if true, '0' if not.
+
 discard_granularity (RO)
 -----------------------
 This shows the size of internal allocation of the device in bytes, if
@@ -46,6 +52,12 @@ hw_sector_size (RO)
 -------------------
 This is the hardware sector size of the device, in bytes.
 
+io_poll (RW)
+------------
+When read, this file shows the total number of block IO polls and how
+many returned success.  Writing '0' to this file will disable polling
+for this device.  Writing any non-zero value will enable this feature.
+
 iostats (RW)
 -------------
 This file is used to control (on/off) the iostats accounting of the
@@ -151,5 +163,11 @@ device state. This means that it might not be safe to toggle the
 setting from "write back" to "write through", since that will also
 eliminate cache flushes issued by the kernel.
 
+write_same_max_bytes (RO)
+-------------------------
+This is the number of bytes the device can write in a single write-same
+command.  A value of '0' means write-same is not supported by this
+device.
+
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,bcm6345-gpio.txt
new file mode 100644 (file)
index 0000000..e785314
--- /dev/null
@@ -0,0 +1,46 @@
+Bindings for the Broadcom's brcm,bcm6345-gpio memory-mapped GPIO controllers.
+
+These bindings can be used on any BCM63xx SoC. However, BCM6338 and BCM6345
+are the only ones which don't need a pinctrl driver.
+BCM6338 have 8-bit data and dirout registers, where GPIO state can be read
+and/or written, and the direction changed from input to output.
+BCM6345 have 16-bit data and dirout registers, where GPIO state can be read
+and/or written, and the direction changed from input to output.
+
+Required properties:
+       - compatible: should be "brcm,bcm6345-gpio"
+       - reg-names: must contain
+               "dat" - data register
+               "dirout" - direction (output) register
+       - reg: address + size pairs describing the GPIO register sets;
+               order must correspond with the order of entries in reg-names
+       - #gpio-cells: must be set to 2. The first cell is the pin number and
+                       the second cell is used to specify the gpio polarity:
+                       0 = active high
+                       1 = active low
+       - gpio-controller: Marks the device node as a gpio controller.
+
+Optional properties:
+       - native-endian: use native endian memory.
+
+Examples:
+       - BCM6338:
+       gpio: gpio-controller@fffe0407 {
+               compatible = "brcm,bcm6345-gpio";
+               reg-names = "dirout", "dat";
+               reg = <0xfffe0407 1>, <0xfffe040f 1>;
+
+               #gpio-cells = <2>;
+               gpio-controller;
+       };
+
+       - BCM6345:
+       gpio: gpio-controller@fffe0406 {
+               compatible = "brcm,bcm6345-gpio";
+               reg-names = "dirout", "dat";
+               reg = <0xfffe0406 2>, <0xfffe040a 2>;
+               native-endian;
+
+               #gpio-cells = <2>;
+               gpio-controller;
+       };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
new file mode 100644 (file)
index 0000000..393bb2e
--- /dev/null
@@ -0,0 +1,36 @@
+Aspeed GPIO controller Device Tree Bindings
+-------------------------------------------
+
+Required properties:
+- compatible           : Either "aspeed,ast2400-gpio" or "aspeed,ast2500-gpio"
+
+- #gpio-cells          : Should be two
+                         - First cell is the GPIO line number
+                         - Second cell is used to specify optional
+                           parameters (unused)
+
+- reg                  : Address and length of the register set for the device
+- gpio-controller      : Marks the device node as a GPIO controller.
+- interrupts           : Interrupt specifier (see interrupt bindings for
+                         details)
+- interrupt-controller : Mark the GPIO controller as an interrupt-controller
+
+Optional properties:
+
+- interrupt-parent     : The parent interrupt controller, optional if inherited
+
+The gpio and interrupt properties are further described in their respective
+bindings documentation:
+
+- Documentation/devicetree/bindings/gpio/gpio.txt
+- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+
+  Example:
+       gpio@1e780000 {
+               #gpio-cells = <2>;
+               compatible = "aspeed,ast2400-gpio";
+               gpio-controller;
+               interrupts = <20>;
+               reg = <0x1e780000 0x1000>;
+               interrupt-controller;
+       };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-axp209.txt b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
new file mode 100644 (file)
index 0000000..a661130
--- /dev/null
@@ -0,0 +1,30 @@
+AXP209 GPIO controller
+
+This driver follows the usual GPIO bindings found in
+Documentation/devicetree/bindings/gpio/gpio.txt
+
+Required properties:
+- compatible: Should be "x-powers,axp209-gpio"
+- #gpio-cells: Should be two. The first cell is the pin number and the
+  second is the GPIO flags.
+- gpio-controller: Marks the device node as a GPIO controller.
+
+This node must be a subnode of the axp20x PMIC, documented in
+Documentation/devicetree/bindings/mfd/axp20x.txt
+
+Example:
+
+axp209: pmic@34 {
+       compatible = "x-powers,axp209";
+       reg = <0x34>;
+       interrupt-parent = <&nmi_intc>;
+       interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+       interrupt-controller;
+       #interrupt-cells = <1>;
+
+       axp_gpio: gpio {
+               compatible = "x-powers,axp209-gpio";
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt
new file mode 100644 (file)
index 0000000..1afc2de
--- /dev/null
@@ -0,0 +1,16 @@
+TPIC2810 GPIO controller bindings
+
+Required properties:
+ - compatible          : Should be "ti,tpic2810".
+ - reg                 : The I2C address of the device
+ - gpio-controller     : Marks the device node as a GPIO controller.
+ - #gpio-cells         : Should be two. For consumer use see gpio.txt.
+
+Example:
+
+       gpio@60 {
+               compatible = "ti,tpic2810";
+               reg = <0x60>;
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tps65086.txt b/Documentation/devicetree/bindings/gpio/gpio-tps65086.txt
deleted file mode 100644 (file)
index ba05107..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-* TPS65086 GPO Controller bindings
-
-Required properties:
- - compatible          : Should be "ti,tps65086-gpio".
- - gpio-controller     : Marks the device node as a GPIO Controller.
- - #gpio-cells         : Should be two. The first cell is the pin number
-                           and the second cell is used to specify flags.
-                           See ../gpio/gpio.txt for possible values.
-
-Example:
-
-       gpio4: gpio {
-               compatible = "ti,tps65086-gpio";
-               gpio-controller;
-               #gpio-cells = <2>;
-       };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-ts4900.txt b/Documentation/devicetree/bindings/gpio/gpio-ts4900.txt
new file mode 100644 (file)
index 0000000..3f8e71b
--- /dev/null
@@ -0,0 +1,30 @@
+* Technologic Systems I2C-FPGA's GPIO controller bindings
+
+This bindings describes the GPIO controller for Technologic's FPGA core.
+TS-4900's FPGA encodes the GPIO state on 3 bits, whereas the TS-7970's FPGA
+uses 2 bits: it doesn't use a dedicated input bit.
+
+Required properties:
+- compatible: Should be one of the following
+               "technologic,ts4900-gpio"
+               "technologic,ts7970-gpio"
+- reg: Physical base address of the controller and length
+       of memory mapped region.
+- #gpio-cells: Should be two. The first cell is the pin number.
+- gpio-controller: Marks the device node as a gpio controller.
+
+Optional property:
+- ngpios: Number of GPIOs this controller is instantiated with,
+  the default is 32. See gpio.txt for more details.
+
+Example:
+
+&i2c2 {
+       gpio8: gpio@28 {
+               compatible = "technologic,ts4900-gpio";
+               reg = <0x28>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               ngpios = <32>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
index 98d1983969566a0278b4544a5a24cef3c14fe85c..c3d016532d8e69b81fbb834057c5cd4976f430ba 100644 (file)
@@ -44,26 +44,3 @@ Example for a PXA3xx platform:
                interrupt-controller;
                #interrupt-cells = <0x2>;
        };
-
-* Marvell Orion GPIO Controller
-
-Required properties:
-- compatible         : Should be "marvell,orion-gpio"
-- reg                : Address and length of the register set for controller.
-- gpio-controller    : So we know this is a gpio controller.
-- ngpio              : How many gpios this controller has.
-- interrupts        : Up to 4 Interrupts for the controller.
-
-Optional properties:
-- mask-offset        : For SMP Orions, offset for Nth CPU
-
-Example:
-
-               gpio0: gpio@10100 {
-                       compatible = "marvell,orion-gpio";
-                       #gpio-cells = <2>;
-                       gpio-controller;
-                       reg = <0x10100 0x40>;
-                       ngpio = <32>;
-                       interrupts = <35>, <36>, <37>, <38>;
-               };
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 8da26b35b5c3c8857a6ebb4dec71bf32e83d8b57..7c1ab3b3254f44f51b10e0d9ffeaba6fbd1af006 100644 (file)
@@ -11,6 +11,7 @@ Required Properties:
     - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
     - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
     - "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
+    - "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
     - "renesas,gpio-rcar": for generic R-Car GPIO controller.
 
   - reg: Base address and length of each memory resource used by the GPIO
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
index 3fb68bfefc8b30eb149d8ddf36d7b0eadccb1ded..f9065a5781a22e7b7bba02e4194ecf551f453d15 100644 (file)
@@ -4,7 +4,7 @@ STMPE is an MFD device which may expose the following inbuilt devices: gpio,
 keypad, touchscreen, adc, pwm, rotator.
 
 Required properties:
- - compatible                   : "st,stmpe[610|801|811|1601|2401|2403]"
+ - compatible                   : "st,stmpe[610|801|811|1600|1601|2401|2403]"
  - reg                          : I2C/SPI address of the device
 
 Optional properties:
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 86d3fa95fd12828f5110d4ae19e6721740af8d85..40884c4fe40c5f85c4f64e096b7469141652a8cd 100644 (file)
@@ -8,9 +8,9 @@ gpio-legacy.txt (actually, there is no real mapping possible with the old
 interface; you just fetch an integer from somewhere and request the
 corresponding GPIO.
 
-Platforms that make use of GPIOs must select ARCH_REQUIRE_GPIOLIB (if GPIO usage
-is mandatory) or ARCH_WANT_OPTIONAL_GPIOLIB (if GPIO support can be omitted) in
-their Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
+All platforms can enable the GPIO library, but if the platform strictly
+requires GPIO functionality to be present, it needs to select GPIOLIB from its
+Kconfig. Then, how GPIOs are mapped depends on what the platform uses to
 describe its hardware layout. Currently, mappings can be defined through device
 tree, ACPI, and platform data.
 
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index 79ab5648d69b3e39a198c108c739eb1a08c20b06..b34fd94f70898a7f65c2a0313349588411eb8e81 100644 (file)
@@ -72,8 +72,8 @@ in this document, but drivers acting as clients to the GPIO interface must
 not care how it's implemented.)
 
 That said, if the convention is supported on their platform, drivers should
-use it when possible.  Platforms must select ARCH_REQUIRE_GPIOLIB or
-ARCH_WANT_OPTIONAL_GPIOLIB in their Kconfig.  Drivers that can't work without
+use it when possible.  Platforms must select GPIOLIB if GPIO functionality
+is strictly required.  Drivers that can't work without
 standard GPIO calls should have Kconfig entries which depend on GPIOLIB.  The
 GPIO calls are available, either as "real code" or as optimized-away stubs,
 when drivers use the include file:
@@ -553,22 +553,14 @@ either NULL or the label associated with that GPIO when it was requested.
 
 Platform Support
 ----------------
-To support this framework, a platform's Kconfig will "select" either
-ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
-and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
-three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
+To force-enable this framework, a platform's Kconfig will "select" GPIOLIB,
+else it is up to the user to configure support for GPIO.
 
 It may also provide a custom value for ARCH_NR_GPIOS, so that it better
 reflects the number of GPIOs in actual use on that platform, without
 wasting static table space.  (It should count both built-in/SoC GPIOs and
 also ones on GPIO expanders.
 
-ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
-into the kernel on that architecture.
-
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
-can enable it and build it into the kernel optionally.
-
 If neither of these options are selected, the platform does not support
 GPIOs through GPIO-lib and the code cannot be enabled by the user.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 20bb1d00098c70dacad7a9c778087f9319b0c5c6..5d0f88e0e8e3b6bc2310585214318cae0df04be9 100644 (file)
@@ -1004,6 +1004,7 @@ N:        meson
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
 M:     Antoine Tenart <antoine.tenart@free-electrons.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-alpine/
 F:     arch/arm/boot/dts/alpine*
@@ -3768,6 +3769,12 @@ F:       include/linux/regulator/da9211.h
 F:     include/sound/da[79]*.h
 F:     sound/soc/codecs/da[79]*.[ch]
 
+DIAMOND SYSTEMS GPIO-MM GPIO DRIVER
+M:     William Breathitt Gray <vilhelm.gray@gmail.com>
+L:     linux-gpio@vger.kernel.org
+S:     Maintained
+F:     drivers/gpio/gpio-gpio-mm.c
+
 DIGI NEO AND CLASSIC PCI PRODUCTS
 M:     Lidza Louina <lidza.louina@gmail.com>
 M:     Mark Hounschell <markh@compro.net>
index 70de1448c5717b6914bc6626fe1663e4d52c1e95..5c18baad72182db719ec0ba53182fe173fb7b690 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -635,13 +635,6 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
-PHONY += gcc-plugins
-gcc-plugins: scripts_basic
-ifdef CONFIG_GCC_PLUGINS
-       $(Q)$(MAKE) $(build)=scripts/gcc-plugins
-endif
-       @:
-
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
diff --git a/arch/Kconfig b/arch/Kconfig
index bd8056b5b246058a8c06190811f875a00f30df8d..e9c9334507ddd57f2fd787f2faa3dac71edc18a7 100644 (file)
@@ -461,6 +461,15 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+       bool
+       help
+         An architecture should select this if it can walk the kernel stack
+         frames to determine if an object is part of either the arguments
+         or local variables (i.e. that it excludes saved return addresses,
+         and similar) by implementing an inline arch_within_stack_frames(),
+         which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
        bool
        help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2d601d769a1cdddae7bbba2bb22571731e3d4f5e..a9c4e48bb7ec997bec394066914d26f337a2fec4 100644 (file)
@@ -35,6 +35,7 @@ config ARM
        select HARDIRQS_SW_RESEND
        select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 56ea5c60b31883bdf52ca01c4748061c4ed39871..61f6ccc19cfa94364e777cc68d10ce5a24093c0f 100644 (file)
@@ -260,12 +260,14 @@ machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y))
 platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
 
 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
+ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
 ifeq ($(KBUILD_SRC),)
 KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
 else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
 endif
 endif
+endif
 
 export TEXT_OFFSET GZFLAGS MMUEXT
 
diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
index db808f92dd79c975b1f059d824e263080208d20b..90d00b407f851dfce2706d8e85bb436ac39a3921 100644 (file)
                 * associativity as these may be erroneously set
                 * up by boot loader(s).
                 */
-               cache-size = <1048576>; // 1MB
-               cache-sets = <4096>;
+               cache-size = <131072>; // 128KB
+               cache-sets = <512>;
                cache-line-size = <32>;
                arm,parity-disable;
-               arm,tag-latency = <1>;
-               arm,data-latency = <1 1>;
-               arm,dirty-latency = <1>;
+               arm,tag-latency = <1 1 1>;
+               arm,data-latency = <1 1 1>;
        };
 
        scu: scu@1f000000 {
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index cf06e32ee108a221c330c8ff521e0a82d8b78e4d..4b34b54e09a193ebd93ef82566d5c012274241d5 100644 (file)
@@ -42,7 +42,7 @@
        };
 
        syscon {
-               compatible = "arm,integrator-ap-syscon";
+               compatible = "arm,integrator-ap-syscon", "syscon";
                reg = <0x11000000 0x100>;
                interrupt-parent = <&pic>;
                /* These are the logical module IRQs */
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index d43f15b4f79a242d2437f6ea45626556380c010c..79430fbfec3bd17ec311625e67695aabee3e0726 100644 (file)
@@ -94,7 +94,7 @@
        };
 
        syscon {
-               compatible = "arm,integrator-cp-syscon";
+               compatible = "arm,integrator-cp-syscon", "syscon";
                reg = <0xcb000000 0x100>;
        };
 
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index 00cb314d5e4db81fcc035b2a68101a557cf751a4..e23f46d15c806566abc2ec88828bc8d053ffd6e5 100644 (file)
                cpu_on          = <0x84000003>;
        };
 
-       psci {
-               compatible      = "arm,psci";
-               method          = "smc";
-               cpu_suspend     = <0x84000001>;
-               cpu_off         = <0x84000002>;
-               cpu_on          = <0x84000003>;
-       };
-
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index e52b82449a79528bc362d96fabe7b2d688abd78e..6403e0de540e842b952b3bb02b5673bbbfb3e683 100644 (file)
         *   Pin 41: BR_UART1_TXD
         *   Pin 44: BR_UART1_RXD
         */
-       serial@70006000 {
+       serial@0,70006000 {
                compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
                status = "okay";
        };
         *   Pin 71: UART2_CTS_L
         *   Pin 74: UART2_RTS_L
         */
-       serial@70006040 {
+       serial@0,70006040 {
                compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
                status = "okay";
        };
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index b6e54ee9bdbd8e54a4c740d0d2308dd26f682b42..ca39c04fec6b7af28847b78b6b3ff36a75811b31 100644 (file)
@@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_FIRMWARE_MEMMAP=y
 CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_PAGE_POISONING=y
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 89260516735720460b4002e462ae5d3e5811125b..4f366b0370e939a27056f0230b49140ddee85503 100644 (file)
@@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_FIRMWARE_MEMMAP=y
 CONFIG_FANOTIFY=y
-CONFIG_PRINTK_TIME=1
+CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_PAGE_POISONING=y
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 62a6f65029e61aebf9b64e1df59fb9383f4064a9..a93c0f99acf7767c680158cf96acef87d1f0da51 100644 (file)
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(to, n, false);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(from, n, true);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
 #else
+       check_object_size(from, n, true);
        return arm_copy_to_user(to, from, n);
 #endif
 }
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 087acb569b63a4bd90982e0c9b15fc2313636c53..5f221acd21aebb3ca1c2ee560fb68241bc1e02c9 100644 (file)
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
        mm_segment_t fs;
        long ret, err, i;
 
-       if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+       if (maxevents <= 0 ||
+                       maxevents > (INT_MAX/sizeof(*kbuf)) ||
+                       maxevents > (INT_MAX/sizeof(*events)))
                return -EINVAL;
+       if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+               return -EFAULT;
        kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 
        if (nsops < 1 || nsops > SEMOPM)
                return -EINVAL;
+       if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+               return -EFAULT;
        sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
        if (!sops)
                return -ENOMEM;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index d94bb9093ead7d10641aecfaed668d0c3ee3fa39..75f130ef650413036625c59f34ab864c1649052e 100644 (file)
@@ -1009,9 +1009,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
+               int ret;
                if (!vgic_present)
                        return -ENXIO;
-               return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+               mutex_lock(&kvm->lock);
+               ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+               mutex_unlock(&kvm->lock);
+               return ret;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig
index dc7c6edeab39a89d24f8211d4d11ace5494bafee..61284b9389cf5e41de92215b3fa45ee9c2bc79df 100644 (file)
@@ -1,13 +1,13 @@
 menuconfig ARCH_CLPS711X
        bool "Cirrus Logic EP721x/EP731x-based"
        depends on ARCH_MULTI_V4T
-       select ARCH_REQUIRE_GPIOLIB
        select AUTO_ZRELADDR
        select CLKSRC_OF
        select CLPS711X_TIMER
        select COMMON_CLK
        select CPU_ARM720T
        select GENERIC_CLOCKEVENTS
+       select GPIOLIB
        select MFD_SYSCON
        select OF_IRQ
        select USE_OF
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index e53c6cfcab51cd12c798fd11d663686c77761b02..6c6497e80a7b13433d833923d8c5003c52039d9a 100644 (file)
@@ -1,5 +1,4 @@
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-       -I$(srctree)/arch/arm/plat-orion/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
 
 AFLAGS_coherency_ll.o          := -Wa,-march=armv7-a
 CFLAGS_pmsu.o                  := -march=armv7-a
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index a5ab712c1a59785db07431f1089899ea04bf6053..6d3af43ae3e4a7a62ba499fb057a646c4badfd12 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/power/isp1704_charger.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/platform_data/mtd-onenand-omap2.h>
+#include <linux/module.h>
 
 #include <plat/dmtimer.h>
 
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig
index 567496bd250a2fc5c8037ed203dc71d203521be0..29100beb2e7f201403ce9bc84c78dab1aea818eb 100644 (file)
@@ -11,11 +11,13 @@ if ARCH_OXNAS
 
 config MACH_OX810SE
        bool "Support OX810SE Based Products"
+       select ARCH_HAS_RESET_CONTROLLER
        select COMMON_CLK_OXNAS
        select CPU_ARM926T
        select MFD_SYSCON
        select OXNAS_RPS_TIMER
        select PINCTRL_OXNAS
+       select RESET_CONTROLLER
        select RESET_OXNAS
        select VERSATILE_FPGA_IRQ
        help
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index dc109dc3a622834bcca135322672e754cc1db488..10bfdb169366b0a7b4e6403cd3fd5242a940f3e8 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>      /* symbol_get ; symbol_put */
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/major.h>
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 1080580b1343d1bdf164230bff82d6fed2e8b0d5..2c150bfc0cd5128dcc252e63ca485076e9469ba9 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>      /* symbol_get ; symbol_put */
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/gpio_keys.h>
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index dae8d86ef4ccc75c4bb3dfbb98aba9e7e12eec9f..4048821309566281d61e5dd478f2db7c921b9491 100644 (file)
@@ -1,8 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-       -I$(srctree)/arch/arm/plat-versatile/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include
 
 obj-y                                  := core.o
 obj-$(CONFIG_REALVIEW_DT)              += realview-dt.o
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 72b9e96715070f2c47b13b7f7aade101abb9586e..fa7fb716e388a7ef4f4da1b89f58090e60805245 100644 (file)
@@ -5,7 +5,7 @@
 #
 # Licensed under GPLv2
 
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include
 
 # Core
 
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index f3dba6f356e29446c0960af2d37e51d2eacc8302..02e21bceb0856bc5ac5c769af3362afab2927c99 100644 (file)
@@ -40,5 +40,8 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 bool __init shmobile_smp_init_fallback_ops(void)
 {
        /* fallback on PSCI/smp_ops if no other DT based method is detected */
+       if (!IS_ENABLED(CONFIG_SMP))
+               return false;
+
        return platform_can_secondary_boot() ? true : false;
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 69c8787bec7d3f3e343b592b01997667f3f8c53d..41f300a87c5147c568de6156a5f954fe9c29f780 100644 (file)
@@ -15,7 +15,6 @@ config ARM64
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_NUMA_BALANCING
-       select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -54,6 +53,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index bb2616b161576b4a664536ccae5c761544e93167..41d82b327e6a0eeb61d24f65e0b7d1d80318b703 100644 (file)
@@ -8,15 +8,15 @@ config ARCH_SUNXI
 
 config ARCH_ALPINE
        bool "Annapurna Labs Alpine platform"
-       select ALPINE_MSI
+       select ALPINE_MSI if PCI
        help
          This enables support for the Annapurna Labs Alpine
          Soc family.
 
 config ARCH_BCM2835
        bool "Broadcom BCM2835 family"
-       select ARCH_REQUIRE_GPIOLIB
        select CLKSRC_OF
+       select GPIOLIB
        select PINCTRL
        select PINCTRL_BCM2835
        select ARM_AMBA
@@ -29,15 +29,15 @@ config ARCH_BCM2835
 config ARCH_BCM_IPROC
        bool "Broadcom iProc SoC Family"
        select COMMON_CLK_IPROC
+       select GPIOLIB
        select PINCTRL
-       select ARCH_REQUIRE_GPIOLIB
        help
          This enables support for Broadcom iProc based SoCs
 
 config ARCH_BERLIN
        bool "Marvell Berlin SoC Family"
-       select ARCH_REQUIRE_GPIOLIB
        select DW_APB_ICTL
+       select GPIOLIB
        select PINCTRL
        help
          This enables support for Marvell Berlin SoC Family
@@ -66,7 +66,7 @@ config ARCH_LG1K
 config ARCH_HISI
        bool "Hisilicon SoC Family"
        select ARM_TIMER_SP804
-       select HISILICON_IRQ_MBIGEN
+       select HISILICON_IRQ_MBIGEN if PCI
        help
          This enables support for Hisilicon ARMv8 SoC family
 
@@ -108,7 +108,7 @@ config ARCH_QCOM
 config ARCH_ROCKCHIP
        bool "Rockchip Platforms"
        select ARCH_HAS_RESET_CONTROLLER
-       select ARCH_REQUIRE_GPIOLIB
+       select GPIOLIB
        select PINCTRL
        select PINCTRL_ROCKCHIP
        select ROCKCHIP_TIMER
@@ -154,11 +154,11 @@ config ARCH_STRATIX10
 config ARCH_TEGRA
        bool "NVIDIA Tegra SoC Family"
        select ARCH_HAS_RESET_CONTROLLER
-       select ARCH_REQUIRE_GPIOLIB
        select CLKDEV_LOOKUP
        select CLKSRC_MMIO
        select CLKSRC_OF
        select GENERIC_CLOCKEVENTS
+       select GPIOLIB
        select HAVE_CLK
        select PINCTRL
        select RESET_CONTROLLER
@@ -183,8 +183,8 @@ config ARCH_UNIPHIER
 
 config ARCH_VEXPRESS
        bool "ARMv8 software model (Versatile Express)"
-       select ARCH_REQUIRE_GPIOLIB
        select COMMON_CLK_VERSATILE
+       select GPIOLIB
        select PM
        select PM_GENERIC_DOMAINS
        select POWER_RESET_VEXPRESS
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 299f3ce969ab8517a602ff7addda417ecd5aa5f3..c528dd52ba2d39b30547ab964eda219b1068a043 100644 (file)
@@ -12,6 +12,7 @@
 /dts-v1/;
 #include "exynos7.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/samsung,s2mps11.h>
 
 / {
        model = "Samsung Exynos7 Espresso board based on EXYNOS7";
@@ -43,6 +44,8 @@
 
 &rtc {
        status = "okay";
+       clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
+       clock-names = "rtc", "rtc_src";
 };
 
 &watchdog {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0555b7caaf2c2960a1e4f02eca120ddd5d3d01b1..eadf4855ad2d995072022254000a83cd0c476a0d 100644 (file)
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 61b49150dfa3046cb521c6fc54232d718e840460..1737aecfcc5e462c78e6c47ab06ffe2e2fc12c60 100644 (file)
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE                  1
-#define MAX_STACK_SIZE                 128
 
 #define flush_insn_slot(p)             do { } while (0)
 #define kretprobe_blacklist_size       0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe;
        struct kprobe_step_ctx ss_ctx;
        struct pt_regs jprobe_saved_regs;
-       char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5e834d10b2913d91bf3620c0e5930cdb5e14035e..c47257c91b77e3d6516000c0c8bec5705b97b6dc 100644 (file)
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        kasan_check_write(to, n);
-       return  __arch_copy_from_user(to, from, n);
+       check_object_size(to, n, false);
+       return __arch_copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        kasan_check_read(from, n);
-       return  __arch_copy_to_user(to, from, n);
+       check_object_size(from, n, true);
+       return __arch_copy_to_user(to, from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        kasan_check_write(to, n);
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               check_object_size(to, n, false);
                n = __arch_copy_from_user(to, from, n);
-       else /* security hole - plug it */
+       } else /* security hole - plug it */
                memset(to, 0, n);
        return n;
 }
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 {
        kasan_check_read(from, n);
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               check_object_size(from, n, true);
                n = __arch_copy_to_user(to, from, n);
+       }
        return n;
 }
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1221420540df2d7255dd2aca1b298..441420ca7d084af131bf7e7373c7bfc22c8ae26f 100644 (file)
@@ -353,6 +353,8 @@ el1_sync:
        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
        b.eq    el1_da
+       cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
+       b.eq    el1_ia
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv
+
+el1_ia:
+       /*
+        * Fall through to the Data abort case
+        */
 el1_da:
        /*
         * Data abort handling
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76ddea54d58abf77812593ddbb0f6d..65d81f965e7491daba3e05be40052776a6d1f64d 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
        set_pte(pte, __pte(virt_to_phys((void *)dst) |
                         pgprot_val(PAGE_KERNEL_EXEC)));
 
-       /* Load our new page tables */
-       asm volatile("msr       ttbr0_el1, %0;"
-                    "isb;"
-                    "tlbi      vmalle1is;"
-                    "dsb       ish;"
-                    "isb" : : "r"(virt_to_phys(pgd)));
+       /*
+        * Load our new page tables. A strict BBM approach requires that we
+        * ensure that TLBs are free of any entries that may overlap with the
+        * global mappings we are about to install.
+        *
+        * For a real hibernate/resume cycle TTBR0 currently points to a zero
+        * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+        * runtime services), while for a userspace-driven test_resume cycle it
+        * points to userspace page tables (and we must point it at a zero page
+        * ourselves). Elsewhere we only (un)install the idmap with preemption
+        * disabled, so T0SZ should be as required regardless.
+        */
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+       isb();
 
        *phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -393,6 +404,38 @@ int swsusp_arch_resume(void)
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
 
+       /*
+        * Restoring the memory image will overwrite the ttbr1 page tables.
+        * Create a second copy of just the linear map, and use this when
+        * restoring.
+        */
+       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+       if (!tmp_pg_dir) {
+               pr_err("Failed to allocate memory for temporary page tables.");
+               rc = -ENOMEM;
+               goto out;
+       }
+       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+       if (rc)
+               goto out;
+
+       /*
+        * Since we only copied the linear map, we need to find restore_pblist's
+        * linear map address.
+        */
+       lm_restore_pblist = LMADDR(restore_pblist);
+
+       /*
+        * We need a zero page that is zero before & after resume in order to
+        * to break before make on the ttbr1 page tables.
+        */
+       zero_page = (void *)get_safe_page(GFP_ATOMIC);
+       if (!zero_page) {
+               pr_err("Failed to allocate zero page.");
+               rc = -ENOMEM;
+               goto out;
+       }
+
        /*
         * Locate the exit code in the bottom-but-one page, so that *NULL
         * still has disastrous affects.
@@ -418,27 +461,6 @@ int swsusp_arch_resume(void)
         */
        __flush_dcache_area(hibernate_exit, exit_size);
 
-       /*
-        * Restoring the memory image will overwrite the ttbr1 page tables.
-        * Create a second copy of just the linear map, and use this when
-        * restoring.
-        */
-       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-       if (!tmp_pg_dir) {
-               pr_err("Failed to allocate memory for temporary page tables.");
-               rc = -ENOMEM;
-               goto out;
-       }
-       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-       if (rc)
-               goto out;
-
-       /*
-        * Since we only copied the linear map, we need to find restore_pblist's
-        * linear map address.
-        */
-       lm_restore_pblist = LMADDR(restore_pblist);
-
        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
                __hyp_set_vectors(el2_vectors);
        }
 
-       /*
-        * We need a zero page that is zero before & after resume in order to
-        * to break before make on the ttbr1 page tables.
-        */
-       zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, lm_restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf97685882883e8970e472edeb0b457cbc2b76e6..c6b0f40620d868d1bf031db8e0cb0dfff3b134dc 100644 (file)
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-       unsigned long size;
-
-       if (on_irq_stack(addr, raw_smp_processor_id()))
-               size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-       else
-               size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-       return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
        /* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long stack_ptr = kernel_stack_pointer(regs);
 
        kcb->jprobe_saved_regs = *regs;
        /*
-        * As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
+        * Since we can't be sure where in the stack frame "stacked"
+        * pass-by-value arguments are stored we just don't try to
+        * duplicate any of the stack. Do not use jprobes on functions that
+        * use more than 64 bytes (after padding each to an 8 byte boundary)
+        * of arguments, or pass individual arguments larger than 16 bytes.
         */
-       kasan_disable_current();
-       memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-              min_stack_size(stack_ptr));
-       kasan_enable_current();
 
        instruction_pointer_set(regs, (unsigned long) jp->entry);
        preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        }
        unpause_graph_tracing();
        *regs = kcb->jprobe_saved_regs;
-       kasan_disable_current();
-       memcpy((void *)stack_addr, kcb->jprobes_stack,
-              min_stack_size(stack_addr));
-       kasan_enable_current();
        preempt_enable_no_resched();
        return 1;
 }
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908faf4800cb0807952d17c6f8276d9..d93d433525047e41c20760d0ec24e10524333804 100644 (file)
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
                acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);
 
-       if (cpu_count > NR_CPUS)
-               pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-                       cpu_count, NR_CPUS);
+       if (cpu_count > nr_cpu_ids)
+               pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+                       cpu_count, nr_cpu_ids);
 
        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
-       for (i = 1; i < NR_CPUS; i++) {
+       for (i = 1; i < nr_cpu_ids; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8beaa0da7df4e60e3939c962b268cf666337dde..05d2bd776c69b932397ccec82306d8fbb46272bf 100644 (file)
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+       return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
        /*
         * Are we prepared to handle this kernel fault?
+        * We are almost certainly not prepared to handle instruction faults.
         */
-       if (fixup_exception(regs))
+       if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;
 
        /*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
        unsigned int ec       = ESR_ELx_EC(esr);
        unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-       return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+       return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+              (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+               if (is_el1_instruction_abort(esr))
+                       die("Attempting to execute userspace memory", regs, esr);
+
                if (!search_exception_tables(regs->pc))
                        die("Accessing user space memory outside uaccess.h routines", regs, esr);
        }
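The new checks above hinge on the exception class (EC) field in bits [31:26] of the ESR: EC 0x21 is an instruction abort taken without a change in exception level, i.e. from EL1 itself. A stand-alone sketch of that decode, with illustrative macro names rather than the kernel's ESR_ELx_* definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define ESR_EC_SHIFT  26      /* EC field occupies ESR[31:26] */
    #define ESR_EC_MASK   0x3fu
    #define EC_IABT_CUR   0x21u   /* instruction abort, no change in exception level */

    /* Same-EL (EL1) instruction abort: the kernel tried to execute a bad address. */
    static bool is_el1_iabt_sketch(uint32_t esr)
    {
            return ((esr >> ESR_EC_SHIFT) & ESR_EC_MASK) == EC_IABT_CUR;
    }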
index 28c63fea786d38b1e6f671296e908f784eb18a59..3c1bd640042a0dc9f0c0866c8c93c7346cf1171a 100644 (file)
@@ -26,7 +26,7 @@ config BLACKFIN
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select ARCH_HAVE_CUSTOM_GPIO_H
-       select ARCH_REQUIRE_GPIOLIB
+       select GPIOLIB
        select HAVE_UID16
        select HAVE_UNDERSCORE_SYMBOL_PREFIX
        select VIRT_TO_BUS
index 2e221c5f0203b22d3bda055862548afb3803bb2d..f86918aed9e1181e3dc2546a1b0520f814138164 100644 (file)
@@ -3,6 +3,8 @@
 
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 /* H8/300 internal I/O functions */
 
 #define __raw_readb __raw_readb
index 6a15083cc366df9c13962206952d266625034c7f..18ca6a9ce566cc3b8463d22ecfe3e27c25fc24f7 100644 (file)
@@ -52,6 +52,7 @@ config IA64
        select MODULES_USE_ELF_RELA
        select ARCH_USE_CMPXCHG_LOCKREF
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
index 2189d5ddc1eeef552dd602875ae7d34c8baf17f7..465c70982f40d8960925bcefa29bb9402167aba9 100644 (file)
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+       if (!__builtin_constant_p(count))
+               check_object_size(from, count, true);
+
        return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+       if (!__builtin_constant_p(count))
+               check_object_size(to, count, false);
+
        return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        const void *__cu_from = (from);                                                 \
        long __cu_len = (n);                                                            \
                                                                                        \
-       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
-               __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
+       if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_from, __cu_len, true);                   \
+               __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        long __cu_len = (n);                                                            \
                                                                                        \
        __chk_user_ptr(__cu_from);                                                      \
-       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
+       if (__access_ok(__cu_from, __cu_len, get_fs())) {                               \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_to, __cu_len, false);                    \
                __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
+       }                                                                               \
        __cu_len;                                                                       \
 })
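These wrappers follow the same hardened-usercopy shape that recurs in the powerpc, sparc, s390 and x86 hunks later in this merge: run check_object_size() on the kernel-side buffer before the raw copy, but only when the length is not a compile-time constant (constant-sized copies are checked at build time). A minimal user-space sketch of that shape with a stand-in bounds check; copy_out(), check_object_bounds() and the obj_size parameter are illustrative, not kernel API:

    #include <stddef.h>
    #include <string.h>

    /* Stand-in for check_object_size(): only enforces a caller-supplied cap here. */
    static void check_object_bounds(const void *ptr, size_t n, size_t obj_size)
    {
            (void)ptr;
            if (n > obj_size)
                    __builtin_trap();       /* the kernel would warn or BUG instead */
    }

    /* Copy wrapper shaped like the patched __copy_to_user() helpers. */
    static size_t copy_out(void *dst, const void *src, size_t n, size_t src_obj_size)
    {
            if (!__builtin_constant_p(n))
                    check_object_bounds(src, n, src_obj_size);
            memcpy(dst, src, n);
            return 0;                       /* bytes left uncopied */
    }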
 
index 2dcee3a88867536b2186f01252f1bf1d842ec279..9202f82dfce60e919d884e37c89924ade5a33569 100644 (file)
@@ -213,7 +213,6 @@ static inline int frame_extra_sizes(int f)
 
 static inline void adjustformat(struct pt_regs *regs)
 {
-       ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
        /*
         * set format byte to make stack appear modulo 4, which it will
         * be when doing the rte
index 11fa51c89617deb1a303c6bfdbf82b2a32a1e4db..c0ec116b3993a3a61b852c9daf14a34ab0962e15 100644 (file)
@@ -390,7 +390,6 @@ void __init mem_init(void)
 
        free_all_bootmem();
        mem_init_print_info(NULL);
-       show_mem(0);
 }
 
 void free_initmem(void)
index 6eb52b9c98183b95134710c22187b1a6f0225ef9..e788515f766b46cefb2a36dfc95ab6bfcec6e8e3 100644 (file)
@@ -1642,8 +1642,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 
        preempt_disable();
        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
-                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+                   kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+                       kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+                               __func__, va, vcpu, read_c0_entryhi());
+                       er = EMULATE_FAIL;
+                       preempt_enable();
+                       goto done;
+               }
        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                int index;
@@ -1680,12 +1686,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                                                                run, vcpu);
                                preempt_enable();
                                goto dont_update_pc;
-                       } else {
-                               /*
-                                * We fault an entry from the guest tlb to the
-                                * shadow host TLB
-                                */
-                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+                       }
+                       /*
+                        * We fault an entry from the guest tlb to the
+                        * shadow host TLB
+                        */
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+                               kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, va, index, vcpu,
+                                       read_c0_entryhi());
+                               er = EMULATE_FAIL;
+                               preempt_enable();
+                               goto done;
                        }
                }
        } else {
@@ -2659,7 +2671,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                         * OK we have a Guest TLB entry, now inject it into the
                         * shadow host TLB
                         */
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+                               kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, va, index, vcpu,
+                                       read_c0_entryhi());
+                               er = EMULATE_FAIL;
+                       }
                }
        }
 
index 57319ee57c4fdd1aada66e1648adc3981edf9dc3..6cfdcf55572d6aaa747998683f06bbf7f5a3e5ab 100644 (file)
@@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        }
 
        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
-       if (gfn >= kvm->arch.guest_pmap_npages) {
+       if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
@@ -138,35 +138,49 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        kvm_pfn_t pfn0, pfn1;
+       gfn_t gfn0, gfn1;
+       long tlb_lo[2];
        int ret;
 
-       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
-               pfn0 = 0;
-               pfn1 = 0;
-       } else {
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
-                                          >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               pfn0 = kvm->arch.guest_pmap[
-                       mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
-               pfn1 = kvm->arch.guest_pmap[
-                       mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
+       tlb_lo[0] = tlb->tlb_lo[0];
+       tlb_lo[1] = tlb->tlb_lo[1];
+
+       /*
+        * The commpage address must not be mapped to anything else if the guest
+        * TLB contains entries nearby, or commpage accesses will break.
+        */
+       if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+                       VPN2_MASK & (PAGE_MASK << 1)))
+               tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+       gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+       gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+       if (gfn0 >= kvm->arch.guest_pmap_npages ||
+           gfn1 >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+                       __func__, gfn0, gfn1, tlb->tlb_hi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               return -1;
        }
 
+       if (kvm_mips_map_page(kvm, gfn0) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(kvm, gfn1) < 0)
+               return -1;
+
+       pfn0 = kvm->arch.guest_pmap[gfn0];
+       pfn1 = kvm->arch.guest_pmap[gfn1];
+
        /* Get attributes from the Guest TLB */
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-               (tlb->tlb_lo[0] & ENTRYLO_D) |
-               (tlb->tlb_lo[0] & ENTRYLO_V);
+               (tlb_lo[0] & ENTRYLO_D) |
+               (tlb_lo[0] & ENTRYLO_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
-               (tlb->tlb_lo[1] & ENTRYLO_D) |
-               (tlb->tlb_lo[1] & ENTRYLO_V);
+               (tlb_lo[1] & ENTRYLO_D) |
+               (tlb_lo[1] & ENTRYLO_V);
 
        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);
@@ -354,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-                                                            &vcpu->arch.
-                                                            guest_tlb[index]);
+                       if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                               &vcpu->arch.guest_tlb[index])) {
+                               kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+                                       __func__, opc, index, vcpu,
+                                       read_c0_entryhi());
+                               kvm_mips_dump_guest_tlbs(vcpu);
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
                        inst = *(opc);
                }
                local_irq_restore(flags);
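A subtle point in the kseg0 hunk above: the handler fills host TLB entries for an even/odd pair of guest pages at a time, so the bounds test uses (gfn | 1), the larger member of the pair, rather than gfn itself. If the odd page is in range, the even one necessarily is too. A stand-alone sketch of just that rule (gfn_pair_in_range() is an illustrative name, not a kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * A guest TLB entry maps the page pair {gfn & ~1, gfn | 1}. Comparing
     * the odd member against the size of the guest physical map covers
     * both halves of the pair.
     */
    static bool gfn_pair_in_range(uint64_t gfn, uint64_t npages)
    {
            return (gfn | 1) < npages;
    }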
index ec4047e170a0e6dc5267509f36974f8cbaa1fa84..927d2ab2ce08a68c2574c41e6d1b732ecf597d9b 100644 (file)
@@ -166,6 +166,7 @@ config PPC
        select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
        select GENERIC_CPU_AUTOPROBE
        select HAVE_VIRT_CPU_ACCOUNTING
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index ca254546cd05a1a4fe09e87e63945d7b38b8873f..1934707bf321ecf47a835bc7a0c4cd88f092ed07 100644 (file)
@@ -66,29 +66,28 @@ endif
 UTS_MACHINE := $(OLDARCH)
 
 ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-override CC    += -mlittle-endian
-ifneq ($(cc-name),clang)
-override CC    += -mno-strict-align
-endif
-override AS    += -mlittle-endian
 override LD    += -EL
-override CROSS32CC += -mlittle-endian
 override CROSS32AS += -mlittle-endian
 LDEMULATION    := lppc
 GNUTARGET      := powerpcle
 MULTIPLEWORD   := -mno-multiple
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
 else
-ifeq ($(call cc-option-yn,-mbig-endian),y)
-override CC    += -mbig-endian
-override AS    += -mbig-endian
-endif
 override LD    += -EB
 LDEMULATION    := ppc
 GNUTARGET      := powerpc
 MULTIPLEWORD   := -mmultiple
 endif
 
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mlittle-endian
+ifneq ($(cc-name),clang)
+  cflags-$(CONFIG_CPU_LITTLE_ENDIAN)   += -mno-strict-align
+endif
+
+aflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(call cc-option,-mbig-endian)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mlittle-endian
+
 ifeq ($(HAS_BIARCH),y)
 override AS    += -a$(CONFIG_WORD_SIZE)
 override LD    += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
@@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200)               += -Wa,-me200
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
 
+KBUILD_AFLAGS += $(aflags-y)
+KBUILD_CFLAGS += $(cflags-y)
+
 head-y                         := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
 head-$(CONFIG_8xx)             := arch/powerpc/kernel/head_8xx.o
 head-$(CONFIG_40x)             := arch/powerpc/kernel/head_40x.o
index bfe3d37a24ef3a24c07e5a9f7ed8df3a13b4bf59..9fa046d56ebadd6ad25e62b5a29a853b123cd30a 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/cpufeature.h>
 #include <asm/switch_to.h>
 
 #define CHKSUM_BLOCK_SIZE      1
@@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void)
        crypto_unregister_shash(&alg);
 }
 
-module_init(crc32c_vpmsum_mod_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
 module_exit(crc32c_vpmsum_mod_fini);
 
 MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
index 3d7fc06532a16a7b620a58342d70e41fd69a33da..01b8a13f022467be64ccd46f248344bdf96e9a41 100644 (file)
@@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state;
 
 #endif
 
+/* Idle state entry routines */
+#ifdef CONFIG_PPC_P7_NAP
+#define        IDLE_STATE_ENTER_SEQ(IDLE_INST)                         \
+       /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
+       std     r0,0(r1);                                       \
+       ptesync;                                                \
+       ld      r0,0(r1);                                       \
+1:     cmp     cr0,r0,r0;                                      \
+       bne     1b;                                             \
+       IDLE_INST;                                              \
+       b       .
+#endif /* CONFIG_PPC_P7_NAP */
+
 #endif
index 57fec8ac7b924cdeabb4a21b65d46785e9fbf745..ddf54f5bbdd1c05efbd286ec305beac3c459d8d6 100644 (file)
@@ -186,6 +186,7 @@ label##3:                                           \
 
 #ifndef __ASSEMBLY__
 void apply_feature_fixups(void);
+void setup_feature_keys(void);
 #endif
 
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
index 0a74ebe934e1cbcb61105b63d54a959614bca33d..17c8380673a60637c61fec5772162bf0ae5523cb 100644 (file)
@@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void)
 static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-extern void flush_tmregs_to_thread(struct task_struct *);
-#else
-static inline void flush_tmregs_to_thread(struct task_struct *t)
-{
-}
-#endif
-
 static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
index b7c20f0b8fbeebe03a1c57a4c62f86c0c42e962d..c1dc6c14deb84a261ab19e509f12078a271937d1 100644 (file)
@@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(to, n - over, false);
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
@@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
+       }
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n - over, true);
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
@@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                if (ret == 0)
                        return 0;
        }
+
+       if (!__builtin_constant_p(n))
+               check_object_size(to, n, false);
+
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
index f5f729c115781a8d6c9c1850f89558b06383c761..f0b238516e9b44b5afabc52a8460daaa6e81ca97 100644 (file)
@@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void);
 extern void xics_kexec_teardown_cpu(int secondary);
 extern void xics_migrate_irqs_away(void);
 extern void icp_native_eoi(struct irq_data *d);
+extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern int xics_retrigger(struct irq_data *data);
 #ifdef CONFIG_SMP
 extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
                               unsigned int strict_check);
index c9bc78e9c6101b2ae5016efec858c5c38b0f4158..7429556eb8df7e468b447a1b6d4c541253c06295 100644 (file)
@@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
        int n = 0, l = 0;
        char buffer[128];
 
-       n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+       n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
                       edev->phb->global_number, pdn->busno,
                       PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
-       pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+       pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
                edev->phb->global_number, pdn->busno,
                PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
 
index 41091fdf9bd88fbe68d9ef15aeb04c1b59b18e77..df6d45eb41150185e7cc17baa82feead23fa27f3 100644 (file)
@@ -144,29 +144,14 @@ machine_check_pSeries_1:
         * vector
         */
        SET_SCRATCH0(r13)               /* save r13 */
-#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
-       /* Running native on arch 2.06 or later, check if we are
-        * waking up from nap. We only handle no state loss and
-        * supervisor state loss. We do -not- handle hypervisor
-        * state loss at this time.
+       /*
+        * Running native on arch 2.06 or later, we may wake up from winkle
+        * inside a machine check. If so, the last bit of HSPRG0 will be set
+        * to 1. Hence clear it unconditionally.
         */
-       mfspr   r13,SPRN_SRR1
-       rlwinm. r13,r13,47-31,30,31
-       OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-       beq     9f
-
-       mfspr   r13,SPRN_SRR1
-       rlwinm. r13,r13,47-31,30,31
-       /* waking up from powersave (nap) state */
-       cmpwi   cr1,r13,2
-       /* Total loss of HV state is fatal. let's just stay stuck here */
-       OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-       bgt     cr1,.
-9:
-       OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
+       GET_PACA(r13)
+       clrrdi  r13,r13,1
+       SET_PACA(r13)
        EXCEPTION_PROLOG_0(PACA_EXMC)
 BEGIN_FTR_SECTION
        b       machine_check_powernv_early
@@ -1273,25 +1258,51 @@ machine_check_handle_early:
         * Check if thread was in power saving mode. We come here when any
         * of the following is true:
         * a. thread wasn't in power saving mode
-        * b. thread was in power saving mode with no state loss or
-        *    supervisor state loss
+        * b. thread was in power saving mode with no state loss,
+        *    supervisor state loss or hypervisor state loss.
         *
-        * Go back to nap again if (b) is true.
+        * Go back to nap/sleep/winkle mode again if (b) is true.
         */
        rlwinm. r11,r12,47-31,30,31     /* Was it in power saving mode? */
        beq     4f                      /* No, it wasn't */
        /* Thread was in power saving mode. Go back to nap again. */
        cmpwi   r11,2
-       bne     3f
-       /* Supervisor state loss */
+       blt     3f
+       /* Supervisor/Hypervisor state loss */
        li      r0,1
        stb     r0,PACA_NAPSTATELOST(r13)
 3:     bl      machine_check_queue_event
        MACHINE_CHECK_HANDLER_WINDUP
        GET_PACA(r13)
        ld      r1,PACAR1(r13)
-       li      r3,PNV_THREAD_NAP
-       b       pnv_enter_arch207_idle_mode
+       /*
+        * Check what idle state this CPU was in and go back to the same
+        * mode again.
+        */
+       lbz     r3,PACA_THREAD_IDLE_STATE(r13)
+       cmpwi   r3,PNV_THREAD_NAP
+       bgt     10f
+       IDLE_STATE_ENTER_SEQ(PPC_NAP)
+       /* No return */
+10:
+       cmpwi   r3,PNV_THREAD_SLEEP
+       bgt     2f
+       IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+       /* No return */
+
+2:
+       /*
+        * Go back to winkle. Please note that this thread was woken up in
+        * the machine check handler from winkle and has not restored the
+        * per-subcore state. Hence before going back to winkle, set the last
+        * bit of HSPRG0 to 1. This makes sure that if this thread gets woken
+        * up again at reset vector 0x100, it will get a chance to restore
+        * the subcore state.
+        */
+       ori     r13,r13,1
+       SET_PACA(r13)
+       IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+       /* No return */
 4:
 #endif
        /*
index ba79d15f4ddd7c0d8ce946e15098d977a00338fa..2265c6398a17ec4af7fc5983df9dad7443ac2fce 100644 (file)
                                PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
                                PSSCR_MTL_MASK
 
-/* Idle state entry routines */
-
-#define        IDLE_STATE_ENTER_SEQ(IDLE_INST)                         \
-       /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
-       std     r0,0(r1);                                       \
-       ptesync;                                                \
-       ld      r0,0(r1);                                       \
-1:     cmp     cr0,r0,r0;                                      \
-       bne     1b;                                             \
-       IDLE_INST;                                              \
-       b       .
-
        .text
 
 /*
@@ -363,8 +351,8 @@ _GLOBAL(power9_idle_stop)
  * cr3 - set to gt if waking up with partial/complete hypervisor state loss
  */
 _GLOBAL(pnv_restore_hyp_resource)
-       ld      r2,PACATOC(r13);
 BEGIN_FTR_SECTION
+       ld      r2,PACATOC(r13);
        /*
         * POWER ISA 3. Use PSSCR to determine if we
         * are waking up from deep idle state
@@ -395,6 +383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         */
        clrldi  r5,r13,63
        clrrdi  r13,r13,1
+
+       /* Now that we are sure r13 is corrected, load TOC */
+       ld      r2,PACATOC(r13);
        cmpwi   cr4,r5,1
        mtspr   SPRN_HSPRG0,r13
 
index ef267fd9dd225a3c7f3dfbcacc057a0bf28dca8e..5e7ece0fda9f5b802eb561ce51774cfb625032a6 100644 (file)
@@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
        mce->in_use = 1;
 
        mce->initiator = MCE_INITIATOR_CPU;
-       if (handled)
+       /* Mark it recovered if we have handled it and MSR(RI=1). */
+       if (handled && (regs->msr & MSR_RI))
                mce->disposition = MCE_DISPOSITION_RECOVERED;
        else
                mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
index a5c0153ede37f21d6dd8f47beac764587789fbc5..7fdf324d5b51f4ebb8716dfadda4ed40fb34d23d 100644 (file)
@@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops);
 static int get_phb_number(struct device_node *dn)
 {
        int ret, phb_id = -1;
+       u32 prop_32;
        u64 prop;
 
        /*
@@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn)
         * reading "ibm,opal-phbid", only present in OPAL environment.
         */
        ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
-       if (ret)
-               ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
+       if (ret) {
+               ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
+               prop = prop_32;
+       }
 
        if (!ret)
                phb_id = (int)(prop & (MAX_PHBS - 1));
index 58ccf86415b46cd5c2db593424ecde772cd0d959..9ee2623e0f674977ee8e8f07bbfb1297a2f05dc8 100644 (file)
@@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 #endif
 }
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void flush_tmregs_to_thread(struct task_struct *tsk)
-{
-       /*
-        * Process self tracing is not yet supported through
-        * ptrace interface. Ptrace generic code should have
-        * prevented this from happening in the first place.
-        * Warn once here with the message, if some how it
-        * is attempted.
-        */
-       WARN_ONCE(tsk == current,
-               "Not expecting ptrace on self: TM regs may be incorrect\n");
-
-       /*
-        * If task is not current, it should have been flushed
-        * already to it's thread_struct during __switch_to().
-        */
-}
-#endif
-
 struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
 {
index 6ee4b72cda4201840cf2b85f38661831c5e37233..4e74fc588a3f6497177fa82e1d148b32383ffcb9 100644 (file)
@@ -2940,7 +2940,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
 
        /* Don't print anything after quiesce under OPAL, it crashes OFW */
        if (of_platform != PLATFORM_OPAL) {
-               prom_printf("Booting Linux via __start() ...\n");
+               prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
                prom_debug("->dt_header_start=0x%x\n", hdr);
        }
 
index 4f3c5756cc09898f984de4cc6cf6fc7c1ba830ac..bf91658a8a406b051e2072e2ce948317abfefe29 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/switch_to.h>
+#include <asm/tm.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = {
        REG_OFFSET_END,
 };
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+       /*
+        * If the task is not current, it will already have been flushed to
+        * its thread_struct during __switch_to().
+        *
+        * A reclaim flushes ALL the state.
+        */
+
+       if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(TM_CAUSE_SIGNAL);
+
+}
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+#endif
+
 /**
  * regs_query_register_offset() - query register offset from its name
  * @name:      the name of a register
index c3e861df4b203ce5be8e0e9f0fb8f4e236d41cd5..24ec3ea4b3a2eeeeae2e0f713226acd252bab806 100644 (file)
@@ -93,15 +93,16 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
  * and we are running with enough of the MMU enabled to have our
  * proper kernel virtual addresses
  *
- * Find out what kind of machine we're on and save any data we need
- * from the early boot process (devtree is copied on pmac by prom_init()).
- * This is called very early on the boot process, after a minimal
- * MMU environment has been set up but before MMU_init is called.
+ * We do the initial parsing of the flat device-tree and prepare
+ * for the MMU to be fully initialized.
  */
 extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
+       /* Configure static keys first, now that we're relocated. */
+       setup_feature_keys();
+
        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
 
index eafb9a79e0116b600624a16212c2a02bfb46e363..7ac8e6eaab5ba24566f1f6fe06829e22727e86ea 100644 (file)
@@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr)
 
        /* Apply all the dynamic patching */
        apply_feature_fixups();
+       setup_feature_keys();
 
        /* Initialize the hash table or TLB handling */
        early_init_mmu();
index 6767605ea8da2eddb54152dbf92538467fe2dc3e..4111d30badfad30fa1eb7dd29abb6a26d8a338de 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/security.h>
 #include <linux/memblock.h>
 
+#include <asm/cpu_has_feature.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
index cbabd143acae8e9db3b3883be876527b68b83574..78a7449bf489d49a400ae45703ce77bdc7206f4b 100644 (file)
@@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
        $(call if_changed,vdso32ld)
 
 # strip rule for the .so file
@@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
        $(call if_changed_dep,vdso32as)
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
 
index c710802b8fb685a7cb5815d86387f7debb356d9e..366ae09b14c1e3a5179987d312ec4d1fedf86313 100644 (file)
@@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
        $(call if_changed,vdso64ld)
 
 # strip rule for the .so file
@@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
        $(call if_changed_dep,vdso64as)
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
 
index a75ba38a2d81415dfcbe497c27b1329fdd6c8736..05aa11399a7867c1a4148d0f9eb4bf80bca3ee9d 100644 (file)
@@ -1329,20 +1329,16 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
        xics->kvm = kvm;
 
        /* Already there ? */
-       mutex_lock(&kvm->lock);
        if (kvm->arch.xics)
                ret = -EEXIST;
        else
                kvm->arch.xics = xics;
-       mutex_unlock(&kvm->lock);
 
        if (ret) {
                kfree(xics);
                return ret;
        }
 
-       xics_debugfs_init(xics);
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /* Enable real mode support */
@@ -1354,9 +1350,17 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
        return 0;
 }
 
+static void kvmppc_xics_init(struct kvm_device *dev)
+{
+       struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
+
+       xics_debugfs_init(xics);
+}
+
 struct kvm_device_ops kvm_xics_ops = {
        .name = "kvm-xics",
        .create = kvmppc_xics_create,
+       .init = kvmppc_xics_init,
        .destroy = kvmppc_xics_free,
        .set_attr = xics_set_attr,
        .get_attr = xics_get_attr,
index d90870a66b60b4b0820a3044466f70716e1ed610..0a57fe6d49ccf43a07224c4f7da1f597e36cc10c 100644 (file)
@@ -127,8 +127,9 @@ _GLOBAL(csum_partial_copy_generic)
        stw     r7,12(r1)
        stw     r8,8(r1)
 
-       andi.   r0,r4,1                 /* is destination address even ? */
-       cmplwi  cr7,r0,0
+       rlwinm  r0,r4,3,0x8
+       rlwnm   r6,r6,r0,0,31   /* odd destination address: rotate one byte */
+       cmplwi  cr7,r0,0        /* is destination address even ? */
        addic   r12,r6,0
        addi    r6,r4,-4
        neg     r0,r4
@@ -237,7 +238,7 @@ _GLOBAL(csum_partial_copy_generic)
 66:    addze   r3,r12
        addi    r1,r1,16
        beqlr+  cr7
-       rlwinm  r3,r3,8,0,31    /* swap bytes for odd destination */
+       rlwinm  r3,r3,8,0,31    /* odd destination address: rotate one byte */
        blr
 
 /* read fault */
index 74145f02ad417b496ceba07b0f114a2bbd77bc75..043415f0bdb1646fa85f7bb26d04f0241c68ff63 100644 (file)
@@ -188,7 +188,10 @@ void __init apply_feature_fixups(void)
                          &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
        do_final_fixups();
+}
 
+void __init setup_feature_keys(void)
+{
        /*
         * Initialise jump label. This causes all the cpu/mmu_has_feature()
         * checks to take on their correct polarity based on the current set of
index 5be15cff758df193bd935baa50867a5dc9e3b063..2975754c65ea9514e4d7d10284a2beb42d6dd3cb 100644 (file)
@@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
        gang = alloc_spu_gang();
        SPUFS_I(inode)->i_ctx = NULL;
        SPUFS_I(inode)->i_gang = gang;
-       if (!gang)
+       if (!gang) {
+               ret = -ENOMEM;
                goto out_iput;
+       }
 
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
index 309d9ccccd509c83097386dca03621f4db3b7ca3..c61667e8bb06c51385fbe21c18b288aeb889f2da 100644 (file)
@@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
        if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
            !firmware_has_feature(FW_FEATURE_LPAR)) {
                dev->dev.archdata.dma_ops = &dma_direct_ops;
+               /*
+                * Set the coherent DMA mask to prevent the iommu
+                * from being used unnecessarily.
+                */
+               dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
                return;
        }
 #endif
index e505223b4ec5ed2d82bcb08b2e718a6b64601f18..ed8bba68a162120d282bfc2286a674b049e44d5c 100644 (file)
@@ -228,7 +228,8 @@ int __init opal_event_init(void)
                }
 
                /* Install interrupt handler */
-               rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
+               rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
+                                "opal", NULL);
                if (rc) {
                        irq_dispose_mapping(virq);
                        pr_warn("Error %d requesting irq %d (0x%x)\n",
index 8b4fc68cebcb2f4c02a716074cb1b1c1045d0d1f..6c9a65b52e63b589edbe809a4b65851ca2ea2f79 100644 (file)
@@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs,
 
        if (!(regs->msr & MSR_RI)) {
                /* If MSR_RI isn't set, we cannot recover */
+               pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
                recovered = 0;
        } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
                /* Platform corrected itself */
index 6b9528307f620e639be196d3b691e6ac8d159d28..fd9444f9fb0c24e0ed02dcb722c0e824ddc218e2 100644 (file)
@@ -111,10 +111,17 @@ static int __init iommu_setup(char *str)
 }
 early_param("iommu", iommu_setup);
 
-static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
+static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
 {
-       return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
-               (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
+       /*
+        * WARNING: We cannot rely on the resource flags. The Linux PCI
+        * allocation code sometimes decides to put a 64-bit prefetchable
+        * BAR in the 32-bit window, so we have to compare the addresses.
+        *
+        * For simplicity we only test resource start.
+        */
+       return (r->start >= phb->ioda.m64_base &&
+               r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
 }
 
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
@@ -229,7 +236,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
        sgsz = phb->ioda.m64_segsize;
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                r = &pdev->resource[i];
-               if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+               if (!r->parent || !pnv_pci_is_m64(phb, r))
                        continue;
 
                start = _ALIGN_DOWN(r->start - base, sgsz);
@@ -1877,7 +1884,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
                                        unsigned shift, unsigned long index,
                                        unsigned long npages)
 {
-       __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+       __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
        unsigned long start, end, inc;
 
        /* We'll invalidate DMA address in PE scope */
@@ -2863,7 +2870,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || res->parent)
                        continue;
-               if (!pnv_pci_is_mem_pref_64(res->flags)) {
+               if (!pnv_pci_is_m64(phb, res)) {
                        dev_warn(&pdev->dev, "Don't support SR-IOV with"
                                        " non M64 VF BAR%d: %pR. \n",
                                 i, res);
@@ -2958,7 +2965,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
                        index++;
                }
        } else if ((res->flags & IORESOURCE_MEM) &&
-                  !pnv_pci_is_mem_pref_64(res->flags)) {
+                  !pnv_pci_is_m64(phb, res)) {
                region.start = res->start -
                               phb->hose->mem_offset[0] -
                               phb->ioda.m32_pci_base;
@@ -3083,9 +3090,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
                bridge = bridge->bus->self;
        }
 
-       /* We fail back to M32 if M64 isn't supported */
-       if (phb->ioda.m64_segsize &&
-           pnv_pci_is_mem_pref_64(type))
+       /*
+        * We fall back to M32 if M64 isn't supported. We enforce the M64
+        * alignment for any 64-bit resource; PCIe doesn't care and
+        * bridges only do 64-bit prefetchable anyway.
+        */
+       if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
                return phb->ioda.m64_segsize;
        if (type & IORESOURCE_MEM)
                return phb->ioda.m32_segsize;
@@ -3125,7 +3135,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
                w = NULL;
                if (r->flags & type & IORESOURCE_IO)
                        w = &hose->io_resource;
-               else if (pnv_pci_is_mem_pref_64(r->flags) &&
+               else if (pnv_pci_is_m64(phb, r) &&
                         (type & IORESOURCE_PREFETCH) &&
                         phb->ioda.m64_segsize)
                        w = &hose->mem_resources[1];
index 43f7beb2902d0b5d5b1001cff100fdf67c650fb4..76ec104e88beea0e89e3473d988e23fbdb7312c1 100644 (file)
@@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
        return dlpar_update_device_tree_lmb(lmb);
 }
 
-static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
-{
-       unsigned long section_nr;
-       struct mem_section *mem_sect;
-       struct memory_block *mem_block;
-
-       section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
-       mem_sect = __nr_to_section(section_nr);
-
-       mem_block = find_memory_block(mem_sect);
-       return mem_block;
-}
-
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
@@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
 
 static int dlpar_add_lmb(struct of_drconf_cell *);
 
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+       unsigned long section_nr;
+       struct mem_section *mem_sect;
+       struct memory_block *mem_block;
+
+       section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+       mem_sect = __nr_to_section(section_nr);
+
+       mem_block = find_memory_block(mem_sect);
+       return mem_block;
+}
+
 static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
 {
        struct memory_block *mem_block;
index 0031eda320c3de7d94e3d9198b0cbc9087a7aec4..385e7aa9e2731cabf13cfccef35a2934719cdbc3 100644 (file)
@@ -1,6 +1,7 @@
 config PPC_XICS
        def_bool n
        select PPC_SMP_MUXED_IPI
+       select HARDIRQS_SW_RESEND
 
 config PPC_ICP_NATIVE
        def_bool n
index 27c936c080a66ffb5523931ac74d6c4c3a01849d..1c6bf4b66f56854d0717a644c75dea70e3fabde7 100644 (file)
@@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = {
        .irq_mask = ics_opal_mask_irq,
        .irq_unmask = ics_opal_unmask_irq,
        .irq_eoi = NULL, /* Patched at init time */
-       .irq_set_affinity = ics_opal_set_affinity
+       .irq_set_affinity = ics_opal_set_affinity,
+       .irq_set_type = xics_set_irq_type,
+       .irq_retrigger = xics_retrigger,
 };
 
 static int ics_opal_map(struct ics *ics, unsigned int virq);
index 3854dd41558d2697e73f9d72f9dffb27327952a8..78ee5c778ef8c7650ccea536aff7fb5cefc0bca3 100644 (file)
@@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = {
        .irq_mask = ics_rtas_mask_irq,
        .irq_unmask = ics_rtas_unmask_irq,
        .irq_eoi = NULL, /* Patched at init time */
-       .irq_set_affinity = ics_rtas_set_affinity
+       .irq_set_affinity = ics_rtas_set_affinity,
+       .irq_set_type = xics_set_irq_type,
+       .irq_retrigger = xics_retrigger,
 };
 
 static int ics_rtas_map(struct ics *ics, unsigned int virq)
index a795a5f0301c482ec8edeb3047464ea709f7326b..9d530f47958857621ad19655ac12f975f17e090d 100644 (file)
@@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
 
        pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
 
-       /* They aren't all level sensitive but we just don't really know */
-       irq_set_status_flags(virq, IRQ_LEVEL);
+       /*
+        * Mark interrupts as edge sensitive by default so that resend
+        * actually works. The device-tree parsing will turn the LSIs
+        * back to level.
+        */
+       irq_clear_status_flags(virq, IRQ_LEVEL);
 
        /* Don't call into ICS for IPIs */
        if (hw == XICS_IPI) {
@@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
 
 {
-       /* Current xics implementation translates everything
-        * to level. It is not technically right for MSIs but this
-        * is irrelevant at this point. We might get smarter in the future
-        */
        *out_hwirq = intspec[0];
-       *out_flags = IRQ_TYPE_LEVEL_LOW;
 
+       /*
+        * If intsize is at least 2, we look for the type in the second cell;
+        * we assume the LSB indicates a level interrupt.
+        */
+       if (intsize > 1) {
+               if (intspec[1] & 1)
+                       *out_flags = IRQ_TYPE_LEVEL_LOW;
+               else
+                       *out_flags = IRQ_TYPE_EDGE_RISING;
+       } else
+               *out_flags = IRQ_TYPE_LEVEL_LOW;
+
+       return 0;
+}
+
+int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+       /*
+        * We only support these. This really has no effect other than setting
+        * the corresponding descriptor bits, mind you, but those will in turn
+        * affect the resend function when re-enabling an edge interrupt.
+        *
+        * We set the default to edge as explained in map().
+        */
+       if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
+               flow_type = IRQ_TYPE_EDGE_RISING;
+
+       if (flow_type != IRQ_TYPE_EDGE_RISING &&
+           flow_type != IRQ_TYPE_LEVEL_LOW)
+               return -EINVAL;
+
+       irqd_set_trigger_type(d, flow_type);
+
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+int xics_retrigger(struct irq_data *data)
+{
+       /*
+        * We need to push a dummy CPPR when retriggering, since the subsequent
+        * EOI will try to pop it. Passing 0 works, as the function hard codes
+        * the priority value anyway.
+        */
+       xics_push_cppr(0);
+
+       /* Tell the core to do a soft retrigger */
        return 0;
 }
 
index 9e607bf2d640df1ba7aee69b2cccbc4fd5ff61cb..e751fe25d6ab670428f5c97b8464d9102baddbe6 100644 (file)
@@ -123,6 +123,7 @@ config S390
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_EARLY_PFN_TO_NID
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
@@ -871,4 +872,17 @@ config S390_GUEST
          Select this option if you want to run the kernel as a guest under
          the KVM hypervisor.
 
+config S390_GUEST_OLD_TRANSPORT
+       def_bool y
+       prompt "Guest support for old s390 virtio transport (DEPRECATED)"
+       depends on S390_GUEST
+       help
+         Enable this option to add support for the old s390-virtio
+         transport (i.e. virtio devices NOT based on virtio-ccw). This
+         type of virtio device is only available on the experimental
+         kuli userspace or with old (< 2.6) qemu. If you are running
+         with a modern version of qemu (which supports virtio-ccw since
+         1.4 and uses it by default since version 2.4), you probably won't
+         need this.
+
 endmenu
index 3f3ae4865d579e8a9420cc22c6c2b39f37e70a9f..f142215ed30dd1dc4ae7b14d57d72732a04b82b7 100644 (file)
@@ -1672,6 +1672,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
+       kvm_s390_set_prefix(vcpu, 0);
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
        /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2361,8 +2362,10 @@ retry:
                rc = gmap_mprotect_notify(vcpu->arch.gmap,
                                          kvm_s390_get_prefix(vcpu),
                                          PAGE_SIZE * 2, PROT_WRITE);
-               if (rc)
+               if (rc) {
+                       kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        return rc;
+               }
                goto retry;
        }
 
index d96596128e9f2591c4cde57f3bb6b1bf256b30f9..f481fcde067ba145f6cc2ace122d9d0a543ebe00 100644 (file)
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       check_object_size(to, n, false);
        if (static_branch_likely(&have_mvcos))
                return copy_from_user_mvcos(to, from, n);
        return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        if (static_branch_likely(&have_mvcos))
                return copy_to_user_mvcos(to, from, n);
        return copy_to_user_mvcs(to, from, n);
index 546293d9e6c52906f2c1f72d1a9545749595f994..59b09600dd326b8d0e24853d720d18d73837cd69 100644 (file)
@@ -43,6 +43,7 @@ config SPARC
        select OLD_SIGSUSPEND
        select ARCH_HAS_SG_CHAIN
        select CPU_NO_EFFICIENT_FFS
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
        def_bool !64BIT
index 57aca2792d29f89203735f892ceab4b73340bd69..341a5a133f4837b98f5c9928865ccf6d088af419 100644 (file)
@@ -248,22 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) to, n))
+       if (n && __access_ok((unsigned long) to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
-       else
+       } else
                return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) from, n))
+       if (n && __access_ok((unsigned long) from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
-       else
+       } else
                return n;
 }
 
index e9a51d64974ddff102017ee8f86be01f7a41361a..8bda94fab8e8cc52baca57952391cbb7665afd1f 100644 (file)
@@ -210,8 +210,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-       unsigned long ret = ___copy_from_user(to, from, size);
+       unsigned long ret;
 
+       if (!__builtin_constant_p(size))
+               check_object_size(to, size, false);
+
+       ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);
 
@@ -227,8 +231,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-       unsigned long ret = ___copy_to_user(to, from, size);
+       unsigned long ret;
 
+       if (!__builtin_constant_p(size))
+               check_object_size(from, size, true);
+       ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
index e35632ef23c759a43e4673d222aac430172cdad7..62dfc644c908ab5ffb173b493d5b97b93e8f7ca6 100644 (file)
@@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-               bool write, bool foreign)
+               bool write, bool execute, bool foreign)
 {
        /* by default, allow everything */
        return true;
index 5c6e7471b732335bf0b4272a4f274520d2bb4f4e..c580d8c33562ec5eba4dbfc273ae3ed7b6b67072 100644 (file)
@@ -80,6 +80,7 @@ config X86
        select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
        select HAVE_AOUT                        if X86_32
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +92,7 @@ config X86
        select HAVE_ARCH_SOFT_DIRTY             if X86_64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_WITHIN_STACK_FRAMES
        select HAVE_EBPF_JIT                    if X86_64
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CMPXCHG_DOUBLE
index fe91c25092da2662b67413a154e446843e7d5f7a..77f28ce9c6464e71a942f767082391502c8fa80d 100644 (file)
@@ -5,6 +5,8 @@
 OBJECT_FILES_NON_STANDARD_entry_$(BITS).o   := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
 
+CFLAGS_syscall_64.o            += -Wno-override-init
+CFLAGS_syscall_32.o            += -Wno-override-init
 obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y                          += common.o
 
index b846875aeea6792ce4a0c359f7635112f27e84d5..d172c619c44931b34b01d34b1dd09379ba26a5b4 100644 (file)
@@ -288,11 +288,15 @@ return_from_SYSCALL_64:
        jne     opportunistic_sysret_failed
 
        /*
-        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
-        * restoring TF results in a trap from userspace immediately after
-        * SYSRET.  This would cause an infinite loop whenever #DB happens
-        * with register state that satisfies the opportunistic SYSRET
-        * conditions.  For example, single-stepping this user code:
+        * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
+        * restore RF properly. If the slowpath sets it for whatever reason, we
+        * need to restore it correctly.
+        *
+        * SYSRET can restore TF, but unlike IRET, restoring TF results in a
+        * trap from userspace immediately after SYSRET.  This would cause an
+        * infinite loop whenever #DB happens with register state that satisfies
+        * the opportunistic SYSRET conditions.  For example, single-stepping
+        * this user code:
         *
         *           movq       $stuck_here, %rcx
         *           pushfq
@@ -601,9 +605,20 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 .endm
 #endif
 
+/* Make sure APIC interrupt handlers end up in the irqentry section: */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
+# define POP_SECTION_IRQENTRY  .popsection
+#else
+# define PUSH_SECTION_IRQENTRY
+# define POP_SECTION_IRQENTRY
+#endif
+
 .macro apicinterrupt num sym do_sym
+PUSH_SECTION_IRQENTRY
 apicinterrupt3 \num \sym \do_sym
 trace_apicinterrupt \num \sym
+POP_SECTION_IRQENTRY
 .endm
 
 #ifdef CONFIG_SMP
index 97a69dbba649b6bb7c6dd936440f572c5d02ed6f..9d35ec0cb8fc916ba3b4b63f5bdb1b6ebda5de55 100644 (file)
@@ -100,6 +100,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
        }
 }
 
+static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+               SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+}
+
 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
 {
        if (box->pmu->pmu_idx == 0)
@@ -127,6 +133,7 @@ static struct attribute_group snb_uncore_format_group = {
 
 static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
+       .enable_box     = snb_uncore_msr_enable_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
@@ -192,6 +199,12 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
        }
 }
 
+static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+       wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+               SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
+}
+
 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
 {
        if (box->pmu->pmu_idx == 0)
@@ -200,6 +213,7 @@ static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
 
 static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box       = skl_uncore_msr_init_box,
+       .enable_box     = skl_uncore_msr_enable_box,
        .exit_box       = skl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
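For context, the ops table above carries optional callbacks, and adding enable_box here presumably gives these client-uncore boxes the global-enable MSR write they were missing. Below is a generic, hedged illustration of that optional-hook pattern; the struct and function names are hypothetical and this is not the kernel's actual uncore core code:

struct box;

struct box_ops {
	void (*init_box)(struct box *b);
	void (*enable_box)(struct box *b);	/* optional global-enable hook */
	void (*exit_box)(struct box *b);
};

struct box {
	const struct box_ops *ops;
};

static void box_enable(struct box *b)
{
	/* Only drivers that supply the hook get the extra enable step. */
	if (b->ops->enable_box)
		b->ops->enable_box(b);
}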
index 824e54086e071456b170380c52e561dbbac62cf7..8aee83bcf71f2dc5a380009957d0858a7b4a2507 100644 (file)
@@ -2626,7 +2626,7 @@ void hswep_uncore_cpu_init(void)
 
 static struct intel_uncore_type hswep_uncore_ha = {
        .name           = "ha",
-       .num_counters   = 5,
+       .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
@@ -2645,7 +2645,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = {
 
 static struct intel_uncore_type hswep_uncore_imc = {
        .name           = "imc",
-       .num_counters   = 5,
+       .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
@@ -2691,7 +2691,7 @@ static struct intel_uncore_type hswep_uncore_irp = {
 
 static struct intel_uncore_type hswep_uncore_qpi = {
        .name                   = "qpi",
-       .num_counters           = 5,
+       .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
@@ -2773,7 +2773,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
 
 static struct intel_uncore_type hswep_uncore_r3qpi = {
        .name           = "r3qpi",
-       .num_counters   = 4,
+       .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 44,
        .constraints    = hswep_uncore_r3qpi_constraints,
@@ -2972,7 +2972,7 @@ static struct intel_uncore_type bdx_uncore_ha = {
 
 static struct intel_uncore_type bdx_uncore_imc = {
        .name           = "imc",
-       .num_counters   = 5,
+       .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
index f5befd4945f2a84d4123ce47d2c68d5bab1a0a88..124357773ffade4a78766e3cd021d928fa4f5866 100644 (file)
@@ -135,6 +135,7 @@ extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
+extern void lapic_update_tsc_freq(void);
 extern int APIC_init_uniprocessor(void);
 
 #ifdef CONFIG_X86_64
@@ -170,6 +171,7 @@ static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
 # define setup_boot_APIC_clock x86_init_noop
 # define setup_secondary_APIC_clock x86_init_noop
+static inline void lapic_update_tsc_freq(void) { }
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_X2APIC
index 7178043b0e1dd69d20a6ff5ddaa37ee6c32841f8..59405a248fc2488c66259e5669c1f857b13954aa 100644 (file)
@@ -22,10 +22,6 @@ typedef struct {
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
-       /*
-        * irq_tlb_count is double-counted in irq_call_count, so it must be
-        * subtracted from irq_call_count when displaying irq_call_count
-        */
        unsigned int irq_tlb_count;
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
index 223042086f4e9aa29498d3f159aaa2b757fa766a..737da62bfeb095e875912b18e5732b7ce04cb18e 100644 (file)
@@ -5,10 +5,10 @@ struct x86_mapping_info {
        void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
        void *context;                   /* context for alloc_pgt_page */
        unsigned long pmd_flag;          /* page flag for PMD entry */
-       bool kernel_mapping;             /* kernel mapping or ident mapping */
+       unsigned long offset;            /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-                               unsigned long addr, unsigned long end);
+                               unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
index 7e8ec7ae10faff67a1c6fd26314ed53be3ed598f..1cc82ece9ac1819b92ec82aca72805c0e966af97 100644 (file)
@@ -145,7 +145,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
  *
  * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
  * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
- * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
+ * | OFFSET (14->63) | TYPE (9-13)  |0|X|X|X| X| X|X|X|0| <- swp entry
  *
  * G (8) is aliased and used as a PROT_NONE indicator for
  * !present ptes.  We need to start storing swap entries above
@@ -156,7 +156,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
 #define SWP_TYPE_BITS 5
 /* Place the offset above the type: */
-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
 
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
 
index 9c6b890d5e7a0733ed7e92f1d3f1b1aa81bd349f..b2988c0ed82961c043b682d44b829c6db00a1e46 100644 (file)
@@ -58,7 +58,15 @@ extern unsigned char boot_gdt[];
 extern unsigned char secondary_startup_64[];
 #endif
 
+static inline size_t real_mode_size_needed(void)
+{
+       if (real_mode_header)
+               return 0;       /* already allocated. */
+
+       return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
+}
+
+void set_real_mode_mem(phys_addr_t mem, size_t size);
 void reserve_real_mode(void);
-void setup_real_mode(void);
 
 #endif /* _ARCH_X86_REALMODE_H */
index 84b59846154a92be76ebd630a79283c9d99b2a39..8b7c8d8e0852cf50d5a03e24f89ae9c693a1891c 100644 (file)
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ *              1 if within a frame
+ *             -1 if placed across a frame boundary (or outside stack)
+ *              0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+       const void *frame = NULL;
+       const void *oldframe;
+
+       oldframe = __builtin_frame_address(1);
+       if (oldframe)
+               frame = __builtin_frame_address(2);
+       /*
+        * low ----------------------------------------------> high
+        * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+        *                     ^----------------^
+        *               allow copies only within here
+        */
+       while (stack <= frame && frame < stackend) {
+               /*
+                * If obj + len extends past the last frame, this
+                * check won't pass and the next frame will be 0,
+                * causing us to bail out and correctly report
+                * the copy as invalid.
+                */
+               if (obj + len <= frame)
+                       return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+               oldframe = frame;
+               frame = *(const void * const *)frame;
+       }
+       return -1;
+#else
+       return 0;
+#endif
+}
+
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
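A hedged sketch of how a hardened-usercopy caller might consume the return values documented above; this is not the actual mm/usercopy.c code, and copy_within_stack_ok() is a made-up name (task_stack_page() and THREAD_SIZE are the usual kernel helpers):

static inline bool copy_within_stack_ok(const void *obj, unsigned long len)
{
	const void *stack = task_stack_page(current);
	const void *stackend = stack + THREAD_SIZE;

	switch (arch_within_stack_frames(stack, stackend, obj, len)) {
	case 1:		/* object lies entirely within one frame: accept */
		return true;
	case -1:	/* object straddles a frame boundary: reject */
		return false;
	default:	/* 0: no frame pointers, fall back to coarser checks */
		return true;
	}
}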
index 4e5be94e079a6c64353bd327c9fe4ef9796e16b1..6fa85944af83d8ddbbad3a344a31a7920e64e6d0 100644 (file)
@@ -135,7 +135,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 static inline void __native_flush_tlb(void)
 {
+       /*
+        * If current->mm == NULL then we borrow a mm which may change during a
+        * task switch and therefore we must not be preempted while we write CR3
+        * back:
+        */
+       preempt_disable();
        native_write_cr3(native_read_cr3());
+       preempt_enable();
 }
 
 static inline void __native_flush_tlb_global_irq_disabled(void)
index c03bfb68c50352df52d6ae4e36fa54bdc22dbc50..a0ae610b9280183fc1ca42ddd3fcc45333b433c5 100644 (file)
@@ -761,9 +761,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
         * case, and do only runtime checking for non-constant sizes.
         */
 
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
-       else if(__builtin_constant_p(n))
+       } else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);
@@ -781,9 +782,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        might_fault();
 
        /* See the comment in copy_from_user() above. */
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
-       else if(__builtin_constant_p(n))
+       } else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);
@@ -812,21 +814,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 #define user_access_begin()    __uaccess_begin()
 #define user_access_end()      __uaccess_end()
 
-#define unsafe_put_user(x, ptr)                                                \
-({                                                                             \
+#define unsafe_put_user(x, ptr, err_label)                                     \
+do {                                                                           \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
-       __builtin_expect(__pu_err, 0);                                          \
-})
+       if (unlikely(__pu_err)) goto err_label;                                 \
+} while (0)
 
-#define unsafe_get_user(x, ptr)                                                \
-({                                                                             \
+#define unsafe_get_user(x, ptr, err_label)                                     \
+do {                                                                           \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
-       __builtin_expect(__gu_err, 0);                                          \
-})
+       if (unlikely(__gu_err)) goto err_label;                                 \
+} while (0)
 
 #endif /* _ASM_X86_UACCESS_H */
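A minimal usage sketch for the new goto-based unsafe accessors, assuming the caller has already validated the destination with access_ok(); the function name and the efault label are made up:

static int put_two_words(u32 __user *dst, u32 a, u32 b)
{
	user_access_begin();
	unsafe_put_user(a, dst, efault);	/* jumps to efault on fault */
	unsafe_put_user(b, dst + 1, efault);
	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}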
 
index 4b32da24faaf1cb21fe3d4450a425c5193ee4b97..7d3bdd1ed6977b5e1f69dc8ba3e3d6cfa8f861a3 100644 (file)
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        return __copy_to_user_ll(to, from, n);
 }
 
@@ -95,6 +96,7 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_fault();
+       check_object_size(to, n, false);
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
index 2eac2aa3e37f30318f6e20ed1753ae29d472f251..673059a109fee067a6739704470f947fd7acae57 100644 (file)
@@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(dst, size, false);
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
@@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(src, size, true);
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
index c852590254d5f4191609f92fce8ee488896efe8b..e652a7cc61863667fad8ff2baa0a7242485600df 100644 (file)
@@ -79,7 +79,7 @@ struct uv_gam_range_entry {
        u16     nasid;          /* HNasid */
        u16     sockid;         /* Socket ID, high bits of APIC ID */
        u16     pnode;          /* Index to MMR and GRU spaces */
-       u32     pxm;            /* ACPI proximity domain number */
+       u32     unused2;
        u32     limit;          /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
 };
 
@@ -88,7 +88,8 @@ struct uv_gam_range_entry {
 #define        UV_SYSTAB_VERSION_UV4           0x400   /* UV4 BIOS base version */
 #define        UV_SYSTAB_VERSION_UV4_1         0x401   /* + gpa_shift */
 #define        UV_SYSTAB_VERSION_UV4_2         0x402   /* + TYPE_NVRAM/WINDOW/MBOX */
-#define        UV_SYSTAB_VERSION_UV4_LATEST    UV_SYSTAB_VERSION_UV4_2
+#define        UV_SYSTAB_VERSION_UV4_3         0x403   /* - GAM Range PXM Value */
+#define        UV_SYSTAB_VERSION_UV4_LATEST    UV_SYSTAB_VERSION_UV4_3
 
 #define        UV_SYSTAB_TYPE_UNUSED           0       /* End of table (offset == 0) */
 #define        UV_SYSTAB_TYPE_GAM_PARAMS       1       /* GAM PARAM conversions */
index 20abd912f0e4a6bbaa364dd97b388d0dc3da136a..cea4fc19e8447d14fd6dcba5df94aa91f7a2332d 100644 (file)
@@ -313,7 +313,7 @@ int lapic_get_maxlvt(void)
 
 /* Clock divisor */
 #define APIC_DIVISOR 16
-#define TSC_DIVISOR  32
+#define TSC_DIVISOR  8
 
 /*
  * This function sets up the local APIC timer, with a timeout of
@@ -565,12 +565,36 @@ static void setup_APIC_timer(void)
                                    CLOCK_EVT_FEAT_DUMMY);
                levt->set_next_event = lapic_next_deadline;
                clockevents_config_and_register(levt,
-                                               (tsc_khz / TSC_DIVISOR) * 1000,
+                                               tsc_khz * (1000 / TSC_DIVISOR),
                                                0xF, ~0UL);
        } else
                clockevents_register_device(levt);
 }
 
+/*
+ * Install the recalibrated TSC frequency into the TSC deadline
+ * clockevent devices.
+ */
+static void __lapic_update_tsc_freq(void *info)
+{
+       struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
+
+       if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+               return;
+
+       clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
+}
+
+void lapic_update_tsc_freq(void)
+{
+       /*
+        * The clockevent device's ->mult and ->shift can both be
+        * changed. In order to avoid races, schedule the frequency
+        * update code on each CPU.
+        */
+       on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
+}
+
 /*
  * In this functions we calibrate APIC bus clocks to the external timer.
  *
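A worked example of the precision difference between the old and new expressions, which is presumably why both the divisor and the operand order changed (hypothetical tsc_khz of 2594123; the exact clockevent frequency is tsc_khz * 1000 / TSC_DIVISOR):

	old: TSC_DIVISOR = 32,  (tsc_khz / TSC_DIVISOR) * 1000
	     = (2594123 / 32) * 1000 = 81066 * 1000 = 81066000 Hz
	     (exact value is 81066343 Hz, so up to ~1 kHz is lost to truncation)
	new: TSC_DIVISOR = 8,   tsc_khz * (1000 / TSC_DIVISOR)
	     = 2594123 * 125 = 324265375 Hz, exact because 8 divides 1000 evenly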
index 6368fa69d2afa0eb44c5e90fe5293c4ceeafa93c..54f35d988025b6d867f4915d5d4aeecf9461f352 100644 (file)
@@ -155,7 +155,7 @@ static void init_x2apic_ldr(void)
 /*
  * At CPU state changes, update the x2apic cluster sibling info.
  */
-int x2apic_prepare_cpu(unsigned int cpu)
+static int x2apic_prepare_cpu(unsigned int cpu)
 {
        if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
                return -ENOMEM;
@@ -168,7 +168,7 @@ int x2apic_prepare_cpu(unsigned int cpu)
        return 0;
 }
 
-int x2apic_dead_cpu(unsigned int this_cpu)
+static int x2apic_dead_cpu(unsigned int this_cpu)
 {
        int cpu;
 
@@ -186,13 +186,18 @@ int x2apic_dead_cpu(unsigned int this_cpu)
 static int x2apic_cluster_probe(void)
 {
        int cpu = smp_processor_id();
+       int ret;
 
        if (!x2apic_mode)
                return 0;
 
+       ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+                               x2apic_prepare_cpu, x2apic_dead_cpu);
+       if (ret < 0) {
+               pr_err("Failed to register X2APIC_PREPARE\n");
+               return 0;
+       }
        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
-       cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
-                         x2apic_prepare_cpu, x2apic_dead_cpu);
        return 1;
 }
 
index 09b59adaea3f5499c1c3da0473e79f8f40695037..cb0673c1e940a2fe0e92f04304972f83aad3bacb 100644 (file)
@@ -223,6 +223,11 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
        if (strncmp(oem_id, "SGI", 3) != 0)
                return 0;
 
+       if (numa_off) {
+               pr_err("UV: NUMA is off, disabling UV support\n");
+               return 0;
+       }
+
        /* Setup early hub type field in uv_hub_info for Node 0 */
        uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
 
@@ -325,7 +330,7 @@ static __init void build_uv_gr_table(void)
        struct uv_gam_range_entry *gre = uv_gre_table;
        struct uv_gam_range_s *grt;
        unsigned long last_limit = 0, ram_limit = 0;
-       int bytes, i, sid, lsid = -1;
+       int bytes, i, sid, lsid = -1, indx = 0, lindx = -1;
 
        if (!gre)
                return;
@@ -356,11 +361,12 @@ static __init void build_uv_gr_table(void)
                }
                sid = gre->sockid - _min_socket;
                if (lsid < sid) {               /* new range */
-                       grt = &_gr_table[sid];
-                       grt->base = lsid;
+                       grt = &_gr_table[indx];
+                       grt->base = lindx;
                        grt->nasid = gre->nasid;
                        grt->limit = last_limit = gre->limit;
                        lsid = sid;
+                       lindx = indx++;
                        continue;
                }
                if (lsid == sid && !ram_limit) {        /* update range */
@@ -371,7 +377,7 @@ static __init void build_uv_gr_table(void)
                }
                if (!ram_limit) {               /* non-contiguous ram range */
                        grt++;
-                       grt->base = sid - 1;
+                       grt->base = lindx;
                        grt->nasid = gre->nasid;
                        grt->limit = last_limit = gre->limit;
                        continue;
@@ -1155,19 +1161,18 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s %3s\n",
+                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n",
                                "Range", "", "Size", "Type", "NASID",
-                               "SID", "PN", "PXM");
+                               "SID", "PN");
                }
                pr_info(
-               "UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x %3d\n",
+               "UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
                        ((unsigned long)(gre->limit - lgre)) >>
                                (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
-                       gre->type, gre->nasid, gre->sockid,
-                       gre->pnode, gre->pxm);
+                       gre->type, gre->nasid, gre->sockid, gre->pnode);
 
                lgre = gre->limit;
                if (sock_min > gre->sockid)
@@ -1286,7 +1291,7 @@ static void __init build_socket_tables(void)
                _pnode_to_socket[i] = SOCK_EMPTY;
 
        /* fill in pnode/node/addr conversion list values */
-       pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n");
+       pr_info("UV: GAM Building socket/pnode conversion tables\n");
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
                if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
                        continue;
@@ -1294,20 +1299,18 @@ static void __init build_socket_tables(void)
                if (_socket_to_pnode[i] != SOCK_EMPTY)
                        continue;       /* duplicate */
                _socket_to_pnode[i] = gre->pnode;
-               _socket_to_node[i] = gre->pxm;
 
                i = gre->pnode - minpnode;
                _pnode_to_socket[i] = gre->sockid;
 
                pr_info(
-               "UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n",
+               "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
                        gre->sockid, gre->type, gre->nasid,
                        _socket_to_pnode[gre->sockid - minsock],
-                       _socket_to_node[gre->sockid - minsock],
                        _pnode_to_socket[gre->pnode - minpnode]);
        }
 
-       /* check socket -> node values */
+       /* Set socket -> node values */
        lnid = -1;
        for_each_present_cpu(cpu) {
                int nid = cpu_to_node(cpu);
@@ -1318,14 +1321,9 @@ static void __init build_socket_tables(void)
                lnid = nid;
                apicid = per_cpu(x86_cpu_to_apicid, cpu);
                sockid = apicid >> uv_cpuid.socketid_shift;
-               i = sockid - minsock;
-
-               if (nid != _socket_to_node[i]) {
-                       pr_warn(
-                       "UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n",
-                               i, sockid, gre->type, _socket_to_node[i], nid);
-                       _socket_to_node[i] = nid;
-               }
+               _socket_to_node[sockid - minsock] = nid;
+               pr_info("UV: sid:%02x: apicid:%04x node:%2d\n",
+                       sockid, apicid, nid);
        }
 
        /* Setup physical blade to pnode translation from GAM Range Table */
index 680049aa4593ca773d9860a2b8af77eab3839f31..01567aa87503f021cfb3b4104520842b1ec87179 100644 (file)
@@ -866,105 +866,17 @@ const void *get_xsave_field_ptr(int xsave_state)
        return get_xsave_addr(&fpu->state.xsave, xsave_state);
 }
 
-
-/*
- * Set xfeatures (aka XSTATE_BV) bit for a feature that we want
- * to take out of its "init state".  This will ensure that an
- * XRSTOR actually restores the state.
- */
-static void fpu__xfeature_set_non_init(struct xregs_state *xsave,
-               int xstate_feature_mask)
-{
-       xsave->header.xfeatures |= xstate_feature_mask;
-}
-
-/*
- * This function is safe to call whether the FPU is in use or not.
- *
- * Note that this only works on the current task.
- *
- * Inputs:
- *     @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
- *     XFEATURE_MASK_SSE, etc...)
- *     @xsave_state_ptr: a pointer to a copy of the state that you would
- *     like written in to the current task's FPU xsave state.  This pointer
- *     must not be located in the current tasks's xsave area.
- * Output:
- *     address of the state in the xsave area or NULL if the state
- *     is not present or is in its 'init state'.
- */
-static void fpu__xfeature_set_state(int xstate_feature_mask,
-               void *xstate_feature_src, size_t len)
-{
-       struct xregs_state *xsave = &current->thread.fpu.state.xsave;
-       struct fpu *fpu = &current->thread.fpu;
-       void *dst;
-
-       if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
-               WARN_ONCE(1, "%s() attempted with no xsave support", __func__);
-               return;
-       }
-
-       /*
-        * Tell the FPU code that we need the FPU state to be in
-        * 'fpu' (not in the registers), and that we need it to
-        * be stable while we write to it.
-        */
-       fpu__current_fpstate_write_begin();
-
-       /*
-        * This method *WILL* *NOT* work for compact-format
-        * buffers.  If the 'xstate_feature_mask' is unset in
-        * xcomp_bv then we may need to move other feature state
-        * "up" in the buffer.
-        */
-       if (xsave->header.xcomp_bv & xstate_feature_mask) {
-               WARN_ON_ONCE(1);
-               goto out;
-       }
-
-       /* find the location in the xsave buffer of the desired state */
-       dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask);
-
-       /*
-        * Make sure that the pointer being passed in did not
-        * come from the xsave buffer itself.
-        */
-       WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself");
-
-       /* put the caller-provided data in the location */
-       memcpy(dst, xstate_feature_src, len);
-
-       /*
-        * Mark the xfeature so that the CPU knows there is state
-        * in the buffer now.
-        */
-       fpu__xfeature_set_non_init(xsave, xstate_feature_mask);
-out:
-       /*
-        * We are done writing to the 'fpu'.  Reenable preeption
-        * and (possibly) move the fpstate back in to the fpregs.
-        */
-       fpu__current_fpstate_write_end();
-}
-
 #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
 #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
 
 /*
- * This will go out and modify the XSAVE buffer so that PKRU is
- * set to a particular state for access to 'pkey'.
- *
- * PKRU state does affect kernel access to user memory.  We do
- * not modfiy PKRU *itself* here, only the XSAVE state that will
- * be restored in to PKRU when we return back to userspace.
+ * This will go out and modify the PKRU register to set the access
+ * rights for @pkey to @init_val.
  */
 int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
                unsigned long init_val)
 {
-       struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
-       struct pkru_state *old_pkru_state;
-       struct pkru_state new_pkru_state;
+       u32 old_pkru;
        int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
        u32 new_pkru_bits = 0;
 
@@ -974,6 +886,15 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
         */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return -EINVAL;
+       /*
+        * For most XSAVE components, this would be an arduous task:
+        * bringing fpstate up to date with fpregs, updating fpstate,
+        * then re-populating fpregs.  But, for components that are
+        * never lazily managed, we can just access the fpregs
+        * directly.  PKRU is never managed lazily, so we can just
+        * manipulate it directly.  Make sure it stays that way.
+        */
+       WARN_ON_ONCE(!use_eager_fpu());
 
        /* Set the bits we need in PKRU:  */
        if (init_val & PKEY_DISABLE_ACCESS)
@@ -984,37 +905,12 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
        /* Shift the bits in to the correct place in PKRU for pkey: */
        new_pkru_bits <<= pkey_shift;
 
-       /* Locate old copy of the state in the xsave buffer: */
-       old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU);
-
-       /*
-        * When state is not in the buffer, it is in the init
-        * state, set it manually.  Otherwise, copy out the old
-        * state.
-        */
-       if (!old_pkru_state)
-               new_pkru_state.pkru = 0;
-       else
-               new_pkru_state.pkru = old_pkru_state->pkru;
-
-       /* Mask off any old bits in place: */
-       new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
-
-       /* Set the newly-requested bits: */
-       new_pkru_state.pkru |= new_pkru_bits;
-
-       /*
-        * We could theoretically live without zeroing pkru.pad.
-        * The current XSAVE feature state definition says that
-        * only bytes 0->3 are used.  But we do not want to
-        * chance leaking kernel stack out to userspace in case a
-        * memcpy() of the whole xsave buffer was done.
-        *
-        * They're in the same cacheline anyway.
-        */
-       new_pkru_state.pad = 0;
+       /* Get old PKRU and mask off any old bits in place: */
+       old_pkru = read_pkru();
+       old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
 
-       fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state));
+       /* Write old part along with new part: */
+       write_pkru(old_pkru | new_pkru_bits);
 
        return 0;
 }
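A worked example of the bit manipulation above, assuming the usual x86 values PKRU_BITS_PER_PKEY == 2, PKRU_AD_BIT == 0x1 and PKRU_WD_BIT == 0x2:

	/* pkey = 4, init_val = PKEY_DISABLE_WRITE:
	 *   pkey_shift    = 4 * PKRU_BITS_PER_PKEY    = 8
	 *   new_pkru_bits = PKRU_WD_BIT << pkey_shift = 0x200
	 *   old_pkru     &= ~((0x1 | 0x2) << 8);        clear bits 8-9 (pkey 4)
	 *   write_pkru(old_pkru | 0x200);               writes disabled, reads allowed
	 */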
index 2dda0bc4576ebf7a6940056e621d0bdb4850aa79..f16c55bfc0907bc4a3b1f35174ca5f5cbc53ce99 100644 (file)
@@ -25,8 +25,6 @@ static void __init i386_default_early_setup(void)
        /* Initialize 32bit specific setup functions */
        x86_init.resources.reserve_resources = i386_reserve_resources;
        x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
-
-       reserve_bios_regions();
 }
 
 asmlinkage __visible void __init i386_start_kernel(void)
index 99d48e7d2974c64fe7b3fe9c1dd59e7f3b189cbb..54a2372f5dbb1eb0598788e944ad28708b638671 100644 (file)
@@ -183,7 +183,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
                copy_bootdata(__va(real_mode_data));
 
        x86_early_init_platform_quirks();
-       reserve_bios_regions();
 
        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
index ed16e58658a4201184fd0b1d8c470f5eb3e32c1e..c6dfd801df973039e8c00f1d48ad4058713c914d 100644 (file)
@@ -1242,7 +1242,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
        memset(&curr_time, 0, sizeof(struct rtc_time));
 
        if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
-               mc146818_set_time(&curr_time);
+               mc146818_get_time(&curr_time);
 
        if (hpet_rtc_flags & RTC_UIE &&
            curr_time.tm_sec != hpet_prev_update_sec) {
index 61521dc19c102114e177cbc21a4f5da9d94c20cd..9f669fdd20106cbb53b404e7085d31b4244bd7c7 100644 (file)
@@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
-               seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
-                                       irq_stats(j)->irq_tlb_count);
+               seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
index 991b77986d57529bc3102506b398b37217e66287..0fa60f5f5a1641dc8eb2c82968dbadd650ed6e37 100644 (file)
@@ -936,8 +936,6 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
-       kernel_randomize_memory();
-
        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        setup_memory_map();
        parse_setup_data();
@@ -1055,6 +1053,12 @@ void __init setup_arch(char **cmdline_p)
 
        max_possible_pfn = max_pfn;
 
+       /*
+        * Define random base addresses for memory sections after max_pfn is
+        * defined and before each memory section base is used.
+        */
+       kernel_randomize_memory();
+
 #ifdef CONFIG_X86_32
        /* max_low_pfn get updated here */
        find_low_pfn_range();
@@ -1097,6 +1101,8 @@ void __init setup_arch(char **cmdline_p)
                efi_find_mirror();
        }
 
+       reserve_bios_regions();
+
        /*
         * The EFI specification says that boot service code won't be called
         * after ExitBootServices(). This is, in fact, a lie.
@@ -1125,7 +1131,15 @@ void __init setup_arch(char **cmdline_p)
 
        early_trap_pf_init();
 
-       setup_real_mode();
+       /*
+        * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
+        * with the current CR4 value.  This may not be necessary, but
+        * auditing all the early-boot CR4 manipulation would be needed to
+        * rule it out.
+        */
+       if (boot_cpu_data.cpuid_level >= 0)
+               /* A CPU has %cr4 if and only if it has CPUID. */
+               mmu_cr4_features = __read_cr4();
 
        memblock_set_current_limit(get_max_mapped());
 
@@ -1174,13 +1188,6 @@ void __init setup_arch(char **cmdline_p)
 
        kasan_init();
 
-       if (boot_cpu_data.cpuid_level >= 0) {
-               /* A CPU has %cr4 if and only if it has CPUID */
-               mmu_cr4_features = __read_cr4();
-               if (trampoline_cr4_features)
-                       *trampoline_cr4_features = mmu_cr4_features;
-       }
-
 #ifdef CONFIG_X86_32
        /* sync back kernel address range */
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
index 1ef87e887051e950071aa746f68ca9736f3c7e4a..78b9cb5a26af31559625f3a4e40e62fdc7d53512 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
 #include <asm/geode.h>
+#include <asm/apic.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1249,6 +1250,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
                (unsigned long)tsc_khz / 1000,
                (unsigned long)tsc_khz % 1000);
 
+       /* Inform the TSC deadline clockevent devices about the recalibration */
+       lapic_update_tsc_freq();
+
 out:
        if (boot_cpu_has(X86_FEATURE_ART))
                art_related_clocksource = &clocksource_tsc;
index 6c1ff31d99ffeb0d0a28c5ee472bb1865ff23df3..495c776de4b470f8eb53236a0ddeb2ca8f043b6b 100644 (file)
@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
                *cursor &= 0xfe;
        }
        /*
-        * Similar treatment for VEX3 prefix.
-        * TODO: add XOP/EVEX treatment when insn decoder supports them
+        * Similar treatment for VEX3/EVEX prefix.
+        * TODO: add XOP treatment when insn decoder supports them
         */
-       if (insn->vex_prefix.nbytes == 3) {
+       if (insn->vex_prefix.nbytes >= 3) {
                /*
                 * vex2:     c5    rvvvvLpp   (has no b bit)
                 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
                 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
-                *   (evex will need setting of both b and x since
-                *   in non-sib encoding evex.x is 4th bit of MODRM.rm)
-                * Setting VEX3.b (setting because it has inverted meaning):
+                * Setting VEX3.b (setting because it has inverted meaning).
+                * Setting EVEX.x since (in non-SIB encoding) EVEX.x
+                * is the 4th bit of MODRM.rm, and needs the same treatment.
+                * For VEX3-encoded insns, VEX3.x value has no effect in
+                * non-SIB encoding, the change is superfluous but harmless.
                 */
                cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
-               *cursor |= 0x20;
+               *cursor |= 0x60;
        }
 
        /*
@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
 
        reg = MODRM_REG(insn);  /* Fetch modrm.reg */
        reg2 = 0xff;            /* Fetch vex.vvvv */
-       if (insn->vex_prefix.nbytes == 2)
-               reg2 = insn->vex_prefix.bytes[1];
-       else if (insn->vex_prefix.nbytes == 3)
+       if (insn->vex_prefix.nbytes)
                reg2 = insn->vex_prefix.bytes[2];
        /*
-        * TODO: add XOP, EXEV vvvv reading.
+        * TODO: add XOP vvvv reading.
         *
         * vex.vvvv field is in bits 6-3, bits are inverted.
         * But in 32-bit mode, high-order bit may be ignored.
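For reference, a small sketch of the prefix-byte arithmetic implied by the hunk above: in the 'rxbmmmmm' byte, R, X and B sit in bits 7, 6 and 5 (stored inverted), which is why the OR mask grows from 0x20 to 0x60. The macro names below are made up:

	#define VEX_PREFIX_B	(1 << 5)	/* 0x20 */
	#define VEX_PREFIX_X	(1 << 6)	/* 0x40 */

	/* old: force only VEX.b           -> *cursor |= VEX_PREFIX_B;                 (0x20) */
	/* new: force VEX.b and VEX/EVEX.x -> *cursor |= VEX_PREFIX_B | VEX_PREFIX_X;  (0x60) */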
index 02de3d74d2c5bb319d48503371f451672d66dc80..8a602a1e404a262f32fbe708e926e986636dcfca 100644 (file)
@@ -35,6 +35,7 @@ ENDPROC(__sw_hweight32)
 
 ENTRY(__sw_hweight64)
 #ifdef CONFIG_X86_64
+       pushq   %rdi
        pushq   %rdx
 
        movq    %rdi, %rdx                      # w -> t
@@ -60,6 +61,7 @@ ENTRY(__sw_hweight64)
        shrq    $56, %rax                       # w = w_tmp >> 56
 
        popq    %rdx
+       popq    %rdi
        ret
 #else /* CONFIG_X86_32 */
        /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
index f7dfeda83e5c444c85ec0527e2dba9fa86dc207a..121f59c6ee54e0159aab44889274f3bdf0b1041a 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/cpufeature.h>
 #include <asm/setup.h>
 
-#define debug_putstr(v) early_printk(v)
+#define debug_putstr(v) early_printk("%s", v)
 #define has_cpuflag(f) boot_cpu_has(f)
 #define get_boot_seed() kaslr_offset()
 #endif
index ec21796ac5fd5a95f5e386af57a67d2e4ba51f61..4473cb4f8b906dcae083a4b29c38b72d9b7d56d3 100644 (file)
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
 {
        addr &= PMD_MASK;
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);
 
-               if (!pmd_present(*pmd))
-                       set_pmd(pmd, __pmd(addr | pmd_flag));
+               if (pmd_present(*pmd))
+                       continue;
+
+               set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
        }
 }
 
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, 0);
-                       ident_pmd_init(info->pmd_flag, pmd, addr, next);
+                       ident_pmd_init(info, pmd, addr, next);
                        continue;
                }
                pmd = (pmd_t *)info->alloc_pgt_page(info->context);
                if (!pmd)
                        return -ENOMEM;
-               ident_pmd_init(info->pmd_flag, pmd, addr, next);
+               ident_pmd_init(info, pmd, addr, next);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
 
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-                             unsigned long addr, unsigned long end)
+                             unsigned long pstart, unsigned long pend)
 {
+       unsigned long addr = pstart + info->offset;
+       unsigned long end = pend + info->offset;
        unsigned long next;
        int result;
-       int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
        for (; addr < end; addr = next) {
-               pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+               pgd_t *pgd = pgd_page + pgd_index(addr);
                pud_t *pud;
 
                next = (addr & PGDIR_MASK) + PGDIR_SIZE;
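A hedged sketch of how callers are expected to use the new offset field (mirroring the hibernation change further down, which passes __PAGE_OFFSET); alloc_page_cb, pgd, pstart and pend are hypothetical caller-provided values:

	/* Plain 1:1 identity mapping: virtual == physical */
	struct x86_mapping_info ident = {
		.alloc_pgt_page	= alloc_page_cb,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,
	};

	/* Direct-map style mapping: virtual == physical + __PAGE_OFFSET */
	struct x86_mapping_info lin = {
		.alloc_pgt_page	= alloc_page_cb,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};

	kernel_ident_mapping_init(&ident, pgd, pstart, pend);	/* maps phys at phys */
	kernel_ident_mapping_init(&lin, pgd, pstart, pend);	/* maps phys at phys + __PAGE_OFFSET */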
index 620928903be3eead494b3f3e290f0bdb2d8325ab..d28a2d741f9e9e74e989272101b4a50e1ffa3050 100644 (file)
@@ -122,8 +122,18 @@ __ref void *alloc_low_pages(unsigned int num)
        return __va(pfn << PAGE_SHIFT);
 }
 
-/* need 3 4k for initial PMD_SIZE,  3 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE      (6 * PAGE_SIZE)
+/*
+ * By default we need 3 4k pages for the initial PMD_SIZE and 3 4k pages
+ * for 0-ISA_END_ADDRESS.  With KASLR memory randomization we may need
+ * twice as many pages, depending on the machine's e820 memory map and
+ * the PUD alignment.
+ */
+#ifndef CONFIG_RANDOMIZE_MEMORY
+#define INIT_PGD_PAGE_COUNT      6
+#else
+#define INIT_PGD_PAGE_COUNT      12
+#endif
+#define INIT_PGT_BUF_SIZE      (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
 void  __init early_alloc_pgt_buf(void)
 {
index 26dccd6c0df189a5b3a8df554af50801df8e3afc..ec8654f117d8e5152f359c66a23740f9d1a85160 100644 (file)
@@ -97,7 +97,7 @@ void __init kernel_randomize_memory(void)
         * add padding if needed (especially for memory hotplug support).
         */
        BUG_ON(kaslr_regions[0].base != &page_offset_base);
-       memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+       memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
                CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
 
        /* Adapt phyiscal memory region size based on available memory */
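A quick worked example of the rounding change, assuming TB_SHIFT is 40 and a machine with 1.5 TiB of addressable memory:

	max_pfn << PAGE_SHIFT                        == 0x18000000000  (1.5 TiB)
	old: 0x18000000000 >> TB_SHIFT               == 1   (truncates, region under-sized)
	new: DIV_ROUND_UP(0x18000000000, 1UL << 40)  == 2   (rounds up, covers all memory)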
index 4480c06cade78d663f18db8ef09ff3db723dc1fe..89d1146f5a6f76424a0045266862eee566c2b930 100644 (file)
@@ -254,6 +254,7 @@ void __init efi_free_boot_services(void)
        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+               size_t rm_size;
 
                if (md->type != EFI_BOOT_SERVICES_CODE &&
                    md->type != EFI_BOOT_SERVICES_DATA)
@@ -263,6 +264,26 @@ void __init efi_free_boot_services(void)
                if (md->attribute & EFI_MEMORY_RUNTIME)
                        continue;
 
+               /*
+                * Nasty quirk: if all sub-1MB memory is used for boot
+                * services, we can get here without having allocated the
+                * real mode trampoline.  It's too late to hand boot services
+                * memory back to the memblock allocator, so instead
+                * try to manually allocate the trampoline if needed.
+                *
+                * I've seen this on a Dell XPS 13 9350 with firmware
+                * 1.4.4 with SGX enabled booting Linux via Fedora 24's
+                * grub2-efi on a hard disk.  (And no, I don't know why
+                * this happened, but Linux should still try to boot rather
+                * than panicking early.)
+                */
+               rm_size = real_mode_size_needed();
+               if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
+                       set_real_mode_mem(start, rm_size);
+                       start += rm_size;
+                       size -= rm_size;
+               }
+
                free_bootmem_late(start, size);
        }
 
index 66b2166ea4a1c715a0362ed99cbeb3692a031476..23f2f3e41c7f48a60d3bd35fef9ef7025ca3e389 100644 (file)
@@ -187,7 +187,8 @@ EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
 void uv_bios_init(void)
 {
        uv_systab = NULL;
-       if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) {
+       if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+           !efi.uv_systab || efi_runtime_disabled()) {
                pr_crit("UV: UVsystab: missing\n");
                return;
        }
@@ -199,12 +200,14 @@ void uv_bios_init(void)
                return;
        }
 
+       /* Starting with UV4 the UV systab size is variable */
        if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
+               int size = uv_systab->size;
+
                iounmap(uv_systab);
-               uv_systab = ioremap(efi.uv_systab, uv_systab->size);
+               uv_systab = ioremap(efi.uv_systab, size);
                if (!uv_systab) {
-                       pr_err("UV: UVsystab: ioremap(%d) failed!\n",
-                               uv_systab->size);
+                       pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
                        return;
                }
        }
index f0b5f2d402afb15f639be87f9581c44c7298bdef..a3e3ccc87138a7d44e6fe12114403b25a2d4affa 100644 (file)
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
-               .kernel_mapping = true,
+               .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;
index 705e3fffb4a1a3296ac5745603681cc434c44d92..5db706f14111c7c12a55fc0731a86641fe2c4741 100644 (file)
@@ -1,9 +1,11 @@
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/realmode.h>
+#include <asm/tlbflush.h>
 
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
@@ -11,25 +13,37 @@ u32 *trampoline_cr4_features;
 /* Hold the pgd entry used on booting additional CPUs */
 pgd_t trampoline_pgd_entry;
 
+void __init set_real_mode_mem(phys_addr_t mem, size_t size)
+{
+       void *base = __va(mem);
+
+       real_mode_header = (struct real_mode_header *) base;
+       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+              base, (unsigned long long)mem, size);
+}
+
 void __init reserve_real_mode(void)
 {
        phys_addr_t mem;
-       unsigned char *base;
-       size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+       size_t size = real_mode_size_needed();
+
+       if (!size)
+               return;
+
+       WARN_ON(slab_is_available());
 
        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-       if (!mem)
-               panic("Cannot allocate trampoline\n");
+       if (!mem) {
+               pr_info("No sub-1M memory is available for the trampoline\n");
+               return;
+       }
 
-       base = __va(mem);
        memblock_reserve(mem, size);
-       real_mode_header = (struct real_mode_header *) base;
-       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-              base, (unsigned long long)mem, size);
+       set_real_mode_mem(mem, size);
 }
 
-void __init setup_real_mode(void)
+static void __init setup_real_mode(void)
 {
        u16 real_mode_seg;
        const u32 *rel;
@@ -84,7 +98,7 @@ void __init setup_real_mode(void)
 
        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
-       *trampoline_cr4_features = __read_cr4();
+       *trampoline_cr4_features = mmu_cr4_features;
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
@@ -100,7 +114,7 @@ void __init setup_real_mode(void)
  * need to mark it executable at do_pre_smp_initcalls() at least,
  * thus run it as a early_initcall().
  */
-static int __init set_real_mode_permissions(void)
+static void __init set_real_mode_permissions(void)
 {
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
@@ -119,7 +133,16 @@ static int __init set_real_mode_permissions(void)
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+}
+
+static int __init init_real_mode(void)
+{
+       if (!real_mode_header)
+               panic("Real mode trampoline was not allocated");
+
+       setup_real_mode();
+       set_real_mode_permissions();
 
        return 0;
 }
-early_initcall(set_real_mode_permissions);
+early_initcall(init_real_mode);
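A hedged summary of the trampoline-allocation flow that results from this restructuring; the reserve_real_mode() call site is assumed to be in the early setup path:

	/*
	 * setup path:     reserve_real_mode()
	 *                   -> memblock_find_in_range(0, 1M, ...)
	 *                   -> set_real_mode_mem() on success, else just log and return
	 * EFI quirk path:  efi_free_boot_services()
	 *                   -> set_real_mode_mem() on freed sub-1M boot-services memory
	 * early_initcall:  init_real_mode()
	 *                   -> panic if real_mode_header is still NULL, otherwise
	 *                      setup_real_mode() + set_real_mode_permissions()
	 */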
index 8c234dd9b8bc595c21d9d5abaa7327e02b9cc572..80cc7c089a15e908ae070d95ad4879e5b6309555 100644 (file)
@@ -1527,11 +1527,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
        u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+       const u32 STATUS_MASK = 0x80000037;
 
        if (mmio->num_lines)
                offset = to_interleave_offset(offset, mmio);
 
-       return readl(mmio->addr.base + offset);
+       return readl(mmio->addr.base + offset) & STATUS_MASK;
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
index 1a04af6d24212cdd16e5c8091d01f16e2b409384..6c6519f6492a4198c78cae1eaad5e33e03efd2d9 100644 (file)
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
        bool need_put = !!rbd_dev->opts;
 
        ceph_oid_destroy(&rbd_dev->header_oid);
+       ceph_oloc_destroy(&rbd_dev->header_oloc);
 
        rbd_put_client(rbd_dev->rbd_client);
        rbd_spec_put(rbd_dev->spec);
@@ -5336,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        }
        spec->pool_id = (u64)rc;
 
-       /* The ceph file layout needs to fit pool id in 32 bits */
-
-       if (spec->pool_id > (u64)U32_MAX) {
-               rbd_warn(NULL, "pool id too large (%llu > %u)",
-                               (unsigned long long)spec->pool_id, U32_MAX);
-               rc = -EIO;
-               goto err_out_client;
-       }
-
        rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
        if (!rbd_dev) {
                rc = -ENOMEM;
index 1523e05c46fc95b29c47af3b51ebdc9f93af9029..93b1aaa5ba3be26d5de4d0a7b461ecc2fe7beb61 100644 (file)
@@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk)
                num_vqs = 1;
 
        vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
-       if (!vblk->vqs) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!vblk->vqs)
+               return -ENOMEM;
 
        names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
-       if (!names)
-               goto err_names;
-
        callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
-       if (!callbacks)
-               goto err_callbacks;
-
        vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
-       if (!vqs)
-               goto err_vqs;
+       if (!names || !callbacks || !vqs) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
@@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk)
        /* Discover virtqueues and write information to configuration.  */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
        if (err)
-               goto err_find_vqs;
+               goto out;
 
        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
@@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk)
        }
        vblk->num_vqs = num_vqs;
 
- err_find_vqs:
+out:
        kfree(vqs);
- err_vqs:
        kfree(callbacks);
- err_callbacks:
        kfree(names);
- err_names:
        if (err)
                kfree(vblk->vqs);
- out:
        return err;
 }
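The consolidation above leans on kfree(NULL) being a no-op, which allows one cleanup label instead of a cascade. A minimal sketch of the same pattern with hypothetical names:

static int use_three_buffers(void)
{
	int err = 0;
	void *a = kmalloc(64, GFP_KERNEL);
	void *b = kmalloc(64, GFP_KERNEL);
	void *c = kmalloc(64, GFP_KERNEL);

	if (!a || !b || !c) {		/* one check covers all allocations */
		err = -ENOMEM;
		goto out;
	}

	/* ... use a, b and c ... */

out:
	kfree(a);	/* kfree(NULL) is a no-op, so a single label suffices */
	kfree(b);
	kfree(c);
	return err;
}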
 
index 28bce3f4f81d6aa56840657169c0842461f9f515..57700541f95129e6f8f194ede23afeeda35842da 100644 (file)
@@ -8,6 +8,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt)    "arm_arch_timer: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -370,16 +373,33 @@ static bool arch_timer_has_nonsecure_ppi(void)
                arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
+static u32 check_ppi_trigger(int irq)
+{
+       u32 flags = irq_get_trigger_type(irq);
+
+       if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
+               pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
+               pr_warn("WARNING: Please fix your firmware\n");
+               flags = IRQF_TRIGGER_LOW;
+       }
+
+       return flags;
+}
+
 static int arch_timer_starting_cpu(unsigned int cpu)
 {
        struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+       u32 flags;
 
        __arch_timer_setup(ARCH_CP15_TIMER, clk);
 
-       enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+       flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
+       enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
 
-       if (arch_timer_has_nonsecure_ppi())
-               enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+       if (arch_timer_has_nonsecure_ppi()) {
+               flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+               enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
+       }
 
        arch_counter_set_user_access();
        if (evtstrm_enable)
index 87796e0864e945e41725be06106234ca8a32a133..d3ffde8066298ff48d64c990eefe1dec0bf34cb6 100644 (file)
@@ -145,11 +145,30 @@ static struct powernv_pstate_info {
 /* Use following macros for conversions between pstate_id and index */
 static inline int idx_to_pstate(unsigned int i)
 {
+       if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+               pr_warn_once("index %u is out of bound\n", i);
+               return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+       }
+
        return powernv_freqs[i].driver_data;
 }
 
 static inline unsigned int pstate_to_idx(int pstate)
 {
+       int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+       int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+       if (min > 0) {
+               if (unlikely((pstate < max) || (pstate > min))) {
+                       pr_warn_once("pstate %d is out of bound\n", pstate);
+                       return powernv_pstate_info.nominal;
+               }
+       } else {
+               if (unlikely((pstate > max) || (pstate < min))) {
+                       pr_warn_once("pstate %d is out of bound\n", pstate);
+                       return powernv_pstate_info.nominal;
+               }
+       }
        /*
         * abs() is deliberately used so that is works with
         * both monotonically increasing and decreasing
@@ -593,7 +612,7 @@ void gpstate_timer_handler(unsigned long data)
        } else {
                gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
                                                 gpstates->highest_lpstate_idx,
-                                                freq_data.pstate_id);
+                                                gpstates->last_lpstate_idx);
        }
 
        /*
index c99c24bc79b02262298ea64b5ed3e6e625429f4d..9ae6c116c4746286770052fb6f241baddc08d5b0 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/efi.h>
+#include <linux/vmalloc.h>
 
 #define NO_FURTHER_WRITE_ACTION -1
 
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
        int ret;
        void *cap_hdr_temp;
 
-       cap_hdr_temp = kmap(cap_info->pages[0]);
+       cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
+                       VM_MAP, PAGE_KERNEL);
        if (!cap_hdr_temp) {
-               pr_debug("%s: kmap() failed\n", __func__);
+               pr_debug("%s: vmap() failed\n", __func__);
                return -EFAULT;
        }
 
        ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
-       kunmap(cap_info->pages[0]);
+       vunmap(cap_hdr_temp);
        if (ret) {
                pr_err("%s: efi_capsule_update() failed\n", __func__);
                return ret;
index 53b9fd2293ee8f5af6f7f6a38de61730c8c99ec9..6eedff45e6d77811a5c4922e6be6fa5cabb3b307 100644 (file)
@@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule,
  * map the capsule described by @capsule with its data in @pages and
  * send it to the firmware via the UpdateCapsule() runtime service.
  *
- * @capsule must be a virtual mapping of the first page in @pages
- * (@pages[0]) in the kernel address space. That is, a
- * capsule_header_t that describes the entire contents of the capsule
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.  A
+ * capsule_header_t that describes the entire contents of the capsule
  * must be at the start of the first data page.
  *
  * Even though this function will validate that the firmware supports
index 98dd47a30fc754af1aede05d31dc2c6179d6d2b1..0e41c45a2936b897ab1a238d79997f4632a629c4 100644 (file)
@@ -10,27 +10,6 @@ config ARCH_HAVE_CUSTOM_GPIO_H
          overriding the default implementations.  New uses of this are
          strongly discouraged.
 
-config ARCH_WANT_OPTIONAL_GPIOLIB
-       bool
-       help
-         Select this config option from the architecture Kconfig, if
-         it is possible to use gpiolib on the architecture, but let the
-         user decide whether to actually build it or not.
-         Select this instead of ARCH_REQUIRE_GPIOLIB, if your architecture does
-         not depend on GPIOs being available, but rather let the user
-         decide whether he needs it or not.
-
-config ARCH_REQUIRE_GPIOLIB
-       bool
-       select GPIOLIB
-       help
-         Platforms select gpiolib if they use this infrastructure
-         for all their GPIOs, usually starting with ones integrated
-         into SOC processors.
-         Selecting this from the architecture code will cause the gpiolib
-         code to always get built in.
-
-
 menuconfig GPIOLIB
        bool "GPIO Support"
        select ANON_INODES
@@ -127,6 +106,13 @@ config GPIO_AMDPT
          driver for GPIO functionality on Promontory IOHub
          Require ACPI ASL code to enumerate as a platform device.
 
+config GPIO_ASPEED
+       tristate "Aspeed GPIO support"
+       depends on (ARCH_ASPEED || COMPILE_TEST) && OF_GPIO
+       select GPIOLIB_IRQCHIP
+       help
+         Say Y here to support Aspeed AST2400 and AST2500 GPIO controllers.
+
 config GPIO_ATH79
        tristate "Atheros AR71XX/AR724X/AR913X GPIO support"
        default y if ATH79
@@ -137,6 +123,12 @@ config GPIO_ATH79
          Select this option to enable GPIO driver for
          Atheros AR71XX/AR724X/AR913X SoC devices.
 
+config GPIO_AXP209
+       tristate "X-Powers AXP209 PMIC GPIO Support"
+       depends on MFD_AXP20X
+       help
+         Say yes to enable GPIO support for the AXP209 PMIC
+
 config GPIO_BCM_KONA
        bool "Broadcom Kona GPIO"
        depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -236,7 +228,8 @@ config GPIO_ICH
 
 config GPIO_IOP
        tristate "Intel IOP GPIO"
-       depends on ARM && (ARCH_IOP32X || ARCH_IOP33X)
+       depends on ARCH_IOP32X || ARCH_IOP33X || COMPILE_TEST
+       select GPIO_GENERIC
        help
          Say yes here to support the GPIO functionality of a number of Intel
           IOP32X or IOP33X processors.
@@ -573,6 +566,19 @@ config GPIO_F7188X
          To compile this driver as a module, choose M here: the module will
          be called f7188x-gpio.
 
+config GPIO_GPIO_MM
+       tristate "Diamond Systems GPIO-MM GPIO support"
+       depends on ISA_BUS_API
+       help
+         Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
+
+         The Diamond Systems GPIO-MM device features 48 lines of digital I/O
+         via the emulation of dual 82C55A PPI chips. This driver provides GPIO
+         support for these 48 channels of digital I/O.
+
+         The base port addresses for the devices may be configured via the base
+         array module parameter.
+
 config GPIO_IT87
        tristate "IT87xx GPIO support"
        help
@@ -779,6 +785,13 @@ config GPIO_TPIC2810
          To compile this driver as a module, choose M here: the module will
          be called gpio-tpic2810.
 
+config GPIO_TS4900
+       tristate "Technologic Systems FPGA I2C GPIO"
+       select REGMAP_I2C
+       help
+         Say yes here to enable the GPIO driver for Technologic's FPGA core.
+         Supported series include the TS-4100, TS-4900, TS-7970 and TS-7990.
+
 endmenu
 
 menu "MFD GPIO expanders"
@@ -874,6 +887,16 @@ config GPIO_LP3943
          LP3943 can be used as a GPIO expander which provides up to 16 GPIOs.
          Open drain outputs are required for this usage.
 
+config GPIO_LP873X
+       tristate "TI LP873X GPO"
+       depends on MFD_TI_LP873X
+       help
+         This driver supports the GPOs on TI LP873x PMICs. Two GPOs are
+         present on LP873x PMICs.
+
+         This driver can also be built as a module. If so, the module will be
+         called gpio-lp873x.
+
 config GPIO_MAX77620
        tristate "GPIO support for PMIC MAX77620 and MAX20024"
        depends on MFD_MAX77620
@@ -984,6 +1007,19 @@ config GPIO_UCB1400
          This enables support for the Philips UCB1400 GPIO pins.
          The UCB1400 is an AC97 audio codec.
 
+config GPIO_WHISKEY_COVE
+       tristate "GPIO support for Whiskey Cove PMIC"
+       depends on INTEL_SOC_PMIC
+       select GPIOLIB_IRQCHIP
+       help
+         Support for GPIO pins on Whiskey Cove PMIC.
+
+         Say Yes if you have an Intel SoC based tablet with Whiskey Cove PMIC
+         inside.
+
+         This driver can also be built as a module. If so, the module will be
+         called gpio-wcove.
+
 config GPIO_WM831X
        tristate "WM831x GPIOs"
        depends on MFD_WM831X
index 2a035ed8f168196ec7596d303685b439b2c2938e..b3e039c3ae8d6146d64d9bf30b45827f5f7c915e 100644 (file)
@@ -28,6 +28,8 @@ obj-$(CONFIG_GPIO_AMD8111)    += gpio-amd8111.o
 obj-$(CONFIG_GPIO_AMDPT)       += gpio-amdpt.o
 obj-$(CONFIG_GPIO_ARIZONA)     += gpio-arizona.o
 obj-$(CONFIG_GPIO_ATH79)       += gpio-ath79.o
+obj-$(CONFIG_GPIO_ASPEED)      += gpio-aspeed.o
+obj-$(CONFIG_GPIO_AXP209)      += gpio-axp209.o
 obj-$(CONFIG_GPIO_BCM_KONA)    += gpio-bcm-kona.o
 obj-$(CONFIG_GPIO_BRCMSTB)     += gpio-brcmstb.o
 obj-$(CONFIG_GPIO_BT8XX)       += gpio-bt8xx.o
@@ -44,6 +46,7 @@ obj-$(CONFIG_GPIO_EP93XX)     += gpio-ep93xx.o
 obj-$(CONFIG_GPIO_ETRAXFS)     += gpio-etraxfs.o
 obj-$(CONFIG_GPIO_F7188X)      += gpio-f7188x.o
 obj-$(CONFIG_GPIO_GE_FPGA)     += gpio-ge.o
+obj-$(CONFIG_GPIO_GPIO_MM)     += gpio-gpio-mm.o
 obj-$(CONFIG_GPIO_GRGPIO)      += gpio-grgpio.o
 obj-$(CONFIG_GPIO_ICH)         += gpio-ich.o
 obj-$(CONFIG_GPIO_IOP)         += gpio-iop.o
@@ -56,6 +59,7 @@ obj-$(CONFIG_GPIO_LOONGSON)   += gpio-loongson.o
 obj-$(CONFIG_GPIO_LP3943)      += gpio-lp3943.o
 obj-$(CONFIG_GPIO_LPC18XX)     += gpio-lpc18xx.o
 obj-$(CONFIG_ARCH_LPC32XX)     += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LP873X)      += gpio-lp873x.o
 obj-$(CONFIG_GPIO_LYNXPOINT)   += gpio-lynxpoint.o
 obj-$(CONFIG_GPIO_MAX730X)     += gpio-max730x.o
 obj-$(CONFIG_GPIO_MAX7300)     += gpio-max7300.o
@@ -110,6 +114,7 @@ obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
 obj-$(CONFIG_GPIO_TPS65910)    += gpio-tps65910.o
 obj-$(CONFIG_GPIO_TPS65912)    += gpio-tps65912.o
 obj-$(CONFIG_GPIO_TS4800)      += gpio-ts4800.o
+obj-$(CONFIG_GPIO_TS4900)      += gpio-ts4900.o
 obj-$(CONFIG_GPIO_TS5500)      += gpio-ts5500.o
 obj-$(CONFIG_GPIO_TWL4030)     += gpio-twl4030.o
 obj-$(CONFIG_GPIO_TWL6040)     += gpio-twl6040.o
@@ -120,6 +125,7 @@ obj-$(CONFIG_GPIO_VF610)    += gpio-vf610.o
 obj-$(CONFIG_GPIO_VIPERBOARD)  += gpio-viperboard.o
 obj-$(CONFIG_GPIO_VR41XX)      += gpio-vr41xx.o
 obj-$(CONFIG_GPIO_VX855)       += gpio-vx855.o
+obj-$(CONFIG_GPIO_WHISKEY_COVE)        += gpio-wcove.o
 obj-$(CONFIG_GPIO_WM831X)      += gpio-wm831x.o
 obj-$(CONFIG_GPIO_WM8350)      += gpio-wm8350.o
 obj-$(CONFIG_GPIO_WM8994)      += gpio-wm8994.o
index 3f87a03abc222bc31cbc68eef98993c9e3303ad4..5bddbd507ca9f105aa18cfe5f43b673b676d551d 100644 (file)
@@ -17,6 +17,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 
index 991370494922b1f7ac3489dc0616793be319378a..482462889c8f18d05e557f2aa0616e587af6ba27 100644 (file)
@@ -79,7 +79,7 @@ static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
                           ARIZONA_GPN_LVL, value);
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "arizona",
        .owner                  = THIS_MODULE,
        .direction_input        = arizona_gpio_direction_in,
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
new file mode 100644 (file)
index 0000000..8aa3406
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * Joel Stanley <joel@jms.id.au>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/driver.h>
+#include <linux/pinctrl/consumer.h>
+
+struct aspeed_gpio {
+       struct gpio_chip chip;
+       spinlock_t lock;
+       void __iomem *base;
+       int irq;
+};
+
+struct aspeed_gpio_bank {
+       uint16_t        val_regs;
+       uint16_t        irq_regs;
+       const char      names[4];
+};
+
+static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
+       {
+               .val_regs = 0x0000,
+               .irq_regs = 0x0008,
+               .names = { 'A', 'B', 'C', 'D' },
+       },
+       {
+               .val_regs = 0x0020,
+               .irq_regs = 0x0028,
+               .names = { 'E', 'F', 'G', 'H' },
+       },
+       {
+               .val_regs = 0x0070,
+               .irq_regs = 0x0098,
+               .names = { 'I', 'J', 'K', 'L' },
+       },
+       {
+               .val_regs = 0x0078,
+               .irq_regs = 0x00e8,
+               .names = { 'M', 'N', 'O', 'P' },
+       },
+       {
+               .val_regs = 0x0080,
+               .irq_regs = 0x0118,
+               .names = { 'Q', 'R', 'S', 'T' },
+       },
+       {
+               .val_regs = 0x0088,
+               .irq_regs = 0x0148,
+               .names = { 'U', 'V', 'W', 'X' },
+       },
+       /*
+        * A bank exists for { 'Y', 'Z', "AA", "AB" }, but is not implemented.
+        * Only half of the GPIO Y lines support interrupt configuration, and
+        * none of Z, AA or AB do, as they are output only.
+        */
+};
+
+#define GPIO_BANK(x)   ((x) >> 5)
+#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BIT(x)    BIT(GPIO_OFFSET(x))
+
+#define GPIO_DATA      0x00
+#define GPIO_DIR       0x04
+
+#define GPIO_IRQ_ENABLE        0x00
+#define GPIO_IRQ_TYPE0 0x04
+#define GPIO_IRQ_TYPE1 0x08
+#define GPIO_IRQ_TYPE2 0x0c
+#define GPIO_IRQ_STATUS        0x10
+
+static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
+{
+       unsigned int bank = GPIO_BANK(offset);
+
+       WARN_ON(bank >= ARRAY_SIZE(aspeed_gpio_banks));
+       return &aspeed_gpio_banks[bank];
+}
+
+static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
+               const struct aspeed_gpio_bank *bank,
+               unsigned int reg)
+{
+       return gpio->base + bank->val_regs + reg;
+}
+
+static void __iomem *bank_irq_reg(struct aspeed_gpio *gpio,
+               const struct aspeed_gpio_bank *bank,
+               unsigned int reg)
+{
+       return gpio->base + bank->irq_regs + reg;
+}
+
+static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_gpio_bank *bank = to_bank(offset);
+
+       return !!(ioread32(bank_val_reg(gpio, bank, GPIO_DATA))
+                       & GPIO_BIT(offset));
+}
+
+static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
+                             int val)
+{
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_gpio_bank *bank = to_bank(offset);
+       void __iomem *addr;
+       u32 reg;
+
+       addr = bank_val_reg(gpio, bank, GPIO_DATA);
+       reg = ioread32(addr);
+
+       if (val)
+               reg |= GPIO_BIT(offset);
+       else
+               reg &= ~GPIO_BIT(offset);
+
+       iowrite32(reg, addr);
+}
+
+static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
+                           int val)
+{
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       __aspeed_gpio_set(gc, offset, val);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
+{
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_gpio_bank *bank = to_bank(offset);
+       unsigned long flags;
+       u32 reg;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
+       iowrite32(reg & ~GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return 0;
+}
+
+static int aspeed_gpio_dir_out(struct gpio_chip *gc,
+                              unsigned int offset, int val)
+{
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_gpio_bank *bank = to_bank(offset);
+       unsigned long flags;
+       u32 reg;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
+       iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR));
+
+       __aspeed_gpio_set(gc, offset, val);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return 0;
+}
+
+static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+       struct aspeed_gpio *gpio = gpiochip_get_data(gc);
+       const struct aspeed_gpio_bank *bank = to_bank(offset);
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       return !val;
+}
+
+static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
+               struct aspeed_gpio **gpio,
+               const struct aspeed_gpio_bank **bank,
+               u32 *bit)
+{
+       int offset;
+
+       offset = irqd_to_hwirq(d);
+
+       *gpio = irq_data_get_irq_chip_data(d);
+       *bank = to_bank(offset);
+       *bit = GPIO_BIT(offset);
+
+       return 0;
+}
+
+static void aspeed_gpio_irq_ack(struct irq_data *d)
+{
+       const struct aspeed_gpio_bank *bank;
+       struct aspeed_gpio *gpio;
+       unsigned long flags;
+       void __iomem *status_addr;
+       u32 bit;
+       int rc;
+
+       rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+       if (rc)
+               return;
+
+       status_addr = bank_irq_reg(gpio, bank, GPIO_IRQ_STATUS);
+
+       spin_lock_irqsave(&gpio->lock, flags);
+       iowrite32(bit, status_addr);
+       spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
+{
+       const struct aspeed_gpio_bank *bank;
+       struct aspeed_gpio *gpio;
+       unsigned long flags;
+       u32 reg, bit;
+       void __iomem *addr;
+       int rc;
+
+       rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+       if (rc)
+               return;
+
+       addr = bank_irq_reg(gpio, bank, GPIO_IRQ_ENABLE);
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       reg = ioread32(addr);
+       if (set)
+               reg |= bit;
+       else
+               reg &= ~bit;
+       iowrite32(reg, addr);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_gpio_irq_mask(struct irq_data *d)
+{
+       aspeed_gpio_irq_set_mask(d, false);
+}
+
+static void aspeed_gpio_irq_unmask(struct irq_data *d)
+{
+       aspeed_gpio_irq_set_mask(d, true);
+}
+
+static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
+{
+       u32 type0 = 0;
+       u32 type1 = 0;
+       u32 type2 = 0;
+       u32 bit, reg;
+       const struct aspeed_gpio_bank *bank;
+       irq_flow_handler_t handler;
+       struct aspeed_gpio *gpio;
+       unsigned long flags;
+       void __iomem *addr;
+       int rc;
+
+       rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit);
+       if (rc)
+               return -EINVAL;
+
+       switch (type & IRQ_TYPE_SENSE_MASK) {
+       case IRQ_TYPE_EDGE_BOTH:
+               type2 |= bit;
+       case IRQ_TYPE_EDGE_RISING:
+               type0 |= bit;
+       case IRQ_TYPE_EDGE_FALLING:
+               handler = handle_edge_irq;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               type0 |= bit;
+       case IRQ_TYPE_LEVEL_LOW:
+               type1 |= bit;
+               handler = handle_level_irq;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&gpio->lock, flags);
+
+       addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE0);
+       reg = ioread32(addr);
+       reg = (reg & ~bit) | type0;
+       iowrite32(reg, addr);
+
+       addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE1);
+       reg = ioread32(addr);
+       reg = (reg & ~bit) | type1;
+       iowrite32(reg, addr);
+
+       addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE2);
+       reg = ioread32(addr);
+       reg = (reg & ~bit) | type2;
+       iowrite32(reg, addr);
+
+       spin_unlock_irqrestore(&gpio->lock, flags);
+
+       irq_set_handler_locked(d, handler);
+
+       return 0;
+}
+
+static void aspeed_gpio_irq_handler(struct irq_desc *desc)
+{
+       struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+       struct irq_chip *ic = irq_desc_get_chip(desc);
+       struct aspeed_gpio *data = gpiochip_get_data(gc);
+       unsigned int i, p, girq;
+       unsigned long reg;
+
+       chained_irq_enter(ic, desc);
+
+       for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
+               const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
+
+               reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
+
+               for_each_set_bit(p, &reg, 32) {
+                       girq = irq_find_mapping(gc->irqdomain, i * 32 + p);
+                       generic_handle_irq(girq);
+               }
+
+       }
+
+       chained_irq_exit(ic, desc);
+}
+
+static struct irq_chip aspeed_gpio_irqchip = {
+       .name           = "aspeed-gpio",
+       .irq_ack        = aspeed_gpio_irq_ack,
+       .irq_mask       = aspeed_gpio_irq_mask,
+       .irq_unmask     = aspeed_gpio_irq_unmask,
+       .irq_set_type   = aspeed_gpio_set_type,
+};
+
+static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
+               struct platform_device *pdev)
+{
+       int rc;
+
+       rc = platform_get_irq(pdev, 0);
+       if (rc < 0)
+               return rc;
+
+       gpio->irq = rc;
+
+       rc = gpiochip_irqchip_add(&gpio->chip, &aspeed_gpio_irqchip,
+                       0, handle_bad_irq, IRQ_TYPE_NONE);
+       if (rc) {
+               dev_info(&pdev->dev, "Could not add irqchip\n");
+               return rc;
+       }
+
+       gpiochip_set_chained_irqchip(&gpio->chip, &aspeed_gpio_irqchip,
+                                    gpio->irq, aspeed_gpio_irq_handler);
+
+       return 0;
+}
+
+static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+       return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+       pinctrl_free_gpio(chip->base + offset);
+}
+
+static int __init aspeed_gpio_probe(struct platform_device *pdev)
+{
+       struct aspeed_gpio *gpio;
+       struct resource *res;
+       int rc;
+
+       gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+       if (!gpio)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENXIO;
+
+       gpio->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(gpio->base))
+               return PTR_ERR(gpio->base);
+
+       spin_lock_init(&gpio->lock);
+
+       gpio->chip.ngpio = ARRAY_SIZE(aspeed_gpio_banks) * 32;
+
+       gpio->chip.parent = &pdev->dev;
+       gpio->chip.direction_input = aspeed_gpio_dir_in;
+       gpio->chip.direction_output = aspeed_gpio_dir_out;
+       gpio->chip.get_direction = aspeed_gpio_get_direction;
+       gpio->chip.request = aspeed_gpio_request;
+       gpio->chip.free = aspeed_gpio_free;
+       gpio->chip.get = aspeed_gpio_get;
+       gpio->chip.set = aspeed_gpio_set;
+       gpio->chip.label = dev_name(&pdev->dev);
+       gpio->chip.base = -1;
+
+       rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+       if (rc < 0)
+               return rc;
+
+       return aspeed_gpio_setup_irqs(gpio, pdev);
+}
+
+static const struct of_device_id aspeed_gpio_of_table[] = {
+       { .compatible = "aspeed,ast2400-gpio" },
+       { .compatible = "aspeed,ast2500-gpio" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
+
+static struct platform_driver aspeed_gpio_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = aspeed_gpio_of_table,
+       },
+};
+
+module_platform_driver_probe(aspeed_gpio_driver, aspeed_gpio_probe);
+
+MODULE_DESCRIPTION("Aspeed GPIO Driver");
+MODULE_LICENSE("GPL");
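
As a reading aid for the new driver: the GPIO_BANK/GPIO_OFFSET/GPIO_BIT macros split a global line number into a 32-line bank plus a bit within it. A worked example (the value 37 is arbitrary):

	unsigned int offset = 37;
	unsigned int bank = offset >> 5;	/* GPIO_BANK(37) == 1: the E/F/G/H bank */
	unsigned int shift = offset & 0x1f;	/* GPIO_OFFSET(37) == 5 */
	u32 mask = BIT(shift);			/* GPIO_BIT(37) == BIT(5), i.e. line E5 */
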
index c4f4cddc7c1a42e52f2f7f80926878e084ded70e..9457e2022bf6c1e0c4f8cc4cd11acf224ff68e54 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_data/gpio-ath79.h>
 #include <linux/of_device.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/irq.h>
 
 #define AR71XX_GPIO_REG_OE             0x00
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
new file mode 100644 (file)
index 0000000..3174799
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * AXP20x GPIO driver
+ *
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_GPIO_FUNCTIONS          0x7
+#define AXP20X_GPIO_FUNCTION_OUT_LOW   0
+#define AXP20X_GPIO_FUNCTION_OUT_HIGH  1
+#define AXP20X_GPIO_FUNCTION_INPUT     2
+
+struct axp20x_gpio {
+       struct gpio_chip        chip;
+       struct regmap           *regmap;
+};
+
+static int axp20x_gpio_get_reg(unsigned offset)
+{
+       switch (offset) {
+       case 0:
+               return AXP20X_GPIO0_CTRL;
+       case 1:
+               return AXP20X_GPIO1_CTRL;
+       case 2:
+               return AXP20X_GPIO2_CTRL;
+       }
+
+       return -EINVAL;
+}
+
+static int axp20x_gpio_input(struct gpio_chip *chip, unsigned offset)
+{
+       struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+       int reg;
+
+       reg = axp20x_gpio_get_reg(offset);
+       if (reg < 0)
+               return reg;
+
+       return regmap_update_bits(gpio->regmap, reg,
+                                 AXP20X_GPIO_FUNCTIONS,
+                                 AXP20X_GPIO_FUNCTION_INPUT);
+}
+
+static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+       unsigned int val;
+       int reg, ret;
+
+       reg = axp20x_gpio_get_reg(offset);
+       if (reg < 0)
+               return reg;
+
+       ret = regmap_read(gpio->regmap, reg, &val);
+       if (ret)
+               return ret;
+
+       return !!(val & BIT(offset + 4));
+}
+
+static int axp20x_gpio_output(struct gpio_chip *chip, unsigned offset,
+                             int value)
+{
+       struct axp20x_gpio *gpio = gpiochip_get_data(chip);
+       int reg;
+
+       reg = axp20x_gpio_get_reg(offset);
+       if (reg < 0)
+               return reg;
+
+       return regmap_update_bits(gpio->regmap, reg,
+                                 AXP20X_GPIO_FUNCTIONS,
+                                 value ? AXP20X_GPIO_FUNCTION_OUT_HIGH
+                                 : AXP20X_GPIO_FUNCTION_OUT_LOW);
+}
+
+static void axp20x_gpio_set(struct gpio_chip *chip, unsigned offset,
+                           int value)
+{
+       axp20x_gpio_output(chip, offset, value);
+}
+
+static int axp20x_gpio_probe(struct platform_device *pdev)
+{
+       struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+       struct axp20x_gpio *gpio;
+       int ret;
+
+       if (!of_device_is_available(pdev->dev.of_node))
+               return -ENODEV;
+
+       if (!axp20x) {
+               dev_err(&pdev->dev, "Parent drvdata not set\n");
+               return -EINVAL;
+       }
+
+       gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+       if (!gpio)
+               return -ENOMEM;
+
+       gpio->chip.base                 = -1;
+       gpio->chip.can_sleep            = true;
+       gpio->chip.parent               = &pdev->dev;
+       gpio->chip.label                = dev_name(&pdev->dev);
+       gpio->chip.owner                = THIS_MODULE;
+       gpio->chip.get                  = axp20x_gpio_get;
+       gpio->chip.set                  = axp20x_gpio_set;
+       gpio->chip.direction_input      = axp20x_gpio_input;
+       gpio->chip.direction_output     = axp20x_gpio_output;
+       gpio->chip.ngpio                = 3;
+
+       gpio->regmap = axp20x->regmap;
+
+       ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register GPIO chip\n");
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "AXP209 GPIO driver loaded\n");
+
+       return 0;
+}
+
+static const struct of_device_id axp20x_gpio_match[] = {
+       { .compatible = "x-powers,axp209-gpio" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_gpio_match);
+
+static struct platform_driver axp20x_gpio_driver = {
+       .probe          = axp20x_gpio_probe,
+       .driver = {
+               .name           = "axp20x-gpio",
+               .of_match_table = axp20x_gpio_match,
+       },
+};
+
+module_platform_driver(axp20x_gpio_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20x PMIC GPIO driver");
+MODULE_LICENSE("GPL");
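
As a reading aid for the AXP20x driver: each GPIOn_CTRL register carries both the function selection and the input level, which is why axp20x_gpio_set() simply re-runs the output path. The helper below is hypothetical (not part of the driver) and only restates what axp20x_gpio_get() reads:

/* level bit for GPIOn in its control register, per axp20x_gpio_get() above */
static inline u32 axp20x_level_bit(unsigned int offset)
{
	return BIT(offset + 4);		/* e.g. GPIO1 -> bit 5 of AXP20X_GPIO1_CTRL */
}
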
index 953e4b829e32b8aa02f0cdb0774f8fe8244ef565..3d1cf018e8e7e27c8d7baf06a9755c7ae6a5e429 100644 (file)
@@ -308,7 +308,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
        return 0;
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label = "bcm-kona-gpio",
        .owner = THIS_MODULE,
        .request = bcm_kona_gpio_request,
index e29553b7ccdb8aaad14d574e5b6fcc433e094bb5..dd8977cf3e854fae4259d01e89a1bbf62e0b3ff2 100644 (file)
@@ -184,7 +184,7 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
        return irq;
 }
 
-static struct gpio_chip reference_gp = {
+static const struct gpio_chip reference_gp = {
        .label = "da9052-gpio",
        .owner = THIS_MODULE,
        .get = da9052_gpio_get,
index 2c2c18dc6c4f394d4bce5ac675d9ca45a37e9a9d..82053b52cba03d7cb998ca4ed38f3b243b255c7a 100644 (file)
@@ -121,7 +121,7 @@ static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
                                  DA9055_IRQ_GPI0 + offset);
 }
 
-static struct gpio_chip reference_gp = {
+static const struct gpio_chip reference_gp = {
        .label = "da9055-gpio",
        .owner = THIS_MODULE,
        .get = da9055_gpio_get,
index 600be84187073a211ad1db5fef6381670ad87be2..e8accde62aa71421809783c4615c6cf58354243f 100644 (file)
@@ -214,8 +214,7 @@ static struct f7188x_gpio_bank f81866_gpio_bank[] = {
 static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 {
        int err;
-       struct f7188x_gpio_bank *bank =
-               container_of(chip, struct f7188x_gpio_bank, chip);
+       struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
        struct f7188x_sio *sio = bank->data->sio;
        u8 dir;
 
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
new file mode 100644 (file)
index 0000000..1e7def9
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * GPIO driver for the Diamond Systems GPIO-MM
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following Diamond Systems devices: GPIO-MM and
+ * GPIO-MM-12.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+
+#define GPIOMM_EXTENT 8
+#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
+
+static unsigned int base[MAX_NUM_GPIOMM];
+static unsigned int num_gpiomm;
+module_param_array(base, uint, &num_gpiomm, 0);
+MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
+
+/**
+ * struct gpiomm_gpio - GPIO device private data structure
+ * @chip:      instance of the gpio_chip
+ * @io_state:  bit I/O state (whether bit is set to input or output)
+ * @out_state: output bits state
+ * @control:   Control registers state
+ * @lock:      synchronization lock to prevent I/O race conditions
+ * @base:      base port address of the GPIO device
+ */
+struct gpiomm_gpio {
+       struct gpio_chip chip;
+       unsigned char io_state[6];
+       unsigned char out_state[6];
+       unsigned char control[2];
+       spinlock_t lock;
+       unsigned int base;
+};
+
+static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
+       unsigned int offset)
+{
+       struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+       const unsigned int port = offset / 8;
+       const unsigned int mask = BIT(offset % 8);
+
+       return !!(gpiommgpio->io_state[port] & mask);
+}
+
+static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
+       unsigned int offset)
+{
+       struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+       const unsigned int io_port = offset / 8;
+       const unsigned int control_port = io_port / 3;
+       const unsigned int control_addr = gpiommgpio->base + 3 + control_port*4;
+       unsigned long flags;
+       unsigned int control;
+
+       spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+       /* Check if configuring Port C */
+       if (io_port == 2 || io_port == 5) {
+               /* Port C can be configured by nibble */
+               if (offset % 8 > 3) {
+                       gpiommgpio->io_state[io_port] |= 0xF0;
+                       gpiommgpio->control[control_port] |= BIT(3);
+               } else {
+                       gpiommgpio->io_state[io_port] |= 0x0F;
+                       gpiommgpio->control[control_port] |= BIT(0);
+               }
+       } else {
+               gpiommgpio->io_state[io_port] |= 0xFF;
+               if (io_port == 0 || io_port == 3)
+                       gpiommgpio->control[control_port] |= BIT(4);
+               else
+                       gpiommgpio->control[control_port] |= BIT(1);
+       }
+
+       control = BIT(7) | gpiommgpio->control[control_port];
+       outb(control, control_addr);
+
+       spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+
+       return 0;
+}
+
+static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
+       unsigned int offset, int value)
+{
+       struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+       const unsigned int io_port = offset / 8;
+       const unsigned int control_port = io_port / 3;
+       const unsigned int mask = BIT(offset % 8);
+       const unsigned int control_addr = gpiommgpio->base + 3 + control_port*4;
+       const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
+       unsigned long flags;
+       unsigned int control;
+
+       spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+       /* Check if configuring Port C */
+       if (io_port == 2 || io_port == 5) {
+               /* Port C can be configured by nibble */
+               if (offset % 8 > 3) {
+                       gpiommgpio->io_state[io_port] &= 0x0F;
+                       gpiommgpio->control[control_port] &= ~BIT(3);
+               } else {
+                       gpiommgpio->io_state[io_port] &= 0xF0;
+                       gpiommgpio->control[control_port] &= ~BIT(0);
+               }
+       } else {
+               gpiommgpio->io_state[io_port] &= 0x00;
+               if (io_port == 0 || io_port == 3)
+                       gpiommgpio->control[control_port] &= ~BIT(4);
+               else
+                       gpiommgpio->control[control_port] &= ~BIT(1);
+       }
+
+       if (value)
+               gpiommgpio->out_state[io_port] |= mask;
+       else
+               gpiommgpio->out_state[io_port] &= ~mask;
+
+       control = BIT(7) | gpiommgpio->control[control_port];
+       outb(control, control_addr);
+
+       outb(gpiommgpio->out_state[io_port], gpiommgpio->base + out_port);
+
+       spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+
+       return 0;
+}
+
+static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+       struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+       const unsigned int port = offset / 8;
+       const unsigned int mask = BIT(offset % 8);
+       const unsigned int in_port = (port > 2) ? port + 1 : port;
+       unsigned long flags;
+       unsigned int port_state;
+
+       spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+       /* ensure that GPIO is set for input */
+       if (!(gpiommgpio->io_state[port] & mask)) {
+               spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+               return -EINVAL;
+       }
+
+       port_state = inb(gpiommgpio->base + in_port);
+
+       spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+
+       return !!(port_state & mask);
+}
+
+static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
+       int value)
+{
+       struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
+       const unsigned int port = offset / 8;
+       const unsigned int mask = BIT(offset % 8);
+       const unsigned int out_port = (port > 2) ? port + 1 : port;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpiommgpio->lock, flags);
+
+       if (value)
+               gpiommgpio->out_state[port] |= mask;
+       else
+               gpiommgpio->out_state[port] &= ~mask;
+
+       outb(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
+
+       spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+}
+
+static int gpiomm_probe(struct device *dev, unsigned int id)
+{
+       struct gpiomm_gpio *gpiommgpio;
+       const char *const name = dev_name(dev);
+       int err;
+
+       gpiommgpio = devm_kzalloc(dev, sizeof(*gpiommgpio), GFP_KERNEL);
+       if (!gpiommgpio)
+               return -ENOMEM;
+
+       if (!devm_request_region(dev, base[id], GPIOMM_EXTENT, name)) {
+               dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+                       base[id], base[id] + GPIOMM_EXTENT);
+               return -EBUSY;
+       }
+
+       gpiommgpio->chip.label = name;
+       gpiommgpio->chip.parent = dev;
+       gpiommgpio->chip.owner = THIS_MODULE;
+       gpiommgpio->chip.base = -1;
+       gpiommgpio->chip.ngpio = 48;
+       gpiommgpio->chip.get_direction = gpiomm_gpio_get_direction;
+       gpiommgpio->chip.direction_input = gpiomm_gpio_direction_input;
+       gpiommgpio->chip.direction_output = gpiomm_gpio_direction_output;
+       gpiommgpio->chip.get = gpiomm_gpio_get;
+       gpiommgpio->chip.set = gpiomm_gpio_set;
+       gpiommgpio->base = base[id];
+
+       spin_lock_init(&gpiommgpio->lock);
+
+       dev_set_drvdata(dev, gpiommgpio);
+
+       err = gpiochip_add_data(&gpiommgpio->chip, gpiommgpio);
+       if (err) {
+               dev_err(dev, "GPIO registering failed (%d)\n", err);
+               return err;
+       }
+
+       /* initialize all GPIO as output */
+       outb(0x80, base[id] + 3);
+       outb(0x00, base[id]);
+       outb(0x00, base[id] + 1);
+       outb(0x00, base[id] + 2);
+       outb(0x80, base[id] + 7);
+       outb(0x00, base[id] + 4);
+       outb(0x00, base[id] + 5);
+       outb(0x00, base[id] + 6);
+
+       return 0;
+}
+
+static int gpiomm_remove(struct device *dev, unsigned int id)
+{
+       struct gpiomm_gpio *const gpiommgpio = dev_get_drvdata(dev);
+
+       gpiochip_remove(&gpiommgpio->chip);
+
+       return 0;
+}
+
+static struct isa_driver gpiomm_driver = {
+       .probe = gpiomm_probe,
+       .driver = {
+               .name = "gpio-mm"
+       },
+       .remove = gpiomm_remove
+};
+
+module_isa_driver(gpiomm_driver, num_gpiomm);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Diamond Systems GPIO-MM GPIO driver");
+MODULE_LICENSE("GPL v2");
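
A worked example of the 82C55A control-word handling in gpiomm_gpio_direction_input() above; the offset is arbitrary and the control[] state is assumed to be the all-output value left by probe:

	unsigned int offset = 21;			/* upper nibble of the first Port C */
	unsigned int io_port = offset / 8;		/* == 2 */
	unsigned int control_port = io_port / 3;	/* == 0: first 82C55A */
	u8 control = BIT(7) | BIT(3);			/* 0x88: mode-set + Port C upper nibble input */
	/* the driver then issues outb(control, base + 3 + control_port * 4) */
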
index 860c535922fd184d929e1fda6a9fff2c9452622c..98c7ff2a76e76a24194f2c3278af42f5d5c4ff9c 100644 (file)
  * your option) any later version.
  */
 
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
-#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
 #include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
 
-#define IOP3XX_N_GPIOS 8
-
-#define GPIO_IN                        0
-#define GPIO_OUT               1
-#define GPIO_LOW               0
-#define GPIO_HIGH              1
-
-/* Memory base offset */
-static void __iomem *base;
-
-#define IOP3XX_GPIO_REG(reg)   (base + (reg))
-#define IOP3XX_GPOE            IOP3XX_GPIO_REG(0x0000)
-#define IOP3XX_GPID            IOP3XX_GPIO_REG(0x0004)
-#define IOP3XX_GPOD            IOP3XX_GPIO_REG(0x0008)
-
-static void gpio_line_config(int line, int direction)
-{
-       unsigned long flags;
-       u32 val;
-
-       local_irq_save(flags);
-       val = readl(IOP3XX_GPOE);
-       if (direction == GPIO_IN) {
-               val |= BIT(line);
-       } else if (direction == GPIO_OUT) {
-               val &= ~BIT(line);
-       }
-       writel(val, IOP3XX_GPOE);
-       local_irq_restore(flags);
-}
-
-static int gpio_line_get(int line)
-{
-       return !!(readl(IOP3XX_GPID) & BIT(line));
-}
-
-static void gpio_line_set(int line, int value)
-{
-       unsigned long flags;
-       u32 val;
-
-       local_irq_save(flags);
-       val = readl(IOP3XX_GPOD);
-       if (value == GPIO_LOW) {
-               val &= ~BIT(line);
-       } else if (value == GPIO_HIGH) {
-               val |= BIT(line);
-       }
-       writel(val, IOP3XX_GPOD);
-       local_irq_restore(flags);
-}
-
-static int iop3xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
-       gpio_line_config(gpio, GPIO_IN);
-       return 0;
-}
-
-static int iop3xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
-{
-       gpio_line_set(gpio, level);
-       gpio_line_config(gpio, GPIO_OUT);
-       return 0;
-}
-
-static int iop3xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
-       return gpio_line_get(gpio);
-}
-
-static void iop3xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
-{
-       gpio_line_set(gpio, value);
-}
-
-static struct gpio_chip iop3xx_chip = {
-       .label                  = "iop3xx",
-       .direction_input        = iop3xx_gpio_direction_input,
-       .get                    = iop3xx_gpio_get_value,
-       .direction_output       = iop3xx_gpio_direction_output,
-       .set                    = iop3xx_gpio_set_value,
-       .base                   = 0,
-       .ngpio                  = IOP3XX_N_GPIOS,
-};
+#define IOP3XX_GPOE    0x0000
+#define IOP3XX_GPID    0x0004
+#define IOP3XX_GPOD    0x0008
 
 static int iop3xx_gpio_probe(struct platform_device *pdev)
 {
        struct resource *res;
+       struct gpio_chip *gc;
+       void __iomem *base;
+       int err;
+
+       gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
+       if (!gc)
+               return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
-       return devm_gpiochip_add_data(&pdev->dev, &iop3xx_chip, NULL);
+       err = bgpio_init(gc, &pdev->dev, 1, base + IOP3XX_GPID,
+                        base + IOP3XX_GPOD, NULL, NULL, base + IOP3XX_GPOE, 0);
+       if (err)
+               return err;
+
+       gc->base = 0;
+       gc->owner = THIS_MODULE;
+
+       return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
 }
 
 static struct platform_driver iop3xx_gpio_driver = {
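
For reference, the bgpio_init() arguments above map onto the removed open-coded helpers as follows (a reading aid, not new driver code):

	/*
	 *   sz = 1                      8-bit registers, eight lines
	 *   dat = base + IOP3XX_GPID    input data, replaces gpio_line_get()
	 *   set = base + IOP3XX_GPOD    output data, replaces gpio_line_set()
	 *   clr = NULL, dirout = NULL   no separate clear or direction-out registers
	 *   dirin = base + IOP3XX_GPOE  a set bit makes the line an input, matching
	 *                               the old gpio_line_config(GPIO_IN)
	 */
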
index 63a962d18cd6d732af48be3849ca2d658d9bea9b..45d29e488dbb9c7371f207c20b248d06a805db89 100644 (file)
@@ -273,7 +273,7 @@ exit:
        return rc;
 }
 
-static struct gpio_chip it87_template_chip = {
+static const struct gpio_chip it87_template_chip = {
        .label                  = KBUILD_MODNAME,
        .owner                  = THIS_MODULE,
        .request                = it87_gpio_request,
index 10c09bdd851430c9ef7de0b1114711cbd40e6c39..ad0a5958fcd06c3abb3b6671839a54c9d7857af3 100644 (file)
@@ -8,6 +8,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/module.h>
 #include <linux/gpio/driver.h>
 #include <linux/platform_device.h>
 
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
new file mode 100644 (file)
index 0000000..218c706
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *     Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65218 driver
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+#define BITS_PER_GPO           0x4
+#define LP873X_GPO_CTRL_OD     0x2
+
+struct lp873x_gpio {
+       struct gpio_chip chip;
+       struct lp873x *lp873;
+};
+
+static int lp873x_gpio_get_direction(struct gpio_chip *chip,
+                                    unsigned int offset)
+{
+       /* This device is output only */
+       return 0;
+}
+
+static int lp873x_gpio_direction_input(struct gpio_chip *chip,
+                                      unsigned int offset)
+{
+       /* This device is output only */
+       return -EINVAL;
+}
+
+static int lp873x_gpio_direction_output(struct gpio_chip *chip,
+                                       unsigned int offset, int value)
+{
+       struct lp873x_gpio *gpio = gpiochip_get_data(chip);
+
+       /* Set the initial value */
+       return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+                                 BIT(offset * BITS_PER_GPO),
+                                 value ? BIT(offset * BITS_PER_GPO) : 0);
+}
+
+static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+       struct lp873x_gpio *gpio = gpiochip_get_data(chip);
+       int ret, val;
+
+       ret = regmap_read(gpio->lp873->regmap, LP873X_REG_GPO_CTRL, &val);
+       if (ret < 0)
+               return ret;
+
+       return val & BIT(offset * BITS_PER_GPO);
+}
+
+static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+                           int value)
+{
+       struct lp873x_gpio *gpio = gpiochip_get_data(chip);
+
+       regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+                          BIT(offset * BITS_PER_GPO),
+                          value ? BIT(offset * BITS_PER_GPO) : 0);
+}
+
+static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+       struct lp873x_gpio *gpio = gpiochip_get_data(gc);
+       int ret;
+
+       switch (offset) {
+       case 0:
+               /* No MUX setup needed for GPO1 */
+               break;
+       case 1:
+               /* Set up the CLKIN_PIN_SEL MUX to GPO2 */
+               ret = regmap_update_bits(gpio->lp873->regmap, LP873X_REG_CONFIG,
+                                        LP873X_CONFIG_CLKIN_PIN_SEL, 0);
+               if (ret)
+                       return ret;
+
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int lp873x_gpio_set_single_ended(struct gpio_chip *gc,
+                                       unsigned int offset,
+                                       enum single_ended_mode mode)
+{
+       struct lp873x_gpio *gpio = gpiochip_get_data(gc);
+
+       switch (mode) {
+       case LINE_MODE_OPEN_DRAIN:
+               return regmap_update_bits(gpio->lp873->regmap,
+                                         LP873X_REG_GPO_CTRL,
+                                         BIT(offset * BITS_PER_GPO +
+                                         LP873X_GPO_CTRL_OD),
+                                         BIT(offset * BITS_PER_GPO +
+                                         LP873X_GPO_CTRL_OD));
+       case LINE_MODE_PUSH_PULL:
+               return regmap_update_bits(gpio->lp873->regmap,
+                                         LP873X_REG_GPO_CTRL,
+                                         BIT(offset * BITS_PER_GPO +
+                                         LP873X_GPO_CTRL_OD), 0);
+       default:
+               return -ENOTSUPP;
+       }
+}
+
+static const struct gpio_chip template_chip = {
+       .label                  = "lp873x-gpio",
+       .owner                  = THIS_MODULE,
+       .request                = lp873x_gpio_request,
+       .get_direction          = lp873x_gpio_get_direction,
+       .direction_input        = lp873x_gpio_direction_input,
+       .direction_output       = lp873x_gpio_direction_output,
+       .get                    = lp873x_gpio_get,
+       .set                    = lp873x_gpio_set,
+       .set_single_ended       = lp873x_gpio_set_single_ended,
+       .base                   = -1,
+       .ngpio                  = 2,
+       .can_sleep              = true,
+};
+
+static int lp873x_gpio_probe(struct platform_device *pdev)
+{
+       struct lp873x_gpio *gpio;
+       int ret;
+
+       gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+       if (!gpio)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, gpio);
+
+       gpio->lp873 = dev_get_drvdata(pdev->dev.parent);
+       gpio->chip = template_chip;
+       gpio->chip.parent = gpio->lp873->dev;
+
+       ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id lp873x_gpio_id_table[] = {
+       { "lp873x-gpio", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp873x_gpio_id_table);
+
+static struct platform_driver lp873x_gpio_driver = {
+       .driver = {
+               .name = "lp873x-gpio",
+       },
+       .probe = lp873x_gpio_probe,
+       .id_table = lp873x_gpio_id_table,
+};
+module_platform_driver(lp873x_gpio_driver);
+
+MODULE_AUTHOR("Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X GPIO driver");
+MODULE_LICENSE("GPL v2");
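
The register layout implied by BITS_PER_GPO and LP873X_GPO_CTRL_OD above, spelled out for reference (derived from the code, not quoted from a datasheet):

	/*
	 * Each GPO owns a 4-bit slice of LP873X_REG_GPO_CTRL: bit (n * 4) is the
	 * output level and bit (n * 4 + 2) selects open-drain.  For GPO2
	 * (offset 1) that is BIT(4) for the level and BIT(6) for open-drain.
	 */
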
index 98832c9f614a26e59e78aee6509a92506e1d3115..f12e02e1016d856250dc5f07521d50ef6b4516ec 100644 (file)
@@ -78,7 +78,7 @@ static int lpc18xx_gpio_direction_output(struct gpio_chip *chip,
        return lpc18xx_gpio_direction(chip, offset, true);
 }
 
-static struct gpio_chip lpc18xx_chip = {
+static const struct gpio_chip lpc18xx_chip = {
        .label                  = "lpc18xx/43xx-gpio",
        .request                = gpiochip_generic_request,
        .free                   = gpiochip_generic_free,
index fc5f197906ac97b09b14617ca56d52896b59453a..92b3ae2a67357f7714faa224a10f86a440bd4257 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
-#include <linux/platform_data/gpio-lpc32xx.h>
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
 #define GPI3_PIN_IN_SEL(x, y)                  (((x) >> (y)) & 1)
 #define GPO3_PIN_IN_SEL(x, y)                  (((x) >> (y)) & 1)
 
+#define LPC32XX_GPIO_P0_MAX    8
+#define LPC32XX_GPIO_P1_MAX    24
+#define LPC32XX_GPIO_P2_MAX    13
+#define LPC32XX_GPIO_P3_MAX    6
+#define LPC32XX_GPI_P3_MAX     29
+#define LPC32XX_GPO_P3_MAX     24
+
+#define LPC32XX_GPIO_P0_GRP    0
+#define LPC32XX_GPIO_P1_GRP    (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
+#define LPC32XX_GPIO_P2_GRP    (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
+#define LPC32XX_GPIO_P3_GRP    (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
+#define LPC32XX_GPI_P3_GRP     (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
+#define LPC32XX_GPO_P3_GRP     (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
+
 struct gpio_regs {
        void __iomem *inp_state;
        void __iomem *outp_state;
index 6ec144baeb11f1d124586b53ba353db7da7adab9..d7d03ad052d007bd1d7531a015e4688e2f603754 100644 (file)
@@ -573,6 +573,7 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
 
 #ifdef CONFIG_OF
 static const struct of_device_id bgpio_of_match[] = {
+       { .compatible = "brcm,bcm6345-gpio" },
        { .compatible = "wd,mbl-gpio" },
        { }
 };
@@ -593,6 +594,9 @@ static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev,
 
        pdata->base = -1;
 
+       if (of_device_is_big_endian(pdev->dev.of_node))
+               *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+
        if (of_property_read_bool(pdev->dev.of_node, "no-output"))
                *flags |= BGPIOF_NO_OUTPUT;
 
index d75649787e6c4eac55c582cbc7e992804f4eee21..1b7ce7f8588669f3dda9a1a8a05462d0afd7d868 100644 (file)
@@ -20,7 +20,6 @@
  *
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
@@ -328,9 +327,4 @@ static int __init platform_msic_gpio_init(void)
 {
        return platform_driver_register(&platform_msic_gpio_driver);
 }
-
 subsys_initcall(platform_msic_gpio_init);
-
-MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Medfield MSIC GPIO driver");
-MODULE_LICENSE("GPL v2");
index 1b342a3842c824158b424141636a7df368b3c787..e35af5249478b24ac02720abc3a14216c9c0109e 100644 (file)
@@ -2,7 +2,8 @@
  * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
  * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
  *
- * Based on code from Freescale,
+ * Based on code from Freescale Semiconductor,
+ * Authors: Daniel Mack, Juergen Beisert.
  * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -33,7 +34,6 @@
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/module.h>
 #include <linux/bug.h>
 
 enum mxc_gpio_hwtype {
@@ -511,9 +511,3 @@ static int __init gpio_mxc_init(void)
        return platform_driver_register(&mxc_gpio_driver);
 }
 postcore_initcall(gpio_mxc_init);
-
-MODULE_AUTHOR("Freescale Semiconductor, "
-             "Daniel Mack <danielncaiaq.de>, "
-             "Juergen Beisert <kernel@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale MXC GPIO");
-MODULE_LICENSE("GPL");
index 8394744302299f404c25cf8154f7a0b34543ed4c..3d818195e351a7e6d16138bd38b1d81abd2ac0ba 100644 (file)
@@ -152,7 +152,6 @@ static const struct of_device_id of_palmas_gpio_match[] = {
        { .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
        { },
 };
-MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
 
 static int palmas_gpio_probe(struct platform_device *pdev)
 {
index 02f2a5621bb0fd132a3a7dd131c865b8d24b107a..5d059866d17acf4eb397f85adef7481fdde4cdcc 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/unaligned.h>
 #include <linux/of_platform.h>
 #include <linux/acpi.h>
+#include <linux/regulator/consumer.h>
 
 #define PCA953X_INPUT          0
 #define PCA953X_OUTPUT         1
@@ -94,6 +95,24 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
 
 #define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
 
+struct pca953x_reg_config {
+       int direction;
+       int output;
+       int input;
+};
+
+static const struct pca953x_reg_config pca953x_regs = {
+       .direction = PCA953X_DIRECTION,
+       .output = PCA953X_OUTPUT,
+       .input = PCA953X_INPUT,
+};
+
+static const struct pca953x_reg_config pca957x_regs = {
+       .direction = PCA957X_CFG,
+       .output = PCA957X_OUT,
+       .input = PCA957X_IN,
+};
+
 struct pca953x_chip {
        unsigned gpio_start;
        u8 reg_output[MAX_BANK];
@@ -111,8 +130,13 @@ struct pca953x_chip {
        struct i2c_client *client;
        struct gpio_chip gpio_chip;
        const char *const *names;
-       int     chip_type;
        unsigned long driver_data;
+       struct regulator *regulator;
+
+       const struct pca953x_reg_config *regs;
+
+       int (*write_regs)(struct pca953x_chip *, int, u8 *);
+       int (*read_regs)(struct pca953x_chip *, int, u8 *);
 };
 
 static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val,
@@ -152,38 +176,44 @@ static int pca953x_write_single(struct pca953x_chip *chip, int reg, u32 val,
        return 0;
 }
 
-static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+static int pca953x_write_regs_8(struct pca953x_chip *chip, int reg, u8 *val)
 {
-       int ret = 0;
+       return i2c_smbus_write_byte_data(chip->client, reg, *val);
+}
 
-       if (chip->gpio_chip.ngpio <= 8)
-               ret = i2c_smbus_write_byte_data(chip->client, reg, *val);
-       else if (chip->gpio_chip.ngpio >= 24) {
-               int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
-               ret = i2c_smbus_write_i2c_block_data(chip->client,
-                                       (reg << bank_shift) | REG_ADDR_AI,
-                                       NBANK(chip), val);
-       } else {
-               switch (chip->chip_type) {
-               case PCA953X_TYPE: {
-                       __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
+static int pca953x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
 
-                       ret = i2c_smbus_write_word_data(chip->client, reg << 1,
-                                                       (__force u16)word);
-                       break;
-               }
-               case PCA957X_TYPE:
-                       ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
-                                                       val[0]);
-                       if (ret < 0)
-                               break;
-                       ret = i2c_smbus_write_byte_data(chip->client,
-                                                       (reg << 1) + 1,
-                                                       val[1]);
-                       break;
-               }
-       }
+       return i2c_smbus_write_word_data(chip->client,
+                                        reg << 1, (__force u16)word);
+}
+
+static int pca957x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       int ret;
+
+       ret = i2c_smbus_write_byte_data(chip->client, reg << 1, val[0]);
+       if (ret < 0)
+               return ret;
+
+       return i2c_smbus_write_byte_data(chip->client, (reg << 1) + 1, val[1]);
+}
+
+static int pca953x_write_regs_24(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+
+       return i2c_smbus_write_i2c_block_data(chip->client,
+                                             (reg << bank_shift) | REG_ADDR_AI,
+                                             NBANK(chip), val);
+}
 
+static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       int ret = 0;
+
+       ret = chip->write_regs(chip, reg, val);
        if (ret < 0) {
                dev_err(&chip->client->dev, "failed writing register\n");
                return ret;
@@ -192,24 +222,41 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
        return 0;
 }
 
-static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
+static int pca953x_read_regs_8(struct pca953x_chip *chip, int reg, u8 *val)
 {
        int ret;
 
-       if (chip->gpio_chip.ngpio <= 8) {
-               ret = i2c_smbus_read_byte_data(chip->client, reg);
-               *val = ret;
-       } else if (chip->gpio_chip.ngpio >= 24) {
-               int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+       ret = i2c_smbus_read_byte_data(chip->client, reg);
+       *val = ret;
 
-               ret = i2c_smbus_read_i2c_block_data(chip->client,
-                                       (reg << bank_shift) | REG_ADDR_AI,
-                                       NBANK(chip), val);
-       } else {
-               ret = i2c_smbus_read_word_data(chip->client, reg << 1);
-               val[0] = (u16)ret & 0xFF;
-               val[1] = (u16)ret >> 8;
-       }
+       return ret;
+}
+
+static int pca953x_read_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       int ret;
+
+       ret = i2c_smbus_read_word_data(chip->client, reg << 1);
+       val[0] = (u16)ret & 0xFF;
+       val[1] = (u16)ret >> 8;
+
+       return ret;
+}
+
+static int pca953x_read_regs_24(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
+
+       return i2c_smbus_read_i2c_block_data(chip->client,
+                                            (reg << bank_shift) | REG_ADDR_AI,
+                                            NBANK(chip), val);
+}
+
+static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val)
+{
+       int ret;
+
+       ret = chip->read_regs(chip, reg, val);
        if (ret < 0) {
                dev_err(&chip->client->dev, "failed reading register\n");
                return ret;
@@ -222,20 +269,12 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
 {
        struct pca953x_chip *chip = gpiochip_get_data(gc);
        u8 reg_val;
-       int ret, offset = 0;
+       int ret;
 
        mutex_lock(&chip->i2c_lock);
        reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
 
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_DIRECTION;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_CFG;
-               break;
-       }
-       ret = pca953x_write_single(chip, offset, reg_val, off);
+       ret = pca953x_write_single(chip, chip->regs->direction, reg_val, off);
        if (ret)
                goto exit;
 
@@ -250,7 +289,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 {
        struct pca953x_chip *chip = gpiochip_get_data(gc);
        u8 reg_val;
-       int ret, offset = 0;
+       int ret;
 
        mutex_lock(&chip->i2c_lock);
        /* set output level */
@@ -261,15 +300,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
                reg_val = chip->reg_output[off / BANK_SZ]
                        & ~(1u << (off % BANK_SZ));
 
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_OUTPUT;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_OUT;
-               break;
-       }
-       ret = pca953x_write_single(chip, offset, reg_val, off);
+       ret = pca953x_write_single(chip, chip->regs->output, reg_val, off);
        if (ret)
                goto exit;
 
@@ -277,15 +308,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 
        /* then direction */
        reg_val = chip->reg_direction[off / BANK_SZ] & ~(1u << (off % BANK_SZ));
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_DIRECTION;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_CFG;
-               break;
-       }
-       ret = pca953x_write_single(chip, offset, reg_val, off);
+       ret = pca953x_write_single(chip, chip->regs->direction, reg_val, off);
        if (ret)
                goto exit;
 
@@ -299,18 +322,10 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 {
        struct pca953x_chip *chip = gpiochip_get_data(gc);
        u32 reg_val;
-       int ret, offset = 0;
+       int ret;
 
        mutex_lock(&chip->i2c_lock);
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_INPUT;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_IN;
-               break;
-       }
-       ret = pca953x_read_single(chip, offset, &reg_val, off);
+       ret = pca953x_read_single(chip, chip->regs->input, &reg_val, off);
        mutex_unlock(&chip->i2c_lock);
        if (ret < 0) {
                /* NOTE:  diagnostic already emitted; that's all we should
@@ -327,7 +342,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
 {
        struct pca953x_chip *chip = gpiochip_get_data(gc);
        u8 reg_val;
-       int ret, offset = 0;
+       int ret;
 
        mutex_lock(&chip->i2c_lock);
        if (val)
@@ -337,15 +352,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
                reg_val = chip->reg_output[off / BANK_SZ]
                        & ~(1u << (off % BANK_SZ));
 
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_OUTPUT;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_OUT;
-               break;
-       }
-       ret = pca953x_write_single(chip, offset, reg_val, off);
+       ret = pca953x_write_single(chip, chip->regs->output, reg_val, off);
        if (ret)
                goto exit;
 
@@ -355,35 +362,31 @@ exit:
 }
 
 static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
-               unsigned long *mask, unsigned long *bits)
+                                     unsigned long *mask, unsigned long *bits)
 {
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       unsigned int bank_mask, bank_val;
+       int bank_shift, bank;
        u8 reg_val[MAX_BANK];
-       int ret, offset = 0;
-       int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
-       int bank;
-
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_OUTPUT;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_OUT;
-               break;
-       }
+       int ret;
+
+       bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
 
        memcpy(reg_val, chip->reg_output, NBANK(chip));
        mutex_lock(&chip->i2c_lock);
-       for(bank=0; bank<NBANK(chip); bank++) {
-               unsigned bankmask = mask[bank / sizeof(*mask)] >>
-                                   ((bank % sizeof(*mask)) * 8);
-               if(bankmask) {
-                       unsigned bankval  = bits[bank / sizeof(*bits)] >>
-                                           ((bank % sizeof(*bits)) * 8);
-                       reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
+       for (bank = 0; bank < NBANK(chip); bank++) {
+               bank_mask = mask[bank / sizeof(*mask)] >>
+                          ((bank % sizeof(*mask)) * 8);
+               if (bank_mask) {
+                       bank_val = bits[bank / sizeof(*bits)] >>
+                                 ((bank % sizeof(*bits)) * 8);
+                       reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
                }
        }
-       ret = i2c_smbus_write_i2c_block_data(chip->client, offset << bank_shift, NBANK(chip), reg_val);
+
+       ret = i2c_smbus_write_i2c_block_data(chip->client,
+                                            chip->regs->output << bank_shift,
+                                            NBANK(chip), reg_val);
        if (ret)
                goto exit;
 
@@ -515,7 +518,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
        bool pending_seen = false;
        bool trigger_seen = false;
        u8 trigger[MAX_BANK];
-       int ret, i, offset = 0;
+       int ret, i;
 
        if (chip->driver_data & PCA_PCAL) {
                /* Read the current interrupt status from the device */
@@ -540,15 +543,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
                return pending_seen;
        }
 
-       switch (chip->chip_type) {
-       case PCA953X_TYPE:
-               offset = PCA953X_INPUT;
-               break;
-       case PCA957X_TYPE:
-               offset = PCA957X_IN;
-               break;
-       }
-       ret = pca953x_read_regs(chip, offset, cur_stat);
+       ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
        if (ret)
                return false;
 
@@ -608,20 +603,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
                             int irq_base)
 {
        struct i2c_client *client = chip->client;
-       int ret, i, offset = 0;
+       int ret, i;
 
        if (client->irq && irq_base != -1
                        && (chip->driver_data & PCA_INT)) {
 
-               switch (chip->chip_type) {
-               case PCA953X_TYPE:
-                       offset = PCA953X_INPUT;
-                       break;
-               case PCA957X_TYPE:
-                       offset = PCA957X_IN;
-                       break;
-               }
-               ret = pca953x_read_regs(chip, offset, chip->irq_stat);
+               ret = pca953x_read_regs(chip,
+                                       chip->regs->input, chip->irq_stat);
                if (ret)
                        return ret;
 
@@ -684,12 +672,14 @@ static int device_pca953x_init(struct pca953x_chip *chip, u32 invert)
        int ret;
        u8 val[MAX_BANK];
 
-       ret = pca953x_read_regs(chip, PCA953X_OUTPUT, chip->reg_output);
+       chip->regs = &pca953x_regs;
+
+       ret = pca953x_read_regs(chip, chip->regs->output, chip->reg_output);
        if (ret)
                goto out;
 
-       ret = pca953x_read_regs(chip, PCA953X_DIRECTION,
-                              chip->reg_direction);
+       ret = pca953x_read_regs(chip, chip->regs->direction,
+                               chip->reg_direction);
        if (ret)
                goto out;
 
@@ -709,10 +699,13 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
        int ret;
        u8 val[MAX_BANK];
 
-       ret = pca953x_read_regs(chip, PCA957X_OUT, chip->reg_output);
+       chip->regs = &pca957x_regs;
+
+       ret = pca953x_read_regs(chip, chip->regs->output, chip->reg_output);
        if (ret)
                goto out;
-       ret = pca953x_read_regs(chip, PCA957X_CFG, chip->reg_direction);
+       ret = pca953x_read_regs(chip, chip->regs->direction,
+                               chip->reg_direction);
        if (ret)
                goto out;
 
@@ -746,6 +739,7 @@ static int pca953x_probe(struct i2c_client *client,
        int irq_base = 0;
        int ret;
        u32 invert = 0;
+       struct regulator *reg;
 
        chip = devm_kzalloc(&client->dev,
                        sizeof(struct pca953x_chip), GFP_KERNEL);
@@ -765,6 +759,20 @@ static int pca953x_probe(struct i2c_client *client,
 
        chip->client = client;
 
+       reg = devm_regulator_get(&client->dev, "vcc");
+       if (IS_ERR(reg)) {
+               ret = PTR_ERR(reg);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&client->dev, "reg get err: %d\n", ret);
+               return ret;
+       }
+       ret = regulator_enable(reg);
+       if (ret) {
+               dev_err(&client->dev, "reg en err: %d\n", ret);
+               return ret;
+       }
+       chip->regulator = reg;
+
        if (id) {
                chip->driver_data = id->driver_data;
        } else {
@@ -776,15 +784,15 @@ static int pca953x_probe(struct i2c_client *client,
                        chip->driver_data = (int)(uintptr_t)match->data;
                } else {
                        id = acpi_match_device(pca953x_acpi_ids, &client->dev);
-                       if (!id)
-                               return -ENODEV;
+                       if (!id) {
+                               ret = -ENODEV;
+                               goto err_exit;
+                       }
 
                        chip->driver_data = id->driver_data;
                }
        }
 
-       chip->chip_type = PCA_CHIP_TYPE(chip->driver_data);
-
        mutex_init(&chip->i2c_lock);
 
        /* initialize cached registers from their original values.
@@ -792,20 +800,34 @@ static int pca953x_probe(struct i2c_client *client,
         */
        pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
 
-       if (chip->chip_type == PCA953X_TYPE)
+       if (chip->gpio_chip.ngpio <= 8) {
+               chip->write_regs = pca953x_write_regs_8;
+               chip->read_regs = pca953x_read_regs_8;
+       } else if (chip->gpio_chip.ngpio >= 24) {
+               chip->write_regs = pca953x_write_regs_24;
+               chip->read_regs = pca953x_read_regs_24;
+       } else {
+               if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE)
+                       chip->write_regs = pca953x_write_regs_16;
+               else
+                       chip->write_regs = pca957x_write_regs_16;
+               chip->read_regs = pca953x_read_regs_16;
+       }
+
+       if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE)
                ret = device_pca953x_init(chip, invert);
        else
                ret = device_pca957x_init(chip, invert);
        if (ret)
-               return ret;
+               goto err_exit;
 
        ret = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
        if (ret)
-               return ret;
+               goto err_exit;
 
        ret = pca953x_irq_setup(chip, irq_base);
        if (ret)
-               return ret;
+               goto err_exit;
 
        if (pdata && pdata->setup) {
                ret = pdata->setup(client, chip->gpio_chip.base,
@@ -816,6 +838,10 @@ static int pca953x_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, chip);
        return 0;
+
+err_exit:
+       regulator_disable(chip->regulator);
+       return ret;
 }
 
 static int pca953x_remove(struct i2c_client *client)
@@ -827,14 +853,16 @@ static int pca953x_remove(struct i2c_client *client)
        if (pdata && pdata->teardown) {
                ret = pdata->teardown(client, chip->gpio_chip.base,
                                chip->gpio_chip.ngpio, pdata->context);
-               if (ret < 0) {
+               if (ret < 0)
                        dev_err(&client->dev, "%s failed, %d\n",
                                        "teardown", ret);
-                       return ret;
-               }
+       } else {
+               ret = 0;
        }
 
-       return 0;
+       regulator_disable(chip->regulator);
+
+       return ret;
 }
 
 /* convenience to stop overlong match-table lines */
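As an aside, a minimal stand-alone sketch of the probe-time dispatch the pca953x changes above introduce: the register-access width is picked once from the GPIO count, so the common read/write helpers no longer switch on chip type per call. All names below (demo_chip, demo_write_*, demo_select_handlers) are illustrative only and not part of the driver.

#include <stdio.h>

struct demo_chip {
	int ngpio;
	int (*write_regs)(struct demo_chip *chip, int reg, const unsigned char *val);
};

static int demo_write_8(struct demo_chip *chip, int reg, const unsigned char *val)
{
	printf("byte write:  reg 0x%02x <- 0x%02x\n", reg, val[0]);
	return 0;
}

static int demo_write_16(struct demo_chip *chip, int reg, const unsigned char *val)
{
	/* 16-bit parts use doubled register addresses, as in the driver */
	printf("word write:  reg 0x%02x <- 0x%02x%02x\n", reg << 1, val[1], val[0]);
	return 0;
}

static int demo_write_24(struct demo_chip *chip, int reg, const unsigned char *val)
{
	int nbank = (chip->ngpio + 7) / 8;

	printf("block write: reg 0x%02x, %d banks\n", reg, nbank);
	return 0;
}

static void demo_select_handlers(struct demo_chip *chip)
{
	/* Mirrors the selection done once at probe time */
	if (chip->ngpio <= 8)
		chip->write_regs = demo_write_8;
	else if (chip->ngpio >= 24)
		chip->write_regs = demo_write_24;
	else
		chip->write_regs = demo_write_16;
}

int main(void)
{
	const unsigned char val[3] = { 0xaa, 0x55, 0x0f };
	struct demo_chip chip = { .ngpio = 16 };

	demo_select_handlers(&chip);
	return chip.write_regs(&chip, 0x01, val);	/* dispatches to demo_write_16 */
}

A 24-GPIO part would simply get demo_write_24 assigned instead; the hot path stays a single indirect call either way.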
index cb14b8d1d5120e1535dcef462c1cc041c8a50f2a..f5545049c187e493e427fc82a8174873a5b3bbd8 100644 (file)
@@ -90,7 +90,7 @@ static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
        return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "pisosr-gpio",
        .owner                  = THIS_MODULE,
        .get_direction          = pisosr_gpio_get_direction,
index b96e0b466f742ea2930ee0d2d79774f0ce903c32..2be48f5eba361af18817c8124dcc6637d1cff419 100644 (file)
@@ -347,6 +347,10 @@ static const struct of_device_id gpio_rcar_of_table[] = {
                .compatible = "renesas,gpio-r8a7795",
                /* Gen3 GPIO is identical to Gen2. */
                .data = &gpio_rcar_info_gen2,
+       }, {
+               .compatible = "renesas,gpio-r8a7796",
+               /* Gen3 GPIO is identical to Gen2. */
+               .data = &gpio_rcar_info_gen2,
        }, {
                .compatible = "renesas,gpio-rcar",
                .data = &gpio_rcar_info_gen1,
index eb43ae4835c15d8a65ac8f4fde2de07c7b6e19ee..54500444584626926a8ec258c09e7170ae413d4d 100644 (file)
@@ -138,7 +138,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
        return 0;
 }
 
-static struct gpio_chip sch_gpio_chip = {
+static const struct gpio_chip sch_gpio_chip = {
        .label                  = "sch_gpio",
        .owner                  = THIS_MODULE,
        .direction_input        = sch_gpio_direction_in,
index 7ffd164952867a870ff06c5120462b38cf6df928..22267479ba6846dd2088fe62dbc946281f72d6ae 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
@@ -183,7 +183,6 @@ static const struct of_device_id spics_gpio_of_match[] = {
        { .compatible = "st,spear-spics-gpio" },
        {}
 };
-MODULE_DEVICE_TABLE(of, spics_gpio_of_match);
 
 static struct platform_driver spics_gpio_driver = {
        .probe = spics_gpio_probe,
@@ -198,7 +197,3 @@ static int __init spics_gpio_init(void)
        return platform_driver_register(&spics_gpio_driver);
 }
 subsys_initcall(spics_gpio_init);
-
-MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
-MODULE_DESCRIPTION("STMicroelectronics SPEAr SPI Chip Select Abstraction");
-MODULE_LICENSE("GPL");
index f675132de10ee7cd69bc4ad9d9fe0f2df5b7e3fb..b51c5be55c3a2848376eb8a8b596230c725dcec8 100644 (file)
@@ -20,6 +20,8 @@
  */
 enum { REG_RE, REG_FE, REG_IE };
 
+enum { LSB, CSB, MSB };
+
 #define CACHE_NR_REGS  3
 /* No variant has more than 24 GPIOs */
 #define CACHE_NR_BANKS (24 / 8)
@@ -39,7 +41,7 @@ static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
        struct stmpe *stmpe = stmpe_gpio->stmpe;
-       u8 reg = stmpe->regs[STMPE_IDX_GPMR_LSB] - (offset / 8);
+       u8 reg = stmpe->regs[STMPE_IDX_GPMR_LSB + (offset / 8)];
        u8 mask = 1 << (offset % 8);
        int ret;
 
@@ -55,7 +57,7 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
        struct stmpe *stmpe = stmpe_gpio->stmpe;
        int which = val ? STMPE_IDX_GPSR_LSB : STMPE_IDX_GPCR_LSB;
-       u8 reg = stmpe->regs[which] - (offset / 8);
+       u8 reg = stmpe->regs[which + (offset / 8)];
        u8 mask = 1 << (offset % 8);
 
        /*
@@ -89,7 +91,7 @@ static int stmpe_gpio_direction_output(struct gpio_chip *chip,
 {
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
        struct stmpe *stmpe = stmpe_gpio->stmpe;
-       u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
+       u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
        u8 mask = 1 << (offset % 8);
 
        stmpe_gpio_set(chip, offset, val);
@@ -102,7 +104,7 @@ static int stmpe_gpio_direction_input(struct gpio_chip *chip,
 {
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
        struct stmpe *stmpe = stmpe_gpio->stmpe;
-       u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
+       u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
        u8 mask = 1 << (offset % 8);
 
        return stmpe_set_bits(stmpe, reg, mask, 0);
@@ -119,7 +121,7 @@ static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
        return stmpe_set_altfunc(stmpe, 1 << offset, STMPE_BLOCK_GPIO);
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "stmpe",
        .owner                  = THIS_MODULE,
        .get_direction          = stmpe_gpio_get_direction,
@@ -142,8 +144,9 @@ static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        if (type & IRQ_TYPE_LEVEL_LOW || type & IRQ_TYPE_LEVEL_HIGH)
                return -EINVAL;
 
-       /* STMPE801 doesn't have RE and FE registers */
-       if (stmpe_gpio->stmpe->partnum == STMPE801)
+       /* STMPE801 and STMPE1600 don't have RE and FE registers */
+       if (stmpe_gpio->stmpe->partnum == STMPE801 ||
+           stmpe_gpio->stmpe->partnum == STMPE1600)
                return 0;
 
        if (type & IRQ_TYPE_EDGE_RISING)
@@ -173,17 +176,24 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
        struct stmpe *stmpe = stmpe_gpio->stmpe;
        int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
-       static const u8 regmap[] = {
-               [REG_RE]        = STMPE_IDX_GPRER_LSB,
-               [REG_FE]        = STMPE_IDX_GPFER_LSB,
-               [REG_IE]        = STMPE_IDX_IEGPIOR_LSB,
+       static const u8 regmap[CACHE_NR_REGS][CACHE_NR_BANKS] = {
+               [REG_RE][LSB] = STMPE_IDX_GPRER_LSB,
+               [REG_RE][CSB] = STMPE_IDX_GPRER_CSB,
+               [REG_RE][MSB] = STMPE_IDX_GPRER_MSB,
+               [REG_FE][LSB] = STMPE_IDX_GPFER_LSB,
+               [REG_FE][CSB] = STMPE_IDX_GPFER_CSB,
+               [REG_FE][MSB] = STMPE_IDX_GPFER_MSB,
+               [REG_IE][LSB] = STMPE_IDX_IEGPIOR_LSB,
+               [REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
+               [REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
        };
        int i, j;
 
        for (i = 0; i < CACHE_NR_REGS; i++) {
-               /* STMPE801 doesn't have RE and FE registers */
-               if ((stmpe->partnum == STMPE801) &&
-                               (i != REG_IE))
+               /* STMPE801 and STMPE1600 don't have RE and FE registers */
+               if ((stmpe->partnum == STMPE801 ||
+                    stmpe->partnum == STMPE1600) &&
+                    (i != REG_IE))
                        continue;
 
                for (j = 0; j < num_banks; j++) {
@@ -194,7 +204,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
                                continue;
 
                        stmpe_gpio->oldregs[i][j] = new;
-                       stmpe_reg_write(stmpe, stmpe->regs[regmap[i]] - j, new);
+                       stmpe_reg_write(stmpe, stmpe->regs[regmap[i][j]], new);
                }
        }
 
@@ -216,11 +226,21 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
+       struct stmpe *stmpe = stmpe_gpio->stmpe;
        int offset = d->hwirq;
        int regoffset = offset / 8;
        int mask = 1 << (offset % 8);
 
        stmpe_gpio->regs[REG_IE][regoffset] |= mask;
+
+       /*
+        * STMPE1600 workaround: to be able to get IRQs from the pins,
+        * the GPMR register must be read, or the GPSR or GPCR
+        * registers written.
+        */
+       if (stmpe->partnum == STMPE1600)
+               stmpe_reg_read(stmpe,
+                              stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
 }
 
 static void stmpe_dbg_show_one(struct seq_file *s,
@@ -230,9 +250,9 @@ static void stmpe_dbg_show_one(struct seq_file *s,
        struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
        struct stmpe *stmpe = stmpe_gpio->stmpe;
        const char *label = gpiochip_is_requested(gc, offset);
-       int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
        bool val = !!stmpe_gpio_get(gc, offset);
-       u8 dir_reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
+       u8 bank = offset / 8;
+       u8 dir_reg = stmpe->regs[STMPE_IDX_GPDR_LSB + bank];
        u8 mask = 1 << (offset % 8);
        int ret;
        u8 dir;
@@ -247,39 +267,72 @@ static void stmpe_dbg_show_one(struct seq_file *s,
                           gpio, label ?: "(none)",
                           val ? "hi" : "lo");
        } else {
-               u8 edge_det_reg = stmpe->regs[STMPE_IDX_GPEDR_MSB] + num_banks - 1 - (offset / 8);
-               u8 rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB] - (offset / 8);
-               u8 fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB] - (offset / 8);
-               u8 irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB] - (offset / 8);
-               bool edge_det;
-               bool rise;
-               bool fall;
+               u8 edge_det_reg;
+               u8 rise_reg;
+               u8 fall_reg;
+               u8 irqen_reg;
+
+               char *edge_det_values[] = {"edge-inactive",
+                                          "edge-asserted",
+                                          "not-supported"};
+               char *rise_values[] = {"no-rising-edge-detection",
+                                      "rising-edge-detection",
+                                      "not-supported"};
+               char *fall_values[] = {"no-falling-edge-detection",
+                                      "falling-edge-detection",
+                                      "not-supported"};
+               #define NOT_SUPPORTED_IDX 2
+               u8 edge_det = NOT_SUPPORTED_IDX;
+               u8 rise = NOT_SUPPORTED_IDX;
+               u8 fall = NOT_SUPPORTED_IDX;
                bool irqen;
 
-               ret = stmpe_reg_read(stmpe, edge_det_reg);
-               if (ret < 0)
-                       return;
-               edge_det = !!(ret & mask);
-               ret = stmpe_reg_read(stmpe, rise_reg);
-               if (ret < 0)
+               switch (stmpe->partnum) {
+               case STMPE610:
+               case STMPE811:
+               case STMPE1601:
+               case STMPE2401:
+               case STMPE2403:
+                       edge_det_reg = stmpe->regs[STMPE_IDX_GPEDR_LSB + bank];
+                       ret = stmpe_reg_read(stmpe, edge_det_reg);
+                       if (ret < 0)
+                               return;
+                       edge_det = !!(ret & mask);
+
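+                       /* fall through */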
+               case STMPE1801:
+                       rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank];
+                       fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank];
+
+                       ret = stmpe_reg_read(stmpe, rise_reg);
+                       if (ret < 0)
+                               return;
+                       rise = !!(ret & mask);
+                       ret = stmpe_reg_read(stmpe, fall_reg);
+                       if (ret < 0)
+                               return;
+                       fall = !!(ret & mask);
+
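+                       /* fall through */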
+               case STMPE801:
+               case STMPE1600:
+                       irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank];
+                       break;
+
+               default:
                        return;
-               rise = !!(ret & mask);
-               ret = stmpe_reg_read(stmpe, fall_reg);
-               if (ret < 0)
-                       return;
-               fall = !!(ret & mask);
+               }
+
                ret = stmpe_reg_read(stmpe, irqen_reg);
                if (ret < 0)
                        return;
                irqen = !!(ret & mask);
 
-               seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s %s%s%s",
+               seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %13s %13s %25s %25s",
                           gpio, label ?: "(none)",
                           val ? "hi" : "lo",
-                          edge_det ? "edge-asserted" : "edge-inactive",
-                          irqen ? "IRQ-enabled" : "",
-                          rise ? " rising-edge-detection" : "",
-                          fall ? " falling-edge-detection" : "");
+                          edge_det_values[edge_det],
+                          irqen ? "IRQ-enabled" : "IRQ-disabled",
+                          rise_values[rise],
+                          fall_values[fall]);
        }
 }
 
@@ -307,18 +360,32 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
 {
        struct stmpe_gpio *stmpe_gpio = dev;
        struct stmpe *stmpe = stmpe_gpio->stmpe;
-       u8 statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_MSB];
+       u8 statmsbreg;
        int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
        u8 status[num_banks];
        int ret;
        int i;
 
+       /*
+        * The stmpe_block_read() call below requires statmsbreg to hold
+        * the register located at the lowest address. The STMPE1600 is
+        * the only variant whose registers are laid out in ascending
+        * order (LSB registers at lower addresses than the MSB ones),
+        * whereas all the other variants place the MSB registers before
+        * the LSB ones.
+        */
+       if (stmpe->partnum == STMPE1600)
+               statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_LSB];
+       else
+               statmsbreg = stmpe->regs[STMPE_IDX_ISGPIOR_MSB];
+
        ret = stmpe_block_read(stmpe, statmsbreg, num_banks, status);
        if (ret < 0)
                return IRQ_NONE;
 
        for (i = 0; i < num_banks; i++) {
-               int bank = num_banks - i - 1;
+               int bank = (stmpe_gpio->stmpe->partnum == STMPE1600) ? i :
+                          num_banks - i - 1;
                unsigned int enabled = stmpe_gpio->regs[REG_IE][bank];
                unsigned int stat = status[i];
 
@@ -336,12 +403,18 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
                        stat &= ~(1 << bit);
                }
 
-               stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
-
-               /* Edge detect register is not present on 801 */
-               if (stmpe->partnum != STMPE801)
-                       stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB]
-                                       + i, status[i]);
+               /*
+                * Writing the interrupt status register has no effect on
+                * the 801/1600/1801; their bits are cleared when read.
+                * The edge detect register is not present on them either.
+                */
+               if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
+                   stmpe->partnum != STMPE1801) {
+                       stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
+                       stmpe_reg_write(stmpe,
+                                       stmpe->regs[STMPE_IDX_GPEDR_LSB + i],
+                                       status[i]);
+               }
        }
 
        return IRQ_HANDLED;
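A small stand-alone sketch of the bank ordering handled above (assumed names, not driver code): the block read always starts at the lowest register address, so the same status buffer index maps to different bank numbers for the STMPE1600's ascending layout than for the descending layout of the other variants.

#include <stdio.h>

#define NUM_BANKS 3

int main(void)
{
	int i;

	/* The block read always starts at the lowest register address. */
	for (i = 0; i < NUM_BANKS; i++) {
		int ascending = i;			/* STMPE1600: LSB bank comes first */
		int descending = NUM_BANKS - i - 1;	/* others: MSB bank comes first */

		printf("status[%d] -> bank %d (STMPE1600) / bank %d (others)\n",
		       i, ascending, descending);
	}
	return 0;
}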
index a177ebd921d5f106d0f46332158a0fdc15ade23b..af95de89db01da504960ccbb8fdd2bf180b1ab24 100644 (file)
@@ -236,7 +236,6 @@ static const struct i2c_device_id sx150x_id[] = {
        {"sx1502q", 3},
        {}
 };
-MODULE_DEVICE_TABLE(i2c, sx150x_id);
 
 static const struct of_device_id sx150x_of_match[] = {
        { .compatible = "semtech,sx1508q" },
@@ -245,7 +244,6 @@ static const struct of_device_id sx150x_of_match[] = {
        { .compatible = "semtech,sx1502q" },
        {},
 };
-MODULE_DEVICE_TABLE(of, sx150x_of_match);
 
 static s32 sx150x_i2c_write(struct i2c_client *client, u8 reg, u8 val)
 {
index 8b3659352e495b02843187b75679406469a06611..5a5a6cb00eea9cc7e21666812ed6571fa55ff92d 100644 (file)
@@ -34,7 +34,7 @@ struct tc3589x_gpio {
        u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
 };
 
-static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
        struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
        struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -49,24 +49,24 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned offset)
        return !!(ret & mask);
 }
 
-static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
 {
        struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
        struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
        u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
-       unsigned pos = offset % 8;
+       unsigned int pos = offset % 8;
        u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
 
        tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
 }
 
 static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
-                                        unsigned offset, int val)
+                                        unsigned int offset, int val)
 {
        struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
        struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
        u8 reg = TC3589x_GPIODIR0 + offset / 8;
-       unsigned pos = offset % 8;
+       unsigned int pos = offset % 8;
 
        tc3589x_gpio_set(chip, offset, val);
 
@@ -74,19 +74,35 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
 }
 
 static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
-                                       unsigned offset)
+                                       unsigned int offset)
 {
        struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
        struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
        u8 reg = TC3589x_GPIODIR0 + offset / 8;
-       unsigned pos = offset % 8;
+       unsigned int pos = offset % 8;
 
        return tc3589x_set_bits(tc3589x, reg, BIT(pos), 0);
 }
 
-static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
-                                    unsigned offset,
-                                    enum single_ended_mode mode)
+static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
+                                     unsigned int offset)
+{
+       struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
+       struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
+       u8 reg = TC3589x_GPIODIR0 + offset / 8;
+       unsigned int pos = offset % 8;
+       int ret;
+
+       ret = tc3589x_reg_read(tc3589x, reg);
+       if (ret < 0)
+               return ret;
+
+       return !!(ret & BIT(pos));
+}
+
+static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
+                                        unsigned int offset,
+                                        enum single_ended_mode mode)
 {
        struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
        struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -97,7 +113,7 @@ static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
         */
        u8 odmreg = TC3589x_GPIOODM0 + (offset / 8) * 2;
        u8 odereg = TC3589x_GPIOODE0 + (offset / 8) * 2;
-       unsigned pos = offset % 8;
+       unsigned int pos = offset % 8;
        int ret;
 
        switch(mode) {
@@ -124,14 +140,15 @@ static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
        return -ENOTSUPP;
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "tc3589x",
        .owner                  = THIS_MODULE,
-       .direction_input        = tc3589x_gpio_direction_input,
        .get                    = tc3589x_gpio_get,
-       .direction_output       = tc3589x_gpio_direction_output,
        .set                    = tc3589x_gpio_set,
-       .set_single_ended       = tc3589x_gpio_single_ended,
+       .direction_output       = tc3589x_gpio_direction_output,
+       .direction_input        = tc3589x_gpio_direction_input,
+       .get_direction          = tc3589x_gpio_get_direction,
+       .set_single_ended       = tc3589x_gpio_set_single_ended,
        .can_sleep              = true,
 };
 
index cace79c1b70a53fa9d339d4e7902abfe627a690b..c8b34d787eedeb27fa6315cba34ea9b286cf9c59 100644 (file)
@@ -87,7 +87,7 @@ static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
        tpic2810_set_mask_bits(chip, *mask, *bits);
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "tpic2810",
        .owner                  = THIS_MODULE,
        .get_direction          = tpic2810_get_direction,
index 8e25f01ac314c3e0a7970e62f0881b8851fccb5e..b23c4d2429be51f47035f1920554e3bf77e122ae 100644 (file)
@@ -72,7 +72,7 @@ static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
                           BIT(4 + offset), value ? BIT(4 + offset) : 0);
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "tps65086-gpio",
        .owner                  = THIS_MODULE,
        .get_direction          = tps65086_gpio_get_direction,
index 1c09a19ae10c26c583551f500c0926cb2a53a71e..d779307a96852b15f251c9595e035e0ee537db7e 100644 (file)
@@ -172,7 +172,7 @@ static int tps65218_gpio_set_single_ended(struct gpio_chip *gc,
        return -ENOTSUPP;
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "gpio-tps65218",
        .owner                  = THIS_MODULE,
        .request                = tps65218_gpio_request,
@@ -204,7 +204,8 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
        tps65218_gpio->gpio_chip.of_node = pdev->dev.of_node;
 #endif
 
-       ret = gpiochip_add_data(&tps65218_gpio->gpio_chip, tps65218_gpio);
+       ret = devm_gpiochip_add_data(&pdev->dev, &tps65218_gpio->gpio_chip,
+                                    tps65218_gpio);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
                return ret;
@@ -215,15 +216,6 @@ static int tps65218_gpio_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int tps65218_gpio_remove(struct platform_device *pdev)
-{
-       struct tps65218_gpio *tps65218_gpio = platform_get_drvdata(pdev);
-
-       gpiochip_remove(&tps65218_gpio->gpio_chip);
-
-       return 0;
-}
-
 static const struct of_device_id tps65218_dt_match[] = {
        { .compatible = "ti,tps65218-gpio" },
        {  }
@@ -242,7 +234,6 @@ static struct platform_driver tps65218_gpio_driver = {
                .of_match_table = of_match_ptr(tps65218_dt_match)
        },
        .probe = tps65218_gpio_probe,
-       .remove = tps65218_gpio_remove,
        .id_table = tps65218_gpio_id_table,
 };
 
index acfd30a13a567902571c531ae5ac8052d0863375..abc0798ef843a485a88b4a520cb529e1b6b99780 100644 (file)
@@ -90,7 +90,7 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
                           GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "tps65912-gpio",
        .owner                  = THIS_MODULE,
        .get_direction          = tps65912_gpio_get_direction,
index 0c144a72f9af2281b9dc8a151728e2574d57d4e3..99256115bea55c12710ae6c19a94c09a8e01d918 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/gpio/driver.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
new file mode 100644 (file)
index 0000000..5bd2172
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Digital I/O driver for Technologic Systems I2C FPGA Core
+ *
+ * Copyright (C) 2015 Technologic Systems
+ * Copyright (C) 2016 Savoir-Faire Linux
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define DEFAULT_PIN_NUMBER     32
+/*
+ * Register bits used by the GPIO device
+ * Some boards, such as the TS-7970, do not have a separate input bit
+ */
+#define TS4900_GPIO_OE         0x01
+#define TS4900_GPIO_OUT                0x02
+#define TS4900_GPIO_IN         0x04
+#define TS7970_GPIO_IN         0x02
+
+struct ts4900_gpio_priv {
+       struct regmap *regmap;
+       struct gpio_chip gpio_chip;
+       unsigned int input_bit;
+};
+
+static int ts4900_gpio_get_direction(struct gpio_chip *chip,
+                                    unsigned int offset)
+{
+       struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+       unsigned int reg;
+
+       regmap_read(priv->regmap, offset, &reg);
+
+       return !(reg & TS4900_GPIO_OE);
+}
+
+static int ts4900_gpio_direction_input(struct gpio_chip *chip,
+                                      unsigned int offset)
+{
+       struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+
+       /*
+        * This clears the output enable bit; the other bits are
+        * don't-care when it is cleared
+        */
+       return regmap_write(priv->regmap, offset, 0);
+}
+
+static int ts4900_gpio_direction_output(struct gpio_chip *chip,
+                                       unsigned int offset, int value)
+{
+       struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+       int ret;
+
+       if (value)
+               ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE |
+                                                        TS4900_GPIO_OUT);
+       else
+               ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE);
+
+       return ret;
+}
+
+static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+       struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+       unsigned int reg;
+
+       regmap_read(priv->regmap, offset, &reg);
+
+       return !!(reg & priv->input_bit);
+}
+
+static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
+                           int value)
+{
+       struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
+
+       if (value)
+               regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT,
+                                  TS4900_GPIO_OUT);
+       else
+               regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
+}
+
+static const struct regmap_config ts4900_regmap_config = {
+       .reg_bits = 16,
+       .val_bits = 8,
+};
+
+static const struct gpio_chip template_chip = {
+       .label                  = "ts4900-gpio",
+       .owner                  = THIS_MODULE,
+       .get_direction          = ts4900_gpio_get_direction,
+       .direction_input        = ts4900_gpio_direction_input,
+       .direction_output       = ts4900_gpio_direction_output,
+       .get                    = ts4900_gpio_get,
+       .set                    = ts4900_gpio_set,
+       .base                   = -1,
+       .can_sleep              = true,
+};
+
+static const struct of_device_id ts4900_gpio_of_match_table[] = {
+       {
+               .compatible = "technologic,ts4900-gpio",
+               .data = (void *)TS4900_GPIO_IN,
+       }, {
+               .compatible = "technologic,ts7970-gpio",
+               .data = (void *)TS7970_GPIO_IN,
+       },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ts4900_gpio_of_match_table);
+
+static int ts4900_gpio_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       const struct of_device_id *match;
+       struct ts4900_gpio_priv *priv;
+       u32 ngpio;
+       int ret;
+
+       match = of_match_device(ts4900_gpio_of_match_table, &client->dev);
+       if (!match)
+               return -EINVAL;
+
+       if (of_property_read_u32(client->dev.of_node, "ngpios", &ngpio))
+               ngpio = DEFAULT_PIN_NUMBER;
+
+       priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->gpio_chip = template_chip;
+       priv->gpio_chip.label = "ts4900-gpio";
+       priv->gpio_chip.ngpio = ngpio;
+       priv->gpio_chip.parent = &client->dev;
+       priv->input_bit = (uintptr_t)match->data;
+
+       priv->regmap = devm_regmap_init_i2c(client, &ts4900_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               ret = PTR_ERR(priv->regmap);
+               dev_err(&client->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
+       ret = devm_gpiochip_add_data(&client->dev, &priv->gpio_chip, priv);
+       if (ret < 0) {
+               dev_err(&client->dev, "Unable to register gpiochip\n");
+               return ret;
+       }
+
+       i2c_set_clientdata(client, priv);
+
+       return 0;
+}
+
+static const struct i2c_device_id ts4900_gpio_id_table[] = {
+       { "ts4900-gpio", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ts4900_gpio_id_table);
+
+static struct i2c_driver ts4900_gpio_driver = {
+       .driver = {
+               .name = "ts4900-gpio",
+               .of_match_table = ts4900_gpio_of_match_table,
+       },
+       .probe = ts4900_gpio_probe,
+       .id_table = ts4900_gpio_id_table,
+};
+module_i2c_driver(ts4900_gpio_driver);
+
+MODULE_AUTHOR("Technologic Systems");
+MODULE_DESCRIPTION("GPIO interface for Technologic Systems I2C-FPGA core");
+MODULE_LICENSE("GPL");
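A stand-alone sketch (illustrative only, not driver code) of how one per-pin FPGA register value decodes into direction and input level with the bit definitions from the new ts4900 driver; on the TS-7970 the input bit shares the position of the output bit.

#include <stdio.h>

#define TS4900_GPIO_OE	0x01
#define TS4900_GPIO_OUT	0x02
#define TS4900_GPIO_IN	0x04	/* TS-4900: dedicated input bit */
#define TS7970_GPIO_IN	0x02	/* TS-7970: no separate input bit */

/* Decode one per-pin register value the same way the driver callbacks do. */
static void decode(unsigned int reg, unsigned int input_bit)
{
	printf("direction=%s level=%d\n",
	       (reg & TS4900_GPIO_OE) ? "output" : "input",
	       !!(reg & input_bit));
}

int main(void)
{
	decode(TS4900_GPIO_IN, TS4900_GPIO_IN);			/* TS-4900 input pin reading high */
	decode(TS7970_GPIO_IN, TS7970_GPIO_IN);			/* TS-7970 input pin reading high (shared bit) */
	decode(TS4900_GPIO_OE | TS4900_GPIO_OUT, TS4900_GPIO_IN);	/* TS-4900 pin configured as output */
	return 0;
}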
index 4b807b0e0c8e1696f8e9c80fec355eb72744aceb..dfcfbba74416016c6bc4a936b4072dc4cd9bec00 100644 (file)
@@ -381,7 +381,7 @@ static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
                : -EINVAL;
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "twl4030",
        .owner                  = THIS_MODULE,
        .request                = twl_request,
index 6284bdbe1e0cbbc95fca1118686dfe8c3c9b1b22..3edb09cb9ee08080ca6da66e369c8729e57e8392 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * vf610 GPIO support through PORT and GPIO module
+ * Freescale vf610 GPIO support through PORT and GPIO
  *
  * Copyright (c) 2014 Toradex AG.
  *
@@ -23,7 +23,6 @@
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/irq.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -289,7 +288,3 @@ static int __init gpio_vf610_init(void)
        return platform_driver_register(&vf610_gpio_driver);
 }
 device_initcall(gpio_vf610_init);
-
-MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
-MODULE_DESCRIPTION("Freescale VF610 GPIO");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
new file mode 100644 (file)
index 0000000..d0ddba7
--- /dev/null
@@ -0,0 +1,470 @@
+/*
+ * Intel Whiskey Cove PMIC GPIO Driver
+ *
+ * This driver is written based on gpio-crystalcove.c
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+/*
+ * Whiskey Cove PMIC has 13 physical GPIO pins divided into 3 banks:
+ * Bank 0: Pin 0 - 6
+ * Bank 1: Pin 7 - 10
+ * Bank 2: Pin 11 -12
+ * Each pin has one output control register and one input control register.
+ */
+#define BANK0_NR_PINS          7
+#define BANK1_NR_PINS          4
+#define BANK2_NR_PINS          2
+#define WCOVE_GPIO_NUM         (BANK0_NR_PINS + BANK1_NR_PINS + BANK2_NR_PINS)
+#define WCOVE_VGPIO_NUM                94
+/* GPIO output control registers (one per pin): 0x4e44 - 0x4e50 */
+#define GPIO_OUT_CTRL_BASE     0x4e44
+/* GPIO input control registers (one per pin): 0x4e51 - 0x4e5d */
+#define GPIO_IN_CTRL_BASE      0x4e51
+
+/*
+ * GPIO interrupts are organized in two groups:
+ * Group 0: Bank 0 pins (Pin 0 - 6)
+ * Group 1: Bank 1 and Bank 2 pins (Pin 7 - 12)
+ * Each group has two registers (one bit per pin): status and mask.
+ */
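+/*
+ * For example (illustrative): pin 9 belongs to group 1, so its mask bit is
+ * BIT(9 - 7) in the register at IRQ_MASK_BASE + 1, and its status bit is
+ * the same bit in the register at IRQ_STATUS_BASE + 1.
+ */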
+#define GROUP0_NR_IRQS         7
+#define GROUP1_NR_IRQS         6
+#define IRQ_MASK_BASE          0x4e19
+#define IRQ_STATUS_BASE                0x4e0b
+#define UPDATE_IRQ_TYPE                BIT(0)
+#define UPDATE_IRQ_MASK                BIT(1)
+
+#define CTLI_INTCNT_DIS                (0 << 1)
+#define CTLI_INTCNT_NE         (1 << 1)
+#define CTLI_INTCNT_PE         (2 << 1)
+#define CTLI_INTCNT_BE         (3 << 1)
+
+#define CTLO_DIR_IN            (0 << 5)
+#define CTLO_DIR_OUT           (1 << 5)
+
+#define CTLO_DRV_MASK          (1 << 4)
+#define CTLO_DRV_OD            (0 << 4)
+#define CTLO_DRV_CMOS          (1 << 4)
+
+#define CTLO_DRV_REN           (1 << 3)
+
+#define CTLO_RVAL_2KDOWN       (0 << 1)
+#define CTLO_RVAL_2KUP         (1 << 1)
+#define CTLO_RVAL_50KDOWN      (2 << 1)
+#define CTLO_RVAL_50KUP                (3 << 1)
+
+#define CTLO_INPUT_SET (CTLO_DRV_CMOS | CTLO_DRV_REN | CTLO_RVAL_2KUP)
+#define CTLO_OUTPUT_SET        (CTLO_DIR_OUT | CTLO_INPUT_SET)
+
+enum ctrl_register {
+       CTRL_IN,
+       CTRL_OUT,
+};
+
+/*
+ * struct wcove_gpio - Whiskey Cove GPIO controller
+ * @buslock: for bus lock/sync and unlock.
+ * @chip: the abstract gpio_chip structure.
+ * @dev: the gpio device
+ * @regmap: the regmap from the parent device.
+ * @regmap_irq_chip: the regmap of the gpio irq chip.
+ * @update: pending IRQ setting update, to be written to the chip upon unlock.
+ * @intcnt: the Interrupt Detect value to be written.
+ * @set_irq_mask: true if the IRQ mask needs to be set, false to clear.
+ */
+struct wcove_gpio {
+       struct mutex buslock;
+       struct gpio_chip chip;
+       struct device *dev;
+       struct regmap *regmap;
+       struct regmap_irq_chip_data *regmap_irq_chip;
+       int update;
+       int intcnt;
+       bool set_irq_mask;
+};
+
+static inline unsigned int to_reg(int gpio, enum ctrl_register reg_type)
+{
+       unsigned int reg;
+
+       /* The input and output control registers are one per pin. */
+       if (reg_type == CTRL_IN)
+               reg = GPIO_IN_CTRL_BASE + gpio;
+       else
+               reg = GPIO_OUT_CTRL_BASE + gpio;
+
+       return reg;
+}
+
+static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
+{
+       unsigned int reg, mask;
+
+       if (gpio < GROUP0_NR_IRQS) {
+               reg = IRQ_MASK_BASE;
+               mask = BIT(gpio % GROUP0_NR_IRQS);
+       } else {
+               reg = IRQ_MASK_BASE + 1;
+               mask = BIT((gpio - GROUP0_NR_IRQS) % GROUP1_NR_IRQS);
+       }
+
+       if (wg->set_irq_mask)
+               regmap_update_bits(wg->regmap, reg, mask, mask);
+       else
+               regmap_update_bits(wg->regmap, reg, mask, 0);
+}
+
+static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
+{
+       unsigned int reg = to_reg(gpio, CTRL_IN);
+
+       regmap_update_bits(wg->regmap, reg, CTLI_INTCNT_BE, wg->intcnt);
+}
+
+static int wcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
+{
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       return regmap_write(wg->regmap, to_reg(gpio, CTRL_OUT),
+                           CTLO_INPUT_SET);
+}
+
+static int wcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
+                                   int value)
+{
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       return regmap_write(wg->regmap, to_reg(gpio, CTRL_OUT),
+                           CTLO_OUTPUT_SET | value);
+}
+
+static int wcove_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
+{
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &val);
+       if (ret)
+               return ret;
+
+       return !(val & CTLO_DIR_OUT);
+}
+
+static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &val);
+       if (ret)
+               return ret;
+
+       return val & 0x1;
+}
+
+static void wcove_gpio_set(struct gpio_chip *chip,
+                                unsigned int gpio, int value)
+{
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       if (value)
+               regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+       else
+               regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+}
+
+static int wcove_gpio_set_single_ended(struct gpio_chip *chip,
+                                       unsigned int gpio,
+                                       enum single_ended_mode mode)
+{
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       switch (mode) {
+       case LINE_MODE_OPEN_DRAIN:
+               return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
+                                               CTLO_DRV_MASK, CTLO_DRV_OD);
+       case LINE_MODE_PUSH_PULL:
+               return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
+                                               CTLO_DRV_MASK, CTLO_DRV_CMOS);
+       default:
+               break;
+       }
+
+       return -ENOTSUPP;
+}
+
+static int wcove_irq_type(struct irq_data *data, unsigned int type)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       switch (type) {
+       case IRQ_TYPE_NONE:
+               wg->intcnt = CTLI_INTCNT_DIS;
+               break;
+       case IRQ_TYPE_EDGE_BOTH:
+               wg->intcnt = CTLI_INTCNT_BE;
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               wg->intcnt = CTLI_INTCNT_PE;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               wg->intcnt = CTLI_INTCNT_NE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       wg->update |= UPDATE_IRQ_TYPE;
+
+       return 0;
+}
+
+static void wcove_bus_lock(struct irq_data *data)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       mutex_lock(&wg->buslock);
+}
+
+static void wcove_bus_sync_unlock(struct irq_data *data)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+       int gpio = data->hwirq;
+
+       if (wg->update & UPDATE_IRQ_TYPE)
+               wcove_update_irq_ctrl(wg, gpio);
+       if (wg->update & UPDATE_IRQ_MASK)
+               wcove_update_irq_mask(wg, gpio);
+       wg->update = 0;
+
+       mutex_unlock(&wg->buslock);
+}
+
+static void wcove_irq_unmask(struct irq_data *data)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       wg->set_irq_mask = false;
+       wg->update |= UPDATE_IRQ_MASK;
+}
+
+static void wcove_irq_mask(struct irq_data *data)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+
+       wg->set_irq_mask = true;
+       wg->update |= UPDATE_IRQ_MASK;
+}
+
+static struct irq_chip wcove_irqchip = {
+       .name                   = "Whiskey Cove",
+       .irq_mask               = wcove_irq_mask,
+       .irq_unmask             = wcove_irq_unmask,
+       .irq_set_type           = wcove_irq_type,
+       .irq_bus_lock           = wcove_bus_lock,
+       .irq_bus_sync_unlock    = wcove_bus_sync_unlock,
+};
+
+static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
+{
+       struct wcove_gpio *wg = (struct wcove_gpio *)data;
+       unsigned int pending, virq, gpio, mask, offset;
+       u8 p[2];
+
+       if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
+               dev_err(wg->dev, "Failed to read irq status register\n");
+               return IRQ_NONE;
+       }
+
+       pending = p[0] | (p[1] << 8);
+       if (!pending)
+               return IRQ_NONE;
+
+       /* Iterate until no interrupt is pending */
+       while (pending) {
+               /* One iteration is for all pending bits */
+               for_each_set_bit(gpio, (const unsigned long *)&pending,
+                                                GROUP0_NR_IRQS) {
+                       offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
+                       mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
+                                                               BIT(gpio);
+                       virq = irq_find_mapping(wg->chip.irqdomain, gpio);
+                       handle_nested_irq(virq);
+                       regmap_update_bits(wg->regmap, IRQ_STATUS_BASE + offset,
+                                                               mask, mask);
+               }
+
+               /* Next iteration */
+               if (regmap_bulk_read(wg->regmap, IRQ_STATUS_BASE, p, 2)) {
+                       dev_err(wg->dev, "Failed to read irq status\n");
+                       break;
+               }
+
+               pending = p[0] | (p[1] << 8);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void wcove_gpio_dbg_show(struct seq_file *s,
+                                     struct gpio_chip *chip)
+{
+       unsigned int ctlo, ctli, irq_mask, irq_status;
+       struct wcove_gpio *wg = gpiochip_get_data(chip);
+       int gpio, offset, group, ret = 0;
+
+       for (gpio = 0; gpio < WCOVE_GPIO_NUM; gpio++) {
+               group = gpio < GROUP0_NR_IRQS ? 0 : 1;
+               ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &ctlo);
+               ret += regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &ctli);
+               ret += regmap_read(wg->regmap, IRQ_MASK_BASE + group,
+                                                       &irq_mask);
+               ret += regmap_read(wg->regmap, IRQ_STATUS_BASE + group,
+                                                       &irq_status);
+               if (ret) {
+                       pr_err("Failed to read registers: ctrl out/in or irq status/mask\n");
+                       break;
+               }
+
+               offset = gpio % 8;
+               seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
+                          gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
+                          ctli & 0x1 ? "hi" : "lo",
+                          ctli & CTLI_INTCNT_NE ? "fall" : "    ",
+                          ctli & CTLI_INTCNT_PE ? "rise" : "    ",
+                          ctlo,
+                          irq_mask & BIT(offset) ? "mask  " : "unmask",
+                          irq_status & BIT(offset) ? "pending" : "       ");
+       }
+}
+
+static int wcove_gpio_probe(struct platform_device *pdev)
+{
+       struct intel_soc_pmic *pmic;
+       struct wcove_gpio *wg;
+       int virq, ret, irq;
+       struct device *dev;
+
+       /*
+        * This GPIO platform device is created by an MFD device (see
+        * drivers/mfd/intel_soc_pmic_bxtwc.c for details). Information
+        * shared by all sub-devices created by that MFD device, the regmap
+        * pointer for instance, is stored as driver data of the MFD device
+        * driver.
+        */
+       pmic = dev_get_drvdata(pdev->dev.parent);
+       if (!pmic)
+               return -ENODEV;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       dev = &pdev->dev;
+
+       wg = devm_kzalloc(dev, sizeof(*wg), GFP_KERNEL);
+       if (!wg)
+               return -ENOMEM;
+
+       wg->regmap_irq_chip = pmic->irq_chip_data_level2;
+
+       platform_set_drvdata(pdev, wg);
+
+       mutex_init(&wg->buslock);
+       wg->chip.label = KBUILD_MODNAME;
+       wg->chip.direction_input = wcove_gpio_dir_in;
+       wg->chip.direction_output = wcove_gpio_dir_out;
+       wg->chip.get_direction = wcove_gpio_get_direction;
+       wg->chip.get = wcove_gpio_get;
+       wg->chip.set = wcove_gpio_set;
+       wg->chip.set_single_ended = wcove_gpio_set_single_ended;
+       wg->chip.base = -1;
+       wg->chip.ngpio = WCOVE_VGPIO_NUM;
+       wg->chip.can_sleep = true;
+       wg->chip.parent = pdev->dev.parent;
+       wg->chip.dbg_show = wcove_gpio_dbg_show;
+       wg->dev = dev;
+       wg->regmap = pmic->regmap;
+
+       ret = devm_gpiochip_add_data(dev, &wg->chip, wg);
+       if (ret) {
+               dev_err(dev, "Failed to add gpiochip: %d\n", ret);
+               return ret;
+       }
+
+       ret = gpiochip_irqchip_add(&wg->chip, &wcove_irqchip, 0,
+                            handle_simple_irq, IRQ_TYPE_NONE);
+       if (ret) {
+               dev_err(dev, "Failed to add irqchip: %d\n", ret);
+               return ret;
+       }
+
+       virq = regmap_irq_get_virq(wg->regmap_irq_chip, irq);
+       if (virq < 0) {
+               dev_err(dev, "Failed to get virq for irq %d\n", irq);
+               return virq;
+       }
+
+       ret = devm_request_threaded_irq(dev, virq, NULL,
+               wcove_gpio_irq_handler, IRQF_ONESHOT, pdev->name, wg);
+       if (ret) {
+               dev_err(dev, "Failed to request irq %d\n", virq);
+               return ret;
+       }
+
+       return 0;
+}
+
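+/*
+ * This platform device is instantiated as an MFD cell of the Whiskey Cove
+ * PMIC driver and matched purely by name ("bxt_wcove_gpio"); the
+ * second-level interrupt requested in probe is looked up through the
+ * parent's level-2 regmap irqchip data.
+ */
+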
+/*
+ * The Whiskey Cove PMIC itself is an analog device (but with a digital
+ * control interface) providing power management support for other devices
+ * in the accompanying SoC, so there is no .pm for the Whiskey Cove GPIO
+ * driver.
+ */
+static struct platform_driver wcove_gpio_driver = {
+       .driver = {
+               .name = "bxt_wcove_gpio",
+       },
+       .probe = wcove_gpio_probe,
+};
+
+module_platform_driver(wcove_gpio_driver);
+
+MODULE_AUTHOR("Ajay Thomas <ajay.thomas.david.rajamanickam@intel.com>");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_DESCRIPTION("Intel Whiskey Cove GPIO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bxt_wcove_gpio");
index 21f97bcd0062c2fccfccae1971d1bce1b15352a6..533707f943f44fd9890f5738149d65d96f34d856 100644 (file)
@@ -247,7 +247,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 #define wm831x_gpio_dbg_show NULL
 #endif
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "wm831x",
        .owner                  = THIS_MODULE,
        .direction_input        = wm831x_gpio_direction_in,
index e9765707d5c1f255c159300cbc2e456508eb37e3..e46752e73dd9066e91d9c9edb55bc15ef0ae02ec 100644 (file)
@@ -93,7 +93,7 @@ static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
        return wm8350->irq_base + WM8350_IRQ_GPIO(offset);
 }
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "wm8350",
        .owner                  = THIS_MODULE,
        .direction_input        = wm8350_gpio_direction_in,
index 2457aac8592e7c7f1fd73d6300944281b9715313..68410fda61383214cb5328645fcb71e237bece7c 100644 (file)
@@ -249,7 +249,7 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 #define wm8994_gpio_dbg_show NULL
 #endif
 
-static struct gpio_chip template_chip = {
+static const struct gpio_chip template_chip = {
        .label                  = "wm8994",
        .owner                  = THIS_MODULE,
        .request                = wm8994_gpio_request,
index 932e510aec50db6c2dba5f2ce539f5cb46925d31..4b44dd97c07f52c34ff9ad35d64e2766c4aeb071 100644 (file)
@@ -670,10 +670,10 @@ int gpiod_export_link(struct device *dev, const char *name,
 EXPORT_SYMBOL_GPL(gpiod_export_link);
 
 /**
- * gpiod_unexport - reverse effect of gpio_export()
+ * gpiod_unexport - reverse effect of gpiod_export()
  * @gpio: gpio to make unavailable
  *
- * This is implicit on gpio_free().
+ * This is implicit on gpiod_free().
  */
 void gpiod_unexport(struct gpio_desc *desc)
 {
index 23be4daefeed7404ed6abe0cb6f80b426f165628..19a665f8d455f36114d683ea264ecf8ec43ff4e7 100644 (file)
@@ -1370,19 +1370,15 @@ struct gpio_chip *gpiochip_find(void *data,
                                             void *data))
 {
        struct gpio_device *gdev;
-       struct gpio_chip *chip;
+       struct gpio_chip *chip = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&gpio_lock, flags);
        list_for_each_entry(gdev, &gpio_devices, list)
-               if (gdev->chip && match(gdev->chip, data))
+               if (gdev->chip && match(gdev->chip, data)) {
+                       chip = gdev->chip;
                        break;
-
-       /* No match? */
-       if (&gdev->list == &gpio_devices)
-               chip = NULL;
-       else
-               chip = gdev->chip;
+               }
 
        spin_unlock_irqrestore(&gpio_lock, flags);
 
@@ -1667,6 +1663,20 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
        if (gpiochip->of_node)
                of_node = gpiochip->of_node;
 #endif
+       /*
+        * Specifying a default trigger is a terrible idea if DT or ACPI is
+        * used to configure the interrupts, as you may end up with
+        * conflicting triggers. Tell the user, and reset to NONE.
+        */
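+       /*
+        * Drivers probed from DT or ACPI should therefore pass IRQ_TYPE_NONE
+        * here and let the trigger described by the firmware take effect when
+        * the interrupt is actually requested.
+        */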
+       if (WARN(of_node && type != IRQ_TYPE_NONE,
+                "%s: Ignoring %d default trigger\n", of_node->full_name, type))
+               type = IRQ_TYPE_NONE;
+       if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
+               acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+                                "Ignoring %d default trigger\n", type);
+               type = IRQ_TYPE_NONE;
+       }
+
        gpiochip->irqchip = irqchip;
        gpiochip->irq_handler = handler;
        gpiochip->irq_default_type = type;
index ff63b88b0ffaf721a58d63f81bb47ab227ac93b9..5cc7052e391d4f732dc48ac0ed01316a713054df 100644 (file)
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        char *table = NULL;
-       int size, i;
+       int size;
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;
 
-       for (i = 0; i < size; i++) {
-               sprintf(buf + i, "%02x", table[i]);
-       }
-       sprintf(buf + i, "\n");
+       memcpy(buf, table, size);
 
        return size;
 }
index b7742e62972a61aae553dca11477e89ab8be6e2e..9b61c8ba7aaf915892c9c2c86e5d4ec22934f2d9 100644 (file)
@@ -335,7 +335,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -368,7 +368,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
index e2f0e5d58d5cc05abd29ed65037e38a060fd3145..a5c94b482459234a09c87a85300d820e2ee608a5 100644 (file)
@@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
        default: BUG();
        }
 
index bff8668e9e6d466e059d70f719c0a818235040dc..b8184617ca250f143ee01779fa40f77edeee80e4 100644 (file)
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
 
 static const u32 golden_settings_polaris11_a11[] =
 {
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+       mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
        mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
        mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
        mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
-       mmSQ_CONFIG, 0x07f80000, 0x07180000,
+       mmSQ_CONFIG, 0x07f80000, 0x01180000,
        mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
        mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
-       mmCB_HW_CONTROL_2, 0, 0x0f000000,
+       mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
        mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
        mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
        mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+       mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
        mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
        mmGB_GPU_ID, 0x0000000f, 0x00000000,
        mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
        mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+       mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
        mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+       mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
        mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
        mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
index d24a82bd0c7a119d7b450ee801f3b5ffa1dc51e3..0b0f08641eed67b12c779f402c08c66c459ac1f7 100644 (file)
@@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
+       case CHIP_MULLINS:
                return 0;
        default: BUG();
        }
index 717359d3ba8c506b77b09d46663b7afc598e9df9..2aee2c6f3cd5b4719bdd97e38eeba43f6bb06718 100644 (file)
@@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_stoney_common[] =
+{
+       mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+       mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
 
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
                amdgpu_program_register_sequence(adev,
                                                 stoney_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_stoney_common,
+                                                (const u32)ARRAY_SIZE(golden_settings_stoney_common));
                break;
        default:
                break;
index 80446e2d3ab6efd00f3d3e777e4178b26ee6ae5d..76bcb43e7c06ac2dcf56a4d183e61c869a3544c5 100644 (file)
@@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
                goto out;
        }
 
+       /*
+        * cirrus_modeset_init() is initializing/registering the emulated fbdev
+        * and DRM internals can access/test some of the fields in
+        * mode_config->funcs as part of the fbdev registration process.
+        * Make sure dev->mode_config.funcs is properly set to avoid
+        * dereferencing a NULL pointer.
+        * FIXME: mode_config.funcs assignment should probably be done in
+        * cirrus_modeset_init() (that's a common pattern seen in other DRM
+        * drivers).
+        */
+       dev->mode_config.funcs = &cirrus_mode_funcs;
        r = cirrus_modeset_init(cdev);
        if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
                goto out;
        }
 
-       dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
        return 0;
 out:
        cirrus_driver_unload(dev);
index f1d9f0569d7f86514773e517bafba9326de82a45..b1dbb60af99fa79a796174dc560973b34ba6c4cc 100644 (file)
@@ -1121,16 +1121,14 @@ static int drm_connector_register_all(struct drm_device *dev)
        struct drm_connector *connector;
        int ret;
 
-       mutex_lock(&dev->mode_config.mutex);
-
-       drm_for_each_connector(connector, dev) {
+       /* FIXME: taking the mode config mutex ends up in a clash with
+        * fbcon/backlight registration */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                ret = drm_connector_register(connector);
                if (ret)
                        goto err;
        }
 
-       mutex_unlock(&dev->mode_config.mutex);
-
        return 0;
 
 err:
index 7df26d4b7ad8b5775c97df7e5ab3fdab315195e5..637a0aa4d3a0c1d71e83c8d0cde2b18584a20629 100644 (file)
@@ -74,6 +74,8 @@
 #define EDID_QUIRK_FORCE_8BPC                  (1 << 8)
 /* Force 12bpc */
 #define EDID_QUIRK_FORCE_12BPC                 (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC                  (1 << 10)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -100,6 +102,9 @@ static struct edid_quirk {
        /* Unknown Acer */
        { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
 
+       /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+       { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
        /* Belinea 10 15 55 */
        { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
        { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3862,6 +3867,20 @@ static void drm_add_display_info(struct edid *edid,
        /* HDMI deep color modes supported? Assign to info, if so */
        drm_assign_hdmi_deep_color_info(edid, info, connector);
 
+       /*
+        * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+        *
+        * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+        * tells us to assume 8 bpc color depth if the EDID doesn't have
+        * extensions which tell otherwise.
+        */
+       if ((info->bpc == 0) && (edid->revision < 4) &&
+           (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+               info->bpc = 8;
+               DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+                         connector->name, info->bpc);
+       }
+
        /* Only defined for 1.4 with digital displays */
        if (edid->revision < 4)
                return;
@@ -4082,6 +4101,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
        drm_add_display_info(edid, &connector->display_info, connector);
 
+       if (quirks & EDID_QUIRK_FORCE_6BPC)
+               connector->display_info.bpc = 6;
+
        if (quirks & EDID_QUIRK_FORCE_8BPC)
                connector->display_info.bpc = 8;
 
index c457eed76f1f7a1fca441d4714c2aebf95534d05..dcf93b3d4fb6cb75a836a242ef2e560e8f065b49 100644 (file)
@@ -5691,15 +5691,7 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
 
 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 {
-       unsigned int i;
-
-       for (i = 0; i < 15; i++) {
-               if (skl_cdclk_pcu_ready(dev_priv))
-                       return true;
-               udelay(10);
-       }
-
-       return false;
+       return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
 }
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
@@ -12114,21 +12106,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
        }
 
-       /* Clamp bpp to default limit on screens without EDID 1.4 */
-       if (connector->base.display_info.bpc == 0) {
-               int type = connector->base.connector_type;
-               int clamp_bpp = 24;
-
-               /* Fall back to 18 bpp when DP sink capability is unknown. */
-               if (type == DRM_MODE_CONNECTOR_DisplayPort ||
-                   type == DRM_MODE_CONNECTOR_eDP)
-                       clamp_bpp = 18;
-
-               if (bpp > clamp_bpp) {
-                       DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
-                                     bpp, clamp_bpp);
-                       pipe_config->pipe_bpp = clamp_bpp;
-               }
+       /* Clamp bpp to 8 on screens without EDID 1.4 */
+       if (connector->base.display_info.bpc == 0 && bpp > 24) {
+               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+                             bpp);
+               pipe_config->pipe_bpp = 24;
        }
 }
 
index 86b00c6db1a6d694ad1beb97fcb37fc2cb899873..3e3632c18733502816dc169daab852910fcee814 100644 (file)
@@ -782,7 +782,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct fb_info *info;
 
-       if (!ifbdev)
+       if (!ifbdev || !ifbdev->fb)
                return;
 
        info = ifbdev->helper.fbdev;
@@ -827,31 +827,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       if (dev_priv->fbdev)
-               drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       if (ifbdev && ifbdev->fb)
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
 void intel_fbdev_restore_mode(struct drm_device *dev)
 {
-       int ret;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
-       struct drm_fb_helper *fb_helper;
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
        if (!ifbdev)
                return;
 
        intel_fbdev_sync(ifbdev);
+       if (!ifbdev->fb)
+               return;
 
-       fb_helper = &ifbdev->helper;
-
-       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
-       if (ret) {
+       if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
                DRM_DEBUG("failed to restore crtc mode\n");
        } else {
-               mutex_lock(&fb_helper->dev->struct_mutex);
+               mutex_lock(&dev->struct_mutex);
                intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
-               mutex_unlock(&fb_helper->dev->struct_mutex);
+               mutex_unlock(&dev->struct_mutex);
        }
 }
index f4f3fcc8b3becb59c0ed5d6bdd5dd27a177f3731..97ba6c8cf907862197a42bbd9935030df0e16856 100644 (file)
@@ -4892,7 +4892,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
                else
                        gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
                dev_priv->rps.last_adj = 0;
-               I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+               I915_WRITE(GEN6_PMINTRMSK,
+                          gen6_sanitize_rps_pm_mask(dev_priv, ~0));
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 
index 528bdeffb339ecaaa887c97887dc80427a153c77..6190035edfeaa2af9ae30063bad3f73aca822f7d 100644 (file)
@@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
index ffdad81ef9647cbc96612e3d0dc4d41b85009dff..0c00e192c8458406e11783d1bfb674e18747a811 100644 (file)
@@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
index 4de3ff0dbebd8cba5bf4a761ae9d97bb37f86c00..e03004f4588deb2bb360fa78b9d0540b6eb43ca4 100644 (file)
@@ -125,6 +125,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
 
        /* Link drm_bridge to encoder */
        bridge->encoder = encoder;
+       encoder->bridge = bridge;
 
        ret = drm_bridge_attach(rcdu->ddev, bridge);
        if (ret) {
index 4054d804fe068f5821d49d675549d40a7ea23fb6..42c074a9c9551571c2dc782c05c5b568f8d0a789 100644 (file)
@@ -354,7 +354,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+                                     mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
index 2df602a35f9291ce178a6634ad9d0531ff331df4..f157a9efd220864858489ee8e8c4fe71ba68900c 100644 (file)
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict,
+                   bool evict, bool interruptible,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
@@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
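+               /*
+                * Wait for the buffer to become idle before unbinding its
+                * TTM pages: the GPU may still be using the memory that is
+                * about to be moved back to the system domain.
+                */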
+               ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+               if (unlikely(ret != 0)) {
+                       if (ret != -ERESTARTSYS)
+                               pr_err("Failed to expire sync object before unbinding TTM\n");
+                       return ret;
+               }
+
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
index 2d1fb64205924de8ab87e14608004fc169557cea..69e51d0250bfdde305eb52ff30d6fc8d140f3f8f 100644 (file)
@@ -1224,6 +1224,20 @@ config MFD_TPS65217
          This driver can also be built as a module.  If so, the module
          will be called tps65217.
 
+config MFD_TI_LP873X
+       tristate "TI LP873X Power Management IC"
+       depends on I2C
+       select MFD_CORE
+       select REGMAP_I2C
+       help
+         If you say yes here then you get support for the LP873X series of
+         Power Management Integrated Circuits (PMICs).
+         These include voltage regulators, thermal protection and configurable
+         General Purpose Outputs (GPOs) that are used in portable devices.
+
+         This driver can also be built as a module. If so, the module
+         will be called lp873x.
+
 config MFD_TPS65218
        tristate "TI TPS65218 Power Management chips"
        depends on I2C
index 2ba3ba35f745309de62050ab8442cea87a0edb2e..42acbcdeb4f69ea3f24411e0aff2f6403b3a9a1c 100644 (file)
@@ -22,6 +22,8 @@ obj-$(CONFIG_HTC_EGPIO)               += htc-egpio.o
 obj-$(CONFIG_HTC_PASIC3)       += htc-pasic3.o
 obj-$(CONFIG_HTC_I2CPLD)       += htc-i2cpld.o
 
+obj-$(CONFIG_MFD_TI_LP873X)    += lp873x.o
+
 obj-$(CONFIG_MFD_DAVINCI_VOICECODEC)   += davinci_voicecodec.o
 obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
 obj-$(CONFIG_MFD_TI_AM335X_TSCADC)     += ti_am335x_tscadc.o
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
new file mode 100644 (file)
index 0000000..9af064c
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author: Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+static const struct regmap_config lp873x_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = LP873X_REG_MAX,
+};
+
+static const struct mfd_cell lp873x_cells[] = {
+       { .name = "lp873x-regulator", },
+       { .name = "lp873x-gpio", },
+};
+
+static int lp873x_probe(struct i2c_client *client,
+                       const struct i2c_device_id *ids)
+{
+       struct lp873x *lp873;
+       int ret;
+       unsigned int otpid;
+
+       lp873 = devm_kzalloc(&client->dev, sizeof(*lp873), GFP_KERNEL);
+       if (!lp873)
+               return -ENOMEM;
+
+       lp873->dev = &client->dev;
+
+       lp873->regmap = devm_regmap_init_i2c(client, &lp873x_regmap_config);
+       if (IS_ERR(lp873->regmap)) {
+               ret = PTR_ERR(lp873->regmap);
+               dev_err(lp873->dev,
+                       "Failed to initialize register map: %d\n", ret);
+               return ret;
+       }
+
+       mutex_init(&lp873->lock);
+
+       ret = regmap_read(lp873->regmap, LP873X_REG_OTP_REV, &otpid);
+       if (ret) {
+               dev_err(lp873->dev, "Failed to read OTP ID\n");
+               return ret;
+       }
+
+       lp873->rev = otpid & LP873X_OTP_REV_OTP_ID;
+
+       i2c_set_clientdata(client, lp873);
+
+       ret = mfd_add_devices(lp873->dev, PLATFORM_DEVID_AUTO, lp873x_cells,
+                             ARRAY_SIZE(lp873x_cells), NULL, 0, NULL);
+
+       return ret;
+}
+
+static const struct of_device_id of_lp873x_match_table[] = {
+       { .compatible = "ti,lp8733", },
+       { .compatible = "ti,lp8732", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, of_lp873x_match_table);
+
+static const struct i2c_device_id lp873x_id_table[] = {
+       { "lp873x", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, lp873x_id_table);
+
+static struct i2c_driver lp873x_driver = {
+       .driver = {
+               .name   = "lp873x",
+               .of_match_table = of_lp873x_match_table,
+       },
+       .probe          = lp873x_probe,
+       .id_table       = lp873x_id_table,
+};
+module_i2c_driver(lp873x_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X chip family Multi-Function Device driver");
+MODULE_LICENSE("GPL v2");
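+
+/*
+ * A typical (illustrative) device tree node binding this driver; the I2C
+ * address below is an example only:
+ *
+ *     pmic@60 {
+ *             compatible = "ti,lp8733";
+ *             reg = <0x60>;
+ *     };
+ */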
index c3f4aab53b0796b169719e61a90a8a0817057e93..863c39a3353c97c871610d62f9340a661b7b6f6e 100644 (file)
@@ -57,6 +57,7 @@ static const struct of_device_id stmpe_of_match[] = {
        { .compatible = "st,stmpe610", .data = (void *)STMPE610, },
        { .compatible = "st,stmpe801", .data = (void *)STMPE801, },
        { .compatible = "st,stmpe811", .data = (void *)STMPE811, },
+       { .compatible = "st,stmpe1600", .data = (void *)STMPE1600, },
        { .compatible = "st,stmpe1601", .data = (void *)STMPE1601, },
        { .compatible = "st,stmpe1801", .data = (void *)STMPE1801, },
        { .compatible = "st,stmpe2401", .data = (void *)STMPE2401, },
@@ -101,6 +102,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
        { "stmpe610", STMPE610 },
        { "stmpe801", STMPE801 },
        { "stmpe811", STMPE811 },
+       { "stmpe1600", STMPE1600 },
        { "stmpe1601", STMPE1601 },
        { "stmpe1801", STMPE1801 },
        { "stmpe2401", STMPE2401 },
index 94c7cc02fdab3c083ff1995e4b12bc2cd86d3ade..cfdae8a3d77976b3a5b543551d07834069a7b45d 100644 (file)
@@ -469,6 +469,8 @@ static const struct mfd_cell stmpe_ts_cell = {
 
 static const u8 stmpe811_regs[] = {
        [STMPE_IDX_CHIP_ID]     = STMPE811_REG_CHIP_ID,
+       [STMPE_IDX_SYS_CTRL]    = STMPE811_REG_SYS_CTRL,
+       [STMPE_IDX_SYS_CTRL2]   = STMPE811_REG_SYS_CTRL2,
        [STMPE_IDX_ICR_LSB]     = STMPE811_REG_INT_CTRL,
        [STMPE_IDX_IER_LSB]     = STMPE811_REG_INT_EN,
        [STMPE_IDX_ISR_MSB]     = STMPE811_REG_INT_STA,
@@ -481,7 +483,7 @@ static const u8 stmpe811_regs[] = {
        [STMPE_IDX_GPAFR_U_MSB] = STMPE811_REG_GPIO_AF,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE811_REG_GPIO_INT_EN,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE811_REG_GPIO_INT_STA,
-       [STMPE_IDX_GPEDR_MSB]   = STMPE811_REG_GPIO_ED,
+       [STMPE_IDX_GPEDR_LSB]   = STMPE811_REG_GPIO_ED,
 };
 
 static struct stmpe_variant_block stmpe811_blocks[] = {
@@ -511,7 +513,7 @@ static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks,
        if (blocks & STMPE_BLOCK_TOUCHSCREEN)
                mask |= STMPE811_SYS_CTRL2_TSC_OFF;
 
-       return __stmpe_set_bits(stmpe, STMPE811_REG_SYS_CTRL2, mask,
+       return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL2], mask,
                                enable ? 0 : mask);
 }
 
@@ -550,26 +552,90 @@ static struct stmpe_variant_info stmpe610 = {
        .get_altfunc    = stmpe811_get_altfunc,
 };
 
+/*
+ * STMPE1600
+ * Compared to all other STMPE variants, the LSB and MSB registers are
+ * located in this order:    LSB   addr
+ *                           MSB   addr + 1
+ * As there are only 2 x 8-bit registers for GPMR/GPSR/IEGPIOR, the CSB
+ * index maps to the MSB registers.
+ */
+
+static const u8 stmpe1600_regs[] = {
+       [STMPE_IDX_CHIP_ID]     = STMPE1600_REG_CHIP_ID,
+       [STMPE_IDX_SYS_CTRL]    = STMPE1600_REG_SYS_CTRL,
+       [STMPE_IDX_ICR_LSB]     = STMPE1600_REG_SYS_CTRL,
+       [STMPE_IDX_GPMR_LSB]    = STMPE1600_REG_GPMR_LSB,
+       [STMPE_IDX_GPMR_CSB]    = STMPE1600_REG_GPMR_MSB,
+       [STMPE_IDX_GPSR_LSB]    = STMPE1600_REG_GPSR_LSB,
+       [STMPE_IDX_GPSR_CSB]    = STMPE1600_REG_GPSR_MSB,
+       [STMPE_IDX_GPDR_LSB]    = STMPE1600_REG_GPDR_LSB,
+       [STMPE_IDX_GPDR_CSB]    = STMPE1600_REG_GPDR_MSB,
+       [STMPE_IDX_IEGPIOR_LSB] = STMPE1600_REG_IEGPIOR_LSB,
+       [STMPE_IDX_IEGPIOR_CSB] = STMPE1600_REG_IEGPIOR_MSB,
+       [STMPE_IDX_ISGPIOR_LSB] = STMPE1600_REG_ISGPIOR_LSB,
+};
+
+static struct stmpe_variant_block stmpe1600_blocks[] = {
+       {
+               .cell   = &stmpe_gpio_cell,
+               .irq    = 0,
+               .block  = STMPE_BLOCK_GPIO,
+       },
+};
+
+static int stmpe1600_enable(struct stmpe *stmpe, unsigned int blocks,
+                          bool enable)
+{
+       if (blocks & STMPE_BLOCK_GPIO)
+               return 0;
+       else
+               return -EINVAL;
+}
+
+static struct stmpe_variant_info stmpe1600 = {
+       .name           = "stmpe1600",
+       .id_val         = STMPE1600_ID,
+       .id_mask        = 0xffff,
+       .num_gpios      = 16,
+       .af_bits        = 0,
+       .regs           = stmpe1600_regs,
+       .blocks         = stmpe1600_blocks,
+       .num_blocks     = ARRAY_SIZE(stmpe1600_blocks),
+       .num_irqs       = STMPE1600_NR_INTERNAL_IRQS,
+       .enable         = stmpe1600_enable,
+};
+
 /*
  * STMPE1601
  */
 
 static const u8 stmpe1601_regs[] = {
        [STMPE_IDX_CHIP_ID]     = STMPE1601_REG_CHIP_ID,
+       [STMPE_IDX_SYS_CTRL]    = STMPE1601_REG_SYS_CTRL,
+       [STMPE_IDX_SYS_CTRL2]   = STMPE1601_REG_SYS_CTRL2,
        [STMPE_IDX_ICR_LSB]     = STMPE1601_REG_ICR_LSB,
+       [STMPE_IDX_IER_MSB]     = STMPE1601_REG_IER_MSB,
        [STMPE_IDX_IER_LSB]     = STMPE1601_REG_IER_LSB,
        [STMPE_IDX_ISR_MSB]     = STMPE1601_REG_ISR_MSB,
        [STMPE_IDX_GPMR_LSB]    = STMPE1601_REG_GPIO_MP_LSB,
+       [STMPE_IDX_GPMR_CSB]    = STMPE1601_REG_GPIO_MP_MSB,
        [STMPE_IDX_GPSR_LSB]    = STMPE1601_REG_GPIO_SET_LSB,
+       [STMPE_IDX_GPSR_CSB]    = STMPE1601_REG_GPIO_SET_MSB,
        [STMPE_IDX_GPCR_LSB]    = STMPE1601_REG_GPIO_CLR_LSB,
+       [STMPE_IDX_GPCR_CSB]    = STMPE1601_REG_GPIO_CLR_MSB,
        [STMPE_IDX_GPDR_LSB]    = STMPE1601_REG_GPIO_SET_DIR_LSB,
+       [STMPE_IDX_GPDR_CSB]    = STMPE1601_REG_GPIO_SET_DIR_MSB,
+       [STMPE_IDX_GPEDR_LSB]   = STMPE1601_REG_GPIO_ED_LSB,
+       [STMPE_IDX_GPEDR_CSB]   = STMPE1601_REG_GPIO_ED_MSB,
        [STMPE_IDX_GPRER_LSB]   = STMPE1601_REG_GPIO_RE_LSB,
+       [STMPE_IDX_GPRER_CSB]   = STMPE1601_REG_GPIO_RE_MSB,
        [STMPE_IDX_GPFER_LSB]   = STMPE1601_REG_GPIO_FE_LSB,
+       [STMPE_IDX_GPFER_CSB]   = STMPE1601_REG_GPIO_FE_MSB,
        [STMPE_IDX_GPPUR_LSB]   = STMPE1601_REG_GPIO_PU_LSB,
        [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
+       [STMPE_IDX_IEGPIOR_CSB] = STMPE1601_REG_INT_EN_GPIO_MASK_MSB,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
-       [STMPE_IDX_GPEDR_MSB]   = STMPE1601_REG_GPIO_ED_MSB,
 };
 
 static struct stmpe_variant_block stmpe1601_blocks[] = {
@@ -640,13 +706,13 @@ static int stmpe1601_autosleep(struct stmpe *stmpe,
                return timeout;
        }
 
-       ret = __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
+       ret = __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL2],
                        STMPE1601_AUTOSLEEP_TIMEOUT_MASK,
                        timeout);
        if (ret < 0)
                return ret;
 
-       return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
+       return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL2],
                        STPME1601_AUTOSLEEP_ENABLE,
                        STPME1601_AUTOSLEEP_ENABLE);
 }
@@ -671,7 +737,7 @@ static int stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks,
        else
                mask &= ~STMPE1601_SYS_CTRL_ENABLE_SPWM;
 
-       return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL, mask,
+       return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL], mask,
                                enable ? mask : 0);
 }
 
@@ -710,18 +776,33 @@ static struct stmpe_variant_info stmpe1601 = {
  */
 static const u8 stmpe1801_regs[] = {
        [STMPE_IDX_CHIP_ID]     = STMPE1801_REG_CHIP_ID,
+       [STMPE_IDX_SYS_CTRL]    = STMPE1801_REG_SYS_CTRL,
        [STMPE_IDX_ICR_LSB]     = STMPE1801_REG_INT_CTRL_LOW,
        [STMPE_IDX_IER_LSB]     = STMPE1801_REG_INT_EN_MASK_LOW,
        [STMPE_IDX_ISR_LSB]     = STMPE1801_REG_INT_STA_LOW,
        [STMPE_IDX_GPMR_LSB]    = STMPE1801_REG_GPIO_MP_LOW,
+       [STMPE_IDX_GPMR_CSB]    = STMPE1801_REG_GPIO_MP_MID,
+       [STMPE_IDX_GPMR_MSB]    = STMPE1801_REG_GPIO_MP_HIGH,
        [STMPE_IDX_GPSR_LSB]    = STMPE1801_REG_GPIO_SET_LOW,
+       [STMPE_IDX_GPSR_CSB]    = STMPE1801_REG_GPIO_SET_MID,
+       [STMPE_IDX_GPSR_MSB]    = STMPE1801_REG_GPIO_SET_HIGH,
        [STMPE_IDX_GPCR_LSB]    = STMPE1801_REG_GPIO_CLR_LOW,
+       [STMPE_IDX_GPCR_CSB]    = STMPE1801_REG_GPIO_CLR_MID,
+       [STMPE_IDX_GPCR_MSB]    = STMPE1801_REG_GPIO_CLR_HIGH,
        [STMPE_IDX_GPDR_LSB]    = STMPE1801_REG_GPIO_SET_DIR_LOW,
+       [STMPE_IDX_GPDR_CSB]    = STMPE1801_REG_GPIO_SET_DIR_MID,
+       [STMPE_IDX_GPDR_MSB]    = STMPE1801_REG_GPIO_SET_DIR_HIGH,
        [STMPE_IDX_GPRER_LSB]   = STMPE1801_REG_GPIO_RE_LOW,
+       [STMPE_IDX_GPRER_CSB]   = STMPE1801_REG_GPIO_RE_MID,
+       [STMPE_IDX_GPRER_MSB]   = STMPE1801_REG_GPIO_RE_HIGH,
        [STMPE_IDX_GPFER_LSB]   = STMPE1801_REG_GPIO_FE_LOW,
+       [STMPE_IDX_GPFER_CSB]   = STMPE1801_REG_GPIO_FE_MID,
+       [STMPE_IDX_GPFER_MSB]   = STMPE1801_REG_GPIO_FE_HIGH,
        [STMPE_IDX_GPPUR_LSB]   = STMPE1801_REG_GPIO_PULL_UP_LOW,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
-       [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
+       [STMPE_IDX_IEGPIOR_CSB] = STMPE1801_REG_INT_EN_GPIO_MASK_MID,
+       [STMPE_IDX_IEGPIOR_MSB] = STMPE1801_REG_INT_EN_GPIO_MASK_HIGH,
+       [STMPE_IDX_ISGPIOR_MSB] = STMPE1801_REG_INT_STA_GPIO_HIGH,
 };
 
 static struct stmpe_variant_block stmpe1801_blocks[] = {
@@ -751,22 +832,31 @@ static int stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks,
                                enable ? mask : 0);
 }
 
-static int stmpe1801_reset(struct stmpe *stmpe)
+static int stmpe_reset(struct stmpe *stmpe)
 {
+       u16 id_val = stmpe->variant->id_val;
        unsigned long timeout;
        int ret = 0;
+       u8 reset_bit;
+
+       if (id_val == STMPE811_ID)
+               /* STMPE801 and STMPE610 use bit 1 of SYS_CTRL register */
+               reset_bit = STMPE811_SYS_CTRL_RESET;
+       else
+               /* all other STMPE variant use bit 7 of SYS_CTRL register */
+               reset_bit = STMPE_SYS_CTRL_RESET;
 
-       ret = __stmpe_set_bits(stmpe, STMPE1801_REG_SYS_CTRL,
-               STMPE1801_MSK_SYS_CTRL_RESET, STMPE1801_MSK_SYS_CTRL_RESET);
+       ret = __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL],
+                              reset_bit, reset_bit);
        if (ret < 0)
                return ret;
 
        timeout = jiffies + msecs_to_jiffies(100);
        while (time_before(jiffies, timeout)) {
-               ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
+               ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
                if (ret < 0)
                        return ret;
-               if (!(ret & STMPE1801_MSK_SYS_CTRL_RESET))
+               if (!(ret & reset_bit))
                        return 0;
                usleep_range(100, 200);
        }
@@ -794,20 +884,39 @@ static struct stmpe_variant_info stmpe1801 = {
 
 static const u8 stmpe24xx_regs[] = {
        [STMPE_IDX_CHIP_ID]     = STMPE24XX_REG_CHIP_ID,
+       [STMPE_IDX_SYS_CTRL]    = STMPE24XX_REG_SYS_CTRL,
+       [STMPE_IDX_SYS_CTRL2]   = STMPE24XX_REG_SYS_CTRL2,
        [STMPE_IDX_ICR_LSB]     = STMPE24XX_REG_ICR_LSB,
+       [STMPE_IDX_IER_MSB]     = STMPE24XX_REG_IER_MSB,
        [STMPE_IDX_IER_LSB]     = STMPE24XX_REG_IER_LSB,
        [STMPE_IDX_ISR_MSB]     = STMPE24XX_REG_ISR_MSB,
        [STMPE_IDX_GPMR_LSB]    = STMPE24XX_REG_GPMR_LSB,
+       [STMPE_IDX_GPMR_CSB]    = STMPE24XX_REG_GPMR_CSB,
+       [STMPE_IDX_GPMR_MSB]    = STMPE24XX_REG_GPMR_MSB,
        [STMPE_IDX_GPSR_LSB]    = STMPE24XX_REG_GPSR_LSB,
+       [STMPE_IDX_GPSR_CSB]    = STMPE24XX_REG_GPSR_CSB,
+       [STMPE_IDX_GPSR_MSB]    = STMPE24XX_REG_GPSR_MSB,
        [STMPE_IDX_GPCR_LSB]    = STMPE24XX_REG_GPCR_LSB,
+       [STMPE_IDX_GPCR_CSB]    = STMPE24XX_REG_GPCR_CSB,
+       [STMPE_IDX_GPCR_MSB]    = STMPE24XX_REG_GPCR_MSB,
        [STMPE_IDX_GPDR_LSB]    = STMPE24XX_REG_GPDR_LSB,
+       [STMPE_IDX_GPDR_CSB]    = STMPE24XX_REG_GPDR_CSB,
+       [STMPE_IDX_GPDR_MSB]    = STMPE24XX_REG_GPDR_MSB,
        [STMPE_IDX_GPRER_LSB]   = STMPE24XX_REG_GPRER_LSB,
+       [STMPE_IDX_GPRER_CSB]   = STMPE24XX_REG_GPRER_CSB,
+       [STMPE_IDX_GPRER_MSB]   = STMPE24XX_REG_GPRER_MSB,
        [STMPE_IDX_GPFER_LSB]   = STMPE24XX_REG_GPFER_LSB,
+       [STMPE_IDX_GPFER_CSB]   = STMPE24XX_REG_GPFER_CSB,
+       [STMPE_IDX_GPFER_MSB]   = STMPE24XX_REG_GPFER_MSB,
        [STMPE_IDX_GPPUR_LSB]   = STMPE24XX_REG_GPPUR_LSB,
        [STMPE_IDX_GPPDR_LSB]   = STMPE24XX_REG_GPPDR_LSB,
        [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
+       [STMPE_IDX_IEGPIOR_CSB] = STMPE24XX_REG_IEGPIOR_CSB,
+       [STMPE_IDX_IEGPIOR_MSB] = STMPE24XX_REG_IEGPIOR_MSB,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
+       [STMPE_IDX_GPEDR_LSB]   = STMPE24XX_REG_GPEDR_LSB,
+       [STMPE_IDX_GPEDR_CSB]   = STMPE24XX_REG_GPEDR_CSB,
        [STMPE_IDX_GPEDR_MSB]   = STMPE24XX_REG_GPEDR_MSB,
 };
 
@@ -840,7 +949,7 @@ static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks,
        if (blocks & STMPE_BLOCK_KEYPAD)
                mask |= STMPE24XX_SYS_CTRL_ENABLE_KPC;
 
-       return __stmpe_set_bits(stmpe, STMPE24XX_REG_SYS_CTRL, mask,
+       return __stmpe_set_bits(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL], mask,
                                enable ? mask : 0);
 }
 
@@ -893,6 +1002,7 @@ static struct stmpe_variant_info *stmpe_variant_info[STMPE_NBR_PARTS] = {
        [STMPE610]      = &stmpe610,
        [STMPE801]      = &stmpe801,
        [STMPE811]      = &stmpe811,
+       [STMPE1600]     = &stmpe1600,
        [STMPE1601]     = &stmpe1601,
        [STMPE1801]     = &stmpe1801,
        [STMPE2401]     = &stmpe2401,
@@ -919,7 +1029,8 @@ static irqreturn_t stmpe_irq(int irq, void *data)
        int ret;
        int i;
 
-       if (variant->id_val == STMPE801_ID) {
+       if (variant->id_val == STMPE801_ID ||
+           variant->id_val == STMPE1600_ID) {
                int base = irq_create_mapping(stmpe->domain, 0);
 
                handle_nested_irq(base);
@@ -982,7 +1093,7 @@ static void stmpe_irq_sync_unlock(struct irq_data *data)
                        continue;
 
                stmpe->oldier[i] = new;
-               stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB] - i, new);
+               stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB + i], new);
        }
 
        mutex_unlock(&stmpe->irq_lock);
@@ -1088,20 +1199,18 @@ static int stmpe_chip_init(struct stmpe *stmpe)
        if (ret)
                return ret;
 
-       if (id == STMPE1801_ID) {
-               ret =  stmpe1801_reset(stmpe);
-               if (ret < 0)
-                       return ret;
-       }
+       ret =  stmpe_reset(stmpe);
+       if (ret < 0)
+               return ret;
 
        if (stmpe->irq >= 0) {
-               if (id == STMPE801_ID)
-                       icr = STMPE801_REG_SYS_CTRL_INT_EN;
+               if (id == STMPE801_ID || id == STMPE1600_ID)
+                       icr = STMPE_SYS_CTRL_INT_EN;
                else
                        icr = STMPE_ICR_LSB_GIM;
 
-               /* STMPE801 doesn't support Edge interrupts */
-               if (id != STMPE801_ID) {
+               /* STMPE801 and STMPE1600 don't support Edge interrupts */
+               if (id != STMPE801_ID && id != STMPE1600_ID) {
                        if (irq_trigger == IRQF_TRIGGER_FALLING ||
                                        irq_trigger == IRQF_TRIGGER_RISING)
                                icr |= STMPE_ICR_LSB_EDGE;
@@ -1109,8 +1218,8 @@ static int stmpe_chip_init(struct stmpe *stmpe)
 
                if (irq_trigger == IRQF_TRIGGER_RISING ||
                                irq_trigger == IRQF_TRIGGER_HIGH) {
-                       if (id == STMPE801_ID)
-                               icr |= STMPE801_REG_SYS_CTRL_INT_HI;
+                       if (id == STMPE801_ID || id == STMPE1600_ID)
+                               icr |= STMPE_SYS_CTRL_INT_HI;
                        else
                                icr |= STMPE_ICR_LSB_HIGH;
                }
index 84adb46b3e2fea599f069072f2f48190602092e9..f7efdd8a5fc6301610a7e21593c69d7e7e07d757 100644 (file)
@@ -104,6 +104,10 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE_ICR_LSB_EDGE     (1 << 1)
 #define STMPE_ICR_LSB_GIM      (1 << 0)
 
+#define STMPE_SYS_CTRL_RESET   (1 << 7)
+#define STMPE_SYS_CTRL_INT_EN  (1 << 2)
+#define STMPE_SYS_CTRL_INT_HI  (1 << 0)
+
 /*
  * STMPE801
  */
@@ -119,13 +123,10 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE801_REG_GPIO_SET_PIN      0x11
 #define STMPE801_REG_GPIO_DIR          0x12
 
-#define STMPE801_REG_SYS_CTRL_RESET    (1 << 7)
-#define STMPE801_REG_SYS_CTRL_INT_EN   (1 << 2)
-#define STMPE801_REG_SYS_CTRL_INT_HI   (1 << 0)
-
 /*
  * STMPE811
  */
+#define STMPE811_ID                    0x0811
 
 #define STMPE811_IRQ_TOUCH_DET         0
 #define STMPE811_IRQ_FIFO_TH           1
@@ -138,6 +139,7 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE811_NR_INTERNAL_IRQS      8
 
 #define STMPE811_REG_CHIP_ID           0x00
+#define STMPE811_REG_SYS_CTRL          0x03
 #define STMPE811_REG_SYS_CTRL2         0x04
 #define STMPE811_REG_SPI_CFG           0x08
 #define STMPE811_REG_INT_CTRL          0x09
@@ -154,11 +156,34 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE811_REG_GPIO_FE           0x16
 #define STMPE811_REG_GPIO_AF           0x17
 
+#define STMPE811_SYS_CTRL_RESET                (1 << 1)
+
 #define STMPE811_SYS_CTRL2_ADC_OFF     (1 << 0)
 #define STMPE811_SYS_CTRL2_TSC_OFF     (1 << 1)
 #define STMPE811_SYS_CTRL2_GPIO_OFF    (1 << 2)
 #define STMPE811_SYS_CTRL2_TS_OFF      (1 << 3)
 
+/*
+ * STMPE1600
+ */
+#define STMPE1600_ID                   0x0016
+#define STMPE1600_NR_INTERNAL_IRQS     16
+
+#define STMPE1600_REG_CHIP_ID          0x00
+#define STMPE1600_REG_SYS_CTRL         0x03
+#define STMPE1600_REG_IEGPIOR_LSB      0x08
+#define STMPE1600_REG_IEGPIOR_MSB      0x09
+#define STMPE1600_REG_ISGPIOR_LSB      0x0A
+#define STMPE1600_REG_ISGPIOR_MSB      0x0B
+#define STMPE1600_REG_GPMR_LSB         0x10
+#define STMPE1600_REG_GPMR_MSB         0x11
+#define STMPE1600_REG_GPSR_LSB         0x12
+#define STMPE1600_REG_GPSR_MSB         0x13
+#define STMPE1600_REG_GPDR_LSB         0x14
+#define STMPE1600_REG_GPDR_MSB         0x15
+#define STMPE1600_REG_GPPIR_LSB                0x16
+#define STMPE1600_REG_GPPIR_MSB                0x17
+
 /*
  * STMPE1601
  */
@@ -175,19 +200,32 @@ int stmpe_remove(struct stmpe *stmpe);
 
 #define STMPE1601_REG_SYS_CTRL                 0x02
 #define STMPE1601_REG_SYS_CTRL2                        0x03
+#define STMPE1601_REG_ICR_MSB                  0x10
 #define STMPE1601_REG_ICR_LSB                  0x11
+#define STMPE1601_REG_IER_MSB                  0x12
 #define STMPE1601_REG_IER_LSB                  0x13
 #define STMPE1601_REG_ISR_MSB                  0x14
-#define STMPE1601_REG_CHIP_ID                  0x80
+#define STMPE1601_REG_ISR_LSB                  0x15
+#define STMPE1601_REG_INT_EN_GPIO_MASK_MSB     0x16
 #define STMPE1601_REG_INT_EN_GPIO_MASK_LSB     0x17
 #define STMPE1601_REG_INT_STA_GPIO_MSB         0x18
-#define STMPE1601_REG_GPIO_MP_LSB              0x87
+#define STMPE1601_REG_INT_STA_GPIO_LSB         0x19
+#define STMPE1601_REG_CHIP_ID                  0x80
+#define STMPE1601_REG_GPIO_SET_MSB             0x82
 #define STMPE1601_REG_GPIO_SET_LSB             0x83
+#define STMPE1601_REG_GPIO_CLR_MSB             0x84
 #define STMPE1601_REG_GPIO_CLR_LSB             0x85
+#define STMPE1601_REG_GPIO_MP_MSB              0x86
+#define STMPE1601_REG_GPIO_MP_LSB              0x87
+#define STMPE1601_REG_GPIO_SET_DIR_MSB         0x88
 #define STMPE1601_REG_GPIO_SET_DIR_LSB         0x89
 #define STMPE1601_REG_GPIO_ED_MSB              0x8A
+#define STMPE1601_REG_GPIO_ED_LSB              0x8B
+#define STMPE1601_REG_GPIO_RE_MSB              0x8C
 #define STMPE1601_REG_GPIO_RE_LSB              0x8D
+#define STMPE1601_REG_GPIO_FE_MSB              0x8E
 #define STMPE1601_REG_GPIO_FE_LSB              0x8F
+#define STMPE1601_REG_GPIO_PU_MSB              0x90
 #define STMPE1601_REG_GPIO_PU_LSB              0x91
 #define STMPE1601_REG_GPIO_AF_U_MSB            0x92
 
@@ -243,8 +281,6 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE1801_REG_GPIO_PULL_UP_MID         0x23
 #define STMPE1801_REG_GPIO_PULL_UP_HIGH                0x24
 
-#define STMPE1801_MSK_SYS_CTRL_RESET           (1 << 7)
-
 #define STMPE1801_MSK_INT_EN_KPC               (1 << 1)
 #define STMPE1801_MSK_INT_EN_GPIO              (1 << 3)
 
@@ -264,23 +300,48 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE24XX_NR_INTERNAL_IRQS     9
 
 #define STMPE24XX_REG_SYS_CTRL         0x02
+#define STMPE24XX_REG_SYS_CTRL2                0x03
+#define STMPE24XX_REG_ICR_MSB          0x10
 #define STMPE24XX_REG_ICR_LSB          0x11
+#define STMPE24XX_REG_IER_MSB          0x12
 #define STMPE24XX_REG_IER_LSB          0x13
 #define STMPE24XX_REG_ISR_MSB          0x14
-#define STMPE24XX_REG_CHIP_ID          0x80
+#define STMPE24XX_REG_ISR_LSB          0x15
+#define STMPE24XX_REG_IEGPIOR_MSB      0x16
+#define STMPE24XX_REG_IEGPIOR_CSB      0x17
 #define STMPE24XX_REG_IEGPIOR_LSB      0x18
 #define STMPE24XX_REG_ISGPIOR_MSB      0x19
-#define STMPE24XX_REG_GPMR_LSB         0xA4
+#define STMPE24XX_REG_ISGPIOR_CSB      0x1A
+#define STMPE24XX_REG_ISGPIOR_LSB      0x1B
+#define STMPE24XX_REG_CHIP_ID          0x80
+#define STMPE24XX_REG_GPSR_MSB         0x83
+#define STMPE24XX_REG_GPSR_CSB         0x84
 #define STMPE24XX_REG_GPSR_LSB         0x85
+#define STMPE24XX_REG_GPCR_MSB         0x86
+#define STMPE24XX_REG_GPCR_CSB         0x87
 #define STMPE24XX_REG_GPCR_LSB         0x88
+#define STMPE24XX_REG_GPDR_MSB         0x89
+#define STMPE24XX_REG_GPDR_CSB         0x8A
 #define STMPE24XX_REG_GPDR_LSB         0x8B
 #define STMPE24XX_REG_GPEDR_MSB                0x8C
+#define STMPE24XX_REG_GPEDR_CSB                0x8D
+#define STMPE24XX_REG_GPEDR_LSB                0x8E
+#define STMPE24XX_REG_GPRER_MSB                0x8F
+#define STMPE24XX_REG_GPRER_CSB                0x90
 #define STMPE24XX_REG_GPRER_LSB                0x91
+#define STMPE24XX_REG_GPFER_MSB                0x92
+#define STMPE24XX_REG_GPFER_CSB                0x93
 #define STMPE24XX_REG_GPFER_LSB                0x94
+#define STMPE24XX_REG_GPPUR_MSB                0x95
+#define STMPE24XX_REG_GPPUR_CSB                0x96
 #define STMPE24XX_REG_GPPUR_LSB                0x97
-#define STMPE24XX_REG_GPPDR_LSB                0x9a
+#define STMPE24XX_REG_GPPDR_MSB                0x98
+#define STMPE24XX_REG_GPPDR_CSB                0x99
+#define STMPE24XX_REG_GPPDR_LSB                0x9A
 #define STMPE24XX_REG_GPAFR_U_MSB      0x9B
-
+#define STMPE24XX_REG_GPMR_MSB         0xA2
+#define STMPE24XX_REG_GPMR_CSB         0xA3
+#define STMPE24XX_REG_GPMR_LSB         0xA4
 #define STMPE24XX_SYS_CTRL_ENABLE_GPIO         (1 << 3)
 #define STMPE24XX_SYSCON_ENABLE_PWM            (1 << 2)
 #define STMPE24XX_SYS_CTRL_ENABLE_KPC          (1 << 1)
index 4387ccb79e642c34f8d3b6c2a39ba4daae15e325..7410c6d9a34db942afd5624ebf54aadaffe9eb94 100644 (file)
@@ -69,5 +69,6 @@ OBJCOPYFLAGS :=
 OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
                        --set-section-flags .text=alloc,readonly \
                        --rename-section .text=.rodata
-$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
        $(call if_changed,objcopy)
index bdee9a01ef35ad6fa34f1238ea7268454e0e56c0..c466ee2b0c973a7c77cb16566770dec3b426db33 100644 (file)
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
         */
        mutex_lock(&afu->contexts_lock);
        idr_preload(GFP_KERNEL);
-       i = idr_alloc(&ctx->afu->contexts_idr, ctx,
-                     ctx->afu->adapter->native->sl_ops->min_pe,
+       i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
                      ctx->afu->num_procs, GFP_NOWAIT);
        idr_preload_end();
        mutex_unlock(&afu->contexts_lock);
index de090533f18cb8eb5b2c8f0a0b3bfbf6f11e5e00..344a0ff8f8c7df97e5328f89e46cd3f55b791d68 100644 (file)
@@ -561,7 +561,6 @@ struct cxl_service_layer_ops {
        u64 (*timebase_read)(struct cxl *adapter);
        int capi_mode;
        bool needs_reset_before_disable;
-       int min_pe;
 };
 
 struct cxl_native {
@@ -603,6 +602,7 @@ struct cxl {
        struct bin_attribute cxl_attr;
        int adapter_num;
        int user_irqs;
+       int min_pe;
        u64 ps_size;
        u16 psl_rev;
        u16 base_image;
index 3bcdaee11ba159aa13580bdd48151e0ef0755dbd..e606fdc4bc9cc3ec0ff7d3f29d4e691c30a26bd4 100644 (file)
@@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
        return fail_psl_irq(afu, &irq_info);
 }
 
-void native_irq_wait(struct cxl_context *ctx)
+static void native_irq_wait(struct cxl_context *ctx)
 {
        u64 dsisr;
        int timeout = 1000;
index d152e2de8c9375e2760b03cd93857d542f9a36d4..6f0c4ac4b6498991913647b0d70b2d4805870f0b 100644 (file)
@@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id
 
 static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
 {
-       u64 psl_dsnctl;
+       u64 psl_dsnctl, psl_fircntl;
        u64 chipid;
        u64 capp_unit_id;
        int rc;
@@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_
        cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
        /* snoop write mask */
        cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
-       /* set fir_accum */
-       cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
+       /* set fir_cntl to recommended value for production env */
+       psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
+       psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
+       psl_fircntl |= 0x1ULL; /* ce_thresh */
+       cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
        /* for debugging with trace arrays */
        cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
 
@@ -1521,14 +1524,15 @@ static const struct cxl_service_layer_ops xsl_ops = {
        .write_timebase_ctrl = write_timebase_ctrl_xsl,
        .timebase_read = timebase_read_xsl,
        .capi_mode = OPAL_PHB_CAPI_MODE_DMA,
-       .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
 };
 
 static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
 {
        if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+               /* Mellanox CX-4 */
                dev_info(&adapter->dev, "Device uses an XSL\n");
                adapter->native->sl_ops = &xsl_ops;
+               adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
        } else {
                dev_info(&adapter->dev, "Device uses a PSL\n");
                adapter->native->sl_ops = &psl_ops;
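
The psl_fircntl value built above uses POWER/IBM bit numbering, where bit 0 is the most significant bit of the 64-bit register, which is why each field is shifted by (63 - bit). A small stand-alone sketch of the same arithmetic; the PPC_BITFIELD macro name here is illustrative, not an existing kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: place 'val' so that its least significant bit lands at
     * IBM/POWER bit position 'bit' of a 64-bit register (IBM bit 0 == MSB). */
    #define PPC_BITFIELD(val, bit)  ((uint64_t)(val) << (63 - (bit)))

    int main(void)
    {
            /* ce_report (0b10 ending at bit 3), FIR_report (bit 6), ce_thresh (bit 63) */
            uint64_t fircntl = PPC_BITFIELD(0x2, 3) | PPC_BITFIELD(0x1, 6) |
                               PPC_BITFIELD(0x1, 63);

            printf("CXL_PSL_FIR_CNTL = 0x%016llx\n", (unsigned long long)fircntl);
            return 0;
    }
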
index dee8def1c1936c36fc82d894e269c1d8825d5c29..7ada5f1b7bb67b808ec824561b0ceb6def317562 100644 (file)
@@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
        /* Setup the PHB using arch provided callback */
        phb->ops = &cxl_pcie_pci_ops;
        phb->cfg_addr = NULL;
-       phb->cfg_data = 0;
+       phb->cfg_data = NULL;
        phb->private_data = afu;
        phb->controller_ops = cxl_pci_controller_ops;
 
index 5a3fd76eec27b226ec4a9a9dd9655d0a2ae95759..5525a204db93aadb15a2691ede56f915857845e4 100644 (file)
@@ -49,7 +49,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 
        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
-               bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+               bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
index 88e91666f145f0e7b0e95bf560ecc5e78f2ada22..368795aad5c974dbb59b1a43825b76c3e443acd2 100644 (file)
@@ -1269,6 +1269,7 @@ static int btt_blk_init(struct btt *btt)
                }
        }
        set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+       btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
        revalidate_disk(btt->btt_disk);
 
        return 0;
index 3fa7919f94a8785860afd3487d803f5b3010acd9..97dd2925ed6e95f1f06ffa6f4a0b5643acd4c07a 100644 (file)
@@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(namespace);
 
+static ssize_t size_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nd_btt *nd_btt = to_nd_btt(dev);
+       ssize_t rc;
+
+       device_lock(dev);
+       if (dev->driver)
+               rc = sprintf(buf, "%llu\n", nd_btt->size);
+       else {
+               /* no size to convey if the btt instance is disabled */
+               rc = -ENXIO;
+       }
+       device_unlock(dev);
+
+       return rc;
+}
+static DEVICE_ATTR_RO(size);
+
 static struct attribute *nd_btt_attributes[] = {
        &dev_attr_sector_size.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
+       &dev_attr_size.attr,
        NULL,
 };
 
index 40476399d22793aece0438da0f5a0976cef063ab..8024a0ef86d3af9f0ba5ef169260e2e342023d8e 100644 (file)
@@ -143,6 +143,7 @@ struct nd_btt {
        struct nd_namespace_common *ndns;
        struct btt *btt;
        unsigned long lbasize;
+       u64 size;
        u8 *uuid;
        int id;
 };
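
The three hunks above (btt.c, btt_devs.c, nd.h) record the BTT capacity at init time and expose it through a new read-only 'size' attribute, reported in bytes while the btt instance is enabled. A minimal user-space sketch that reads it; the sysfs path is hypothetical and the btt device name depends on the system's namespace layout:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical path; the btt instance name varies per system. */
            FILE *f = fopen("/sys/bus/nd/devices/btt0/size", "r");
            unsigned long long size;

            if (!f) {
                    perror("open btt size attribute");
                    return 1;
            }
            if (fscanf(f, "%llu", &size) != 1) {
                    fclose(f);
                    return 1;
            }
            printf("BTT capacity: %llu bytes\n", size);
            fclose(f);
            return 0;
    }
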
index d7c33f9361aa0361d762d1da31368f03bdca3082..8dcf5a960951805b09d650b2cc243ceaeff6a5bb 100644 (file)
@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
                reinit_completion(&dev->ioq_wait);
  retry:
                timeout = ADMIN_TIMEOUT;
-               for (; i > 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
-
-                       if (!pass)
-                               nvme_suspend_queue(nvmeq);
-                       if (nvme_delete_queue(nvmeq, opcode))
+               for (; i > 0; i--, sent++)
+                       if (nvme_delete_queue(dev->queues[i], opcode))
                                break;
-                       ++sent;
-               }
+
                while (sent--) {
                        timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
                        if (timeout == 0)
@@ -1693,11 +1688,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                nvme_stop_queues(&dev->ctrl);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
+
+       for (i = dev->queue_count - 1; i > 0; i--)
+               nvme_suspend_queue(dev->queues[i]);
+
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
-               for (i = dev->queue_count - 1; i >= 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
-                       nvme_suspend_queue(nvmeq);
-               }
+               nvme_suspend_queue(dev->queues[0]);
        } else {
                nvme_disable_io_queues(dev);
                nvme_disable_admin_queue(dev, shutdown);
index 3e3ce2b0424e4844d759bcf395d4177726d4e301..8d2875b4c56d8c8bf7da8951512288b327dd6e7a 100644 (file)
  * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/jiffies.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/types.h>
@@ -26,7 +24,6 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/nvme.h>
-#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 
 #include <rdma/ib_verbs.h>
@@ -169,7 +166,6 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -687,11 +683,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       if (ctrl->ctrl.tagset) {
-               blk_cleanup_queue(ctrl->ctrl.connect_q);
-               blk_mq_free_tag_set(&ctrl->tag_set);
-               nvme_rdma_dev_put(ctrl->device);
-       }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -748,8 +739,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       if (ctrl->queue_count > 1)
+       if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
+               nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
+       }
 
        dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
@@ -1269,7 +1263,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct rdma_conn_param param = { };
-       struct nvme_rdma_cm_req priv;
+       struct nvme_rdma_cm_req priv = { };
        int ret;
 
        param.qp_num = queue->qp->qp_num;
@@ -1318,37 +1312,39 @@ out_destroy_queue_ib:
  * that caught the event. Since we hold the callout until the controller
  * deletion is completed, we'll deadlock if the controller deletion will
  * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue beforehand, destroy the queue resources,
+ * then queue the controller deletion, which won't destroy this queue, and
+ * destroy the cm_id implicitly by returning a non-zero rc to the callout.
  */
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret, ctrl_deleted = 0;
+       int ret;
 
-       /* First disable the queue so ctrl delete won't free it */
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-               goto out;
+       /* Own the controller deletion */
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+               return 0;
 
-       /* delete the controller */
-       ret = __nvme_rdma_del_ctrl(ctrl);
-       if (!ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "Got rdma device removal event, deleting ctrl\n");
-               flush_work(&ctrl->delete_work);
+       dev_warn(ctrl->ctrl.device,
+               "Got rdma device removal event, deleting ctrl\n");
 
-               /* Return non-zero so the cm_id will destroy implicitly */
-               ctrl_deleted = 1;
+       /* Get rid of the reconnect work if it's running */
+       cancel_delayed_work_sync(&ctrl->reconnect_work);
 
+       /* Disable the queue so ctrl delete won't free it */
+       if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
                /* Free this queue ourselves */
-               rdma_disconnect(queue->cm_id);
-               ib_drain_qp(queue->qp);
+               nvme_rdma_stop_queue(queue);
                nvme_rdma_destroy_queue_ib(queue);
+
+               /* Return non-zero so the cm_id will destroy implicitly */
+               ret = 1;
        }
 
-out:
-       return ctrl_deleted;
+       /* Queue controller deletion */
+       queue_work(nvme_rdma_wq, &ctrl->delete_work);
+       flush_work(&ctrl->delete_work);
+       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1648,7 +1644,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
                nvme_rdma_free_io_queues(ctrl);
        }
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1653,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
        nvme_rdma_destroy_admin_queue(ctrl);
 }
 
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+       nvme_uninit_ctrl(&ctrl->ctrl);
+       if (shutdown)
+               nvme_rdma_shutdown_ctrl(ctrl);
+
+       if (ctrl->ctrl.tagset) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               blk_mq_free_tag_set(&ctrl->tag_set);
+               nvme_rdma_dev_put(ctrl->device);
+       }
+
+       nvme_put_ctrl(&ctrl->ctrl);
+}
+
 static void nvme_rdma_del_ctrl_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_rdma_shutdown_ctrl(ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, true);
 }
 
 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1698,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, false);
 }
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1739,6 +1745,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
        }
 
        return;
index 2fac17a5ad53a8aba3b2f9cb84369887dfaa88f6..47c564b5a2895198b020a26851b824350f10006e 100644 (file)
@@ -13,7 +13,6 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
-#include <linux/random.h>
 #include <generated/utsrelease.h>
 #include "nvmet.h"
 
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
-       u64 serial;
        u16 status = 0;
 
        id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
        id->vid = 0;
        id->ssvid = 0;
 
-       /* generate a random serial number as our controllers are ephemeral: */
-       get_random_bytes(&serial, sizeof(serial));
        memset(id->sn, ' ', sizeof(id->sn));
-       snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+       snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
 
        memset(id->mn, ' ', sizeof(id->mn));
        strncpy((char *)id->mn, "Linux", sizeof(id->mn));
index 8a891ca53367eaa6498f8326e745b081a110157e..6559d5afa7bfd9f808281658f686429c53fc7903 100644 (file)
@@ -13,6 +13,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
+#include <linux/random.h>
 #include "nvmet.h"
 
 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
+       /* generate a random serial number as our controllers are ephemeral: */
+       get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
 
index 94e782987cc9eb9b2bd29d0a1534811030e7850a..7affd40a6b337ba110caf825fcec4ee2dbe1a96b 100644 (file)
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_loop_shutdown_ctrl(ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
+       nvme_loop_shutdown_ctrl(ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
@@ -501,7 +500,6 @@ out_free_queues:
        nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-       nvme_remove_namespaces(&ctrl->ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
index 57dd6d834c28a84a2c8960f84737ec9d082d67e1..76b6eedccaf92ce4708cc4b730c03775d5469f0f 100644 (file)
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
 
        struct mutex            lock;
        u64                     cap;
+       u64                     serial;
        u32                     cc;
        u32                     csts;
 
index e06d504bdf0c81aad35c2643c299abec105e7337..b4d648536c3e43316cc2929f8346d4997db01306 100644 (file)
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state {
        NVMET_RDMA_Q_CONNECTING,
        NVMET_RDMA_Q_LIVE,
        NVMET_RDMA_Q_DISCONNECTING,
+       NVMET_RDMA_IN_DEVICE_REMOVAL,
 };
 
 struct nvmet_rdma_queue {
@@ -615,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
        if (!len)
                return 0;
 
-       /* use the already allocated data buffer if possible */
-       if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-               nvmet_rdma_use_inline_sg(rsp, len, 0);
-       } else {
-               status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-                               len);
-               if (status)
-                       return status;
-       }
+       status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+                       len);
+       if (status)
+               return status;
 
        ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                        rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -984,7 +980,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
        struct nvmet_rdma_device *dev = queue->dev;
 
        nvmet_rdma_free_queue(queue);
-       rdma_destroy_id(cm_id);
+
+       if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+               rdma_destroy_id(cm_id);
+
        kref_put(&dev->ref, nvmet_rdma_free_dev);
 }
 
@@ -1233,8 +1232,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
        case NVMET_RDMA_Q_LIVE:
-               disconnect = true;
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
+       case NVMET_RDMA_IN_DEVICE_REMOVAL:
+               disconnect = true;
                break;
        case NVMET_RDMA_Q_DISCONNECTING:
                break;
@@ -1272,6 +1272,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        schedule_work(&queue->release_work);
 }
 
+/**
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
+ * @cm_id:     rdma_cm id that received the event (queue or listener cm_id)
+ * @queue:     nvmet rdma queue (cm_id qp_context), NULL for a listener cm_id
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device bound listener cm_id (where in this case
+ * queue will be null).
+ *
+ * We claim ownership of destroying the cm_id. For queues we move
+ * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL, and for the port
+ * we nullify the priv to prevent a double cm_id destruction; in both
+ * cases the cm_id is destroyed implicitly by returning a non-zero rc
+ * to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+               struct nvmet_rdma_queue *queue)
+{
+       unsigned long flags;
+
+       if (!queue) {
+               struct nvmet_port *port = cm_id->context;
+
+               /*
+                * This is a listener cm_id. Make sure that
+                * future remove_port won't invoke a double
+                * cm_id destroy. Use an atomic xchg to make sure
+                * we don't compete with remove_port.
+                */
+               if (xchg(&port->priv, NULL) != cm_id)
+                       return 0;
+       } else {
+               /*
+                * This is a queue cm_id. Make sure that
+                * release queue will not destroy the cm_id
+                * and schedule all ctrl queues removal (only
+                * if the queue is not disconnecting already).
+                */
+               spin_lock_irqsave(&queue->state_lock, flags);
+               if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+                       queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+               spin_unlock_irqrestore(&queue->state_lock, flags);
+               nvmet_rdma_queue_disconnect(queue);
+               flush_scheduled_work();
+       }
+
+       /*
+        * We need to return 1 so that the core will destroy
+        * its own ID.  What a great API design..
+        */
+       return 1;
+}
+
 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event)
 {
@@ -1294,20 +1350,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
-       case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-               /*
-                * We can get the device removal callback even for a
-                * CM ID that we aren't actually using.  In that case
-                * the context pointer is NULL, so we shouldn't try
-                * to disconnect a non-existing queue.  But we also
-                * need to return 1 so that the core will destroy
-                * it's own ID.  What a great API design..
-                */
-               if (queue)
-                       nvmet_rdma_queue_disconnect(queue);
-               else
-                       ret = 1;
+               nvmet_rdma_queue_disconnect(queue);
+               break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               ret = nvmet_rdma_device_removal(cm_id, queue);
                break;
        case RDMA_CM_EVENT_REJECTED:
        case RDMA_CM_EVENT_UNREACHABLE:
@@ -1396,9 +1443,10 @@ out_destroy_id:
 
 static void nvmet_rdma_remove_port(struct nvmet_port *port)
 {
-       struct rdma_cm_id *cm_id = port->priv;
+       struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
 
-       rdma_destroy_id(cm_id);
+       if (cm_id)
+               rdma_destroy_id(cm_id);
 }
 
 static struct nvmet_fabrics_ops nvmet_rdma_ops = {
index a02981efdad570148e39925cfbe4a8579ca7f7ca..eafa6138a6b81866a3bc09739ab3398513746253 100644 (file)
@@ -1411,6 +1411,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                pci_msi_domain_update_chip_ops(info);
 
+       info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+
        domain = msi_create_irq_domain(fwnode, info, parent);
        if (!domain)
                return NULL;
index 6ccb994bdfcbd160148c535f18ea656ea7ee13e6..c494613c1909e16b88f06c0270bee1425239e5f3 100644 (file)
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
        return 0;
 }
 
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);
 
 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
        struct arm_pmu *pmu;
 
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
                if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
                if (pmu->reset)
                        pmu->reset(pmu);
        }
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
        return 0;
 }
 
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
        if (!cpu_hw_events)
                return -ENOMEM;
 
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
 
        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
        return 0;
 
 out_unregister:
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_del(&cpu_pmu->entry);
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
        free_percpu(cpu_hw_events);
        return err;
 }
@@ -869,9 +869,9 @@ out_unregister:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
        cpu_pm_pmu_unregister(cpu_pmu);
-       mutex_lock(&arm_pmu_mutex);
+       spin_lock(&arm_pmu_lock);
        list_del(&cpu_pmu->entry);
-       mutex_unlock(&arm_pmu_mutex);
+       spin_unlock(&arm_pmu_lock);
        free_percpu(cpu_pmu->hw_events);
 }
 
@@ -967,11 +967,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
        /* If we didn't manage to parse anything, try the interrupt affinity */
        if (cpumask_weight(&pmu->supported_cpus) == 0) {
-               if (!using_spi) {
+               int irq = platform_get_irq(pdev, 0);
+
+               if (irq_is_percpu(irq)) {
                        /* If using PPIs, check the affinity of the partition */
-                       int ret, irq;
+                       int ret;
 
-                       irq = platform_get_irq(pdev, 0);
                        ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
                        if (ret) {
                                kfree(irqs);
index d2bc092defd77732f780acb531931f165d440498..da2fe18162e1196b932eb42baeea1ad47f411ee3 100644 (file)
@@ -110,8 +110,8 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
        /* BIOS error detected */
        { KE_IGNORE, 0xe00d, { KEY_RESERVED } },
 
-       /* Unknown, defined in ACPI DSDT */
-       /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */
+       /* Battery was removed or inserted */
+       { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
 
        /* Wifi Catcher */
        { KE_KEY,    0xe011, { KEY_PROG2 } },
index cecc15a880de6928fed5ee0d35588fbae03423dd..3fa17ac8df5492f4d2e4681d1a4f07f2f8defffa 100644 (file)
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch)
 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
                                           long timeout)
 {
-       struct rio_channel *ch = NULL;
-       struct rio_channel *new_ch = NULL;
+       struct rio_channel *ch;
+       struct rio_channel *new_ch;
        struct conn_req *req;
        struct cm_peer *peer;
        int found = 0;
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
        spin_unlock_bh(&ch->lock);
        riocm_put_channel(ch);
+       ch = NULL;
        kfree(req);
 
        down_read(&rdev_sem);
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
        if (!found) {
                /* If peer device object not found, simply ignore the request */
                err = -ENODEV;
-               goto err_nodev;
+               goto err_put_new_ch;
        }
 
        new_ch->rdev = peer->rdev;
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
 
        *new_ch_id = new_ch->id;
        return new_ch;
+
+err_put_new_ch:
+       spin_lock_bh(&idr_lock);
+       idr_remove(&ch_idr, new_ch->id);
+       spin_unlock_bh(&idr_lock);
+       riocm_put_channel(new_ch);
+
 err_put:
-       riocm_put_channel(ch);
-err_nodev:
-       if (new_ch) {
-               spin_lock_bh(&idr_lock);
-               idr_remove(&ch_idr, new_ch->id);
-               spin_unlock_bh(&idr_lock);
-               riocm_put_channel(new_ch);
-       }
+       if (ch)
+               riocm_put_channel(ch);
        *new_ch_id = 0;
        return ERR_PTR(err);
 }
index 6c88e31c01f7b11797aeeab37d622b03d720cf16..97dc4cc3ef43f2ea6d116f4ddd7f626d62a5de18 100644 (file)
@@ -323,7 +323,7 @@ config REGULATOR_LP872X
 
 config REGULATOR_LP873X
        tristate "TI LP873X Power regulators"
-       depends on MFD_LP873X && OF
+       depends on MFD_TI_LP873X && OF
        help
          This driver supports LP873X voltage regulator chips. LP873X
          provides two step-down converters and two general-purpose LDO
index 241891a57caf8e97637d3e6c2ce6baecd021589a..df40692a9011ceb2cb2481af2eaa58a9ff92136e 100644 (file)
@@ -6,4 +6,8 @@
 # it under the terms of the GNU General Public License (version 2 only)
 # as published by the Free Software Foundation.
 
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
+s390-virtio-objs := virtio_ccw.o
+ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
+s390-virtio-objs += kvm_virtio.o
+endif
+obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
index 1d060fd293a3b8e8a4d4095b2ad84241913272d5..5e5c11f37b2420cbb406ff5591ad15fe615f5ed8 100644 (file)
@@ -458,6 +458,8 @@ static int __init kvm_devices_init(void)
        if (test_devices_support(total_memory_size) < 0)
                return -ENODEV;
 
+       pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
+
        rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
        if (rc)
                return rc;
@@ -482,7 +484,7 @@ static int __init kvm_devices_init(void)
 }
 
 /* code for early console output with virtio_console */
-static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+static int early_put_chars(u32 vtermno, const char *buf, int count)
 {
        char scratch[17];
        unsigned int len = count;
index bf85974be8621e16a130ffd711e18697231da3da..17d04c702e1ba13a1642d36a1de98fb7831665eb 100644 (file)
@@ -10410,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                __ipr_remove(pdev);
                return rc;
        }
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       ioa_cfg->scan_enabled = 1;
+       schedule_work(&ioa_cfg->work_q);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 
-       scsi_scan_host(ioa_cfg->host);
        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
@@ -10421,10 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
                }
        }
 
-       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-       ioa_cfg->scan_enabled = 1;
-       schedule_work(&ioa_cfg->work_q);
-       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+       scsi_scan_host(ioa_cfg->host);
+
        return 0;
 }
 
index 1b4ff0f4c7168f578253d248507cb57fc02245c2..ed5dd0e8865746fe928f08459218779b97227103 100644 (file)
@@ -426,6 +426,7 @@ clock_cooling_register(struct device *dev, const char *clock_name)
        if (!ccdev)
                return ERR_PTR(-ENOMEM);
 
+       mutex_init(&ccdev->lock);
        ccdev->dev = dev;
        ccdev->clk = devm_clk_get(dev, clock_name);
        if (IS_ERR(ccdev->clk))
index 34fe36504a552cdaf112a6982483655f25dc2238..68bd1b56911850ee41dd7c508c7cad7f7c278ddf 100644 (file)
@@ -116,7 +116,9 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
                instance->target = get_target_state(tz, cdev, percentage,
                                                    cur_trip_level);
 
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false;
+               mutex_unlock(&instance->cdev->lock);
                thermal_cdev_update(cdev);
        }
        return 0;
index fc52016d4e85be590e15f4e1976b8378d2c456d2..bb118a152cbbde3c570f90fb5355bc04afe54beb 100644 (file)
@@ -71,7 +71,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                dev_dbg(&instance->cdev->device, "target=%d\n",
                                        (int)instance->target);
 
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false; /* cdev needs update */
+               mutex_unlock(&instance->cdev->lock);
        }
 
        mutex_unlock(&tz->lock);
index 6a6ec1c95a7a2d04c4afd6b68af03d3335627f55..9b4815e81b0df01cf2160d752499b670c4a2d731 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/thermal.h>
+#include <linux/pm.h>
 
 /* Intel PCH thermal Device IDs */
 #define PCH_THERMAL_DID_WPT    0x9CA4 /* Wildcat Point */
@@ -65,6 +66,7 @@ struct pch_thermal_device {
        unsigned long crt_temp;
        int hot_trip_id;
        unsigned long hot_temp;
+       bool bios_enabled;
 };
 
 static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
@@ -75,8 +77,10 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
        *nr_trips = 0;
 
        /* Check if BIOS has already enabled thermal sensor */
-       if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+       if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) {
+               ptd->bios_enabled = true;
                goto read_trips;
+       }
 
        tsel = readb(ptd->hw_base + WPT_TSEL);
        /*
@@ -130,9 +134,39 @@ static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
        return 0;
 }
 
+static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+{
+       u8 tsel;
+
+       if (ptd->bios_enabled)
+               return 0;
+
+       tsel = readb(ptd->hw_base + WPT_TSEL);
+
+       writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL);
+
+       return 0;
+}
+
+static int pch_wpt_resume(struct pch_thermal_device *ptd)
+{
+       u8 tsel;
+
+       if (ptd->bios_enabled)
+               return 0;
+
+       tsel = readb(ptd->hw_base + WPT_TSEL);
+
+       writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+
+       return 0;
+}
+
 struct pch_dev_ops {
        int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
        int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+       int (*suspend)(struct pch_thermal_device *ptd);
+       int (*resume)(struct pch_thermal_device *ptd);
 };
 
 
@@ -140,6 +174,8 @@ struct pch_dev_ops {
 static const struct pch_dev_ops pch_dev_ops_wpt = {
        .hw_init = pch_wpt_init,
        .get_temp = pch_wpt_get_temp,
+       .suspend = pch_wpt_suspend,
+       .resume = pch_wpt_resume,
 };
 
 static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
@@ -269,6 +305,22 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+static int intel_pch_thermal_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+       return ptd->ops->suspend(ptd);
+}
+
+static int intel_pch_thermal_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+       return ptd->ops->resume(ptd);
+}
+
 static struct pci_device_id intel_pch_thermal_id[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
@@ -276,11 +328,17 @@ static struct pci_device_id intel_pch_thermal_id[] = {
 };
 MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
 
+static const struct dev_pm_ops intel_pch_pm_ops = {
+       .suspend = intel_pch_thermal_suspend,
+       .resume = intel_pch_thermal_resume,
+};
+
 static struct pci_driver intel_pch_thermal_driver = {
        .name           = "intel_pch_thermal",
        .id_table       = intel_pch_thermal_id,
        .probe          = intel_pch_thermal_probe,
        .remove         = intel_pch_thermal_remove,
+       .driver.pm      = &intel_pch_pm_ops,
 };
 
 module_pci_driver(intel_pch_thermal_driver);
index 015ce2eb6eb7ba0a254e2918c8d9710011283fda..0e4dc0afcfd244d510b003575249c4c3ce1d16bd 100644 (file)
@@ -388,7 +388,7 @@ static int clamp_thread(void *arg)
                int sleeptime;
                unsigned long target_jiffies;
                unsigned int guard;
-               unsigned int compensation = 0;
+               unsigned int compensated_ratio;
                int interval; /* jiffies to sleep for each attempt */
                unsigned int duration_jiffies = msecs_to_jiffies(duration);
                unsigned int window_size_now;
@@ -409,8 +409,11 @@ static int clamp_thread(void *arg)
                 * c-states, thus we need to compensate the injected idle ratio
                 * to achieve the actual target reported by the HW.
                 */
-               compensation = get_compensation(target_ratio);
-               interval = duration_jiffies*100/(target_ratio+compensation);
+               compensated_ratio = target_ratio +
+                       get_compensation(target_ratio);
+               if (compensated_ratio <= 0)
+                       compensated_ratio = 1;
+               interval = duration_jiffies * 100 / compensated_ratio;
 
                /* align idle time */
                target_jiffies = roundup(jiffies, interval);
@@ -647,8 +650,8 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
                goto exit_set;
        } else  if (set_target_ratio > 0 && new_target_ratio == 0) {
                pr_info("Stop forced idle injection\n");
-               set_target_ratio = 0;
                end_power_clamp();
+               set_target_ratio = 0;
        } else  /* adjust currently running */ {
                set_target_ratio = new_target_ratio;
                /* make new set_target_ratio visible to other cpus */
index 2f1a863a8e15bc834e007fea264d262d180a04cf..b4d3116cfdafe81767b2b1c91fcab4a034f29041 100644 (file)
@@ -529,7 +529,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
                        continue;
 
                instance->target = 0;
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false;
+               mutex_unlock(&instance->cdev->lock);
                thermal_cdev_update(instance->cdev);
        }
 }
index ea9366ad3e6bb285e52e368691a0d495cbb3429f..bcef2e7c4ec96f1cfc662019ccf0440d52d26328 100644 (file)
@@ -175,7 +175,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                        update_passive_instance(tz, trip_type, -1);
 
                instance->initialized = true;
+               mutex_lock(&instance->cdev->lock);
                instance->cdev->updated = false; /* cdev needs update */
+               mutex_unlock(&instance->cdev->lock);
        }
 
        mutex_unlock(&tz->lock);
index 5133cd1e10b7ae99d838823af2eef272aa00ab76..e2fc6161dded9650c300cf829a381c0a2d14b38c 100644 (file)
@@ -1093,7 +1093,9 @@ int power_actor_set_power(struct thermal_cooling_device *cdev,
                return ret;
 
        instance->target = state;
+       mutex_lock(&cdev->lock);
        cdev->updated = false;
+       mutex_unlock(&cdev->lock);
        thermal_cdev_update(cdev);
 
        return 0;
@@ -1623,11 +1625,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
        struct thermal_instance *instance;
        unsigned long target = 0;
 
+       mutex_lock(&cdev->lock);
        /* cooling device is updated*/
-       if (cdev->updated)
+       if (cdev->updated) {
+               mutex_unlock(&cdev->lock);
                return;
+       }
 
-       mutex_lock(&cdev->lock);
        /* Make sure cdev enters the deepest cooling state */
        list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
                dev_dbg(&cdev->device, "zone%d->target=%lu\n",
@@ -1637,9 +1641,9 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
                if (instance->target > target)
                        target = instance->target;
        }
-       mutex_unlock(&cdev->lock);
        cdev->ops->set_cur_state(cdev, target);
        cdev->updated = true;
+       mutex_unlock(&cdev->lock);
        trace_cdev_update(cdev, target);
        dev_dbg(&cdev->device, "set to state %lu\n", target);
 }
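
The common pattern in these thermal hunks is that cdev->updated is only ever flipped while holding cdev->lock, and thermal_cdev_update() now keeps the lock held across both the check and the hardware update, so a concurrent "updated = false" can no longer be lost between an unlocked check and set_cur_state(). A minimal user-space sketch of the same idea with pthreads; all names are illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    struct cooling_dev {
            pthread_mutex_t lock;
            bool updated;
            unsigned long target;
    };

    /* A governor wants a new state applied: mark the device stale under the lock. */
    static void request_update(struct cooling_dev *cdev, unsigned long target)
    {
            pthread_mutex_lock(&cdev->lock);
            cdev->target = target;
            cdev->updated = false;
            pthread_mutex_unlock(&cdev->lock);
    }

    /* Apply the state; the check and the 'updated = true' stay under one lock hold. */
    static void apply_update(struct cooling_dev *cdev)
    {
            pthread_mutex_lock(&cdev->lock);
            if (!cdev->updated) {
                    /* program the (imaginary) hardware with cdev->target here */
                    cdev->updated = true;
            }
            pthread_mutex_unlock(&cdev->lock);
    }
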
index 06fd2ed9ef9d13bf0ab09f727020f873152b8da9..c41c7742903ab43b2132241574376b85849666b9 100644 (file)
@@ -232,6 +232,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        return result;
 }
+EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);
 
 void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 {
@@ -270,3 +271,4 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        hwmon_device_unregister(hwmon->device);
        kfree(hwmon);
 }
+EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
index 15ecfc9c5f6c59e14cdf4477e401700df47e19ff..152b43822ef1912c980292f7927286e6df76656c 100644 (file)
@@ -564,67 +564,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
 }
 
 static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
-                                          uint32_t flags, void *data)
+                                          unsigned int count, uint32_t flags,
+                                          void *data)
 {
-       int32_t fd = *(int32_t *)data;
-
-       if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
-               return -EINVAL;
-
        /* DATA_NONE/DATA_BOOL enables loopback testing */
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
-               if (*ctx)
-                       eventfd_signal(*ctx, 1);
-               return 0;
+               if (*ctx) {
+                       if (count) {
+                               eventfd_signal(*ctx, 1);
+                       } else {
+                               eventfd_ctx_put(*ctx);
+                               *ctx = NULL;
+                       }
+                       return 0;
+               }
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
-               uint8_t trigger = *(uint8_t *)data;
+               uint8_t trigger;
+
+               if (!count)
+                       return -EINVAL;
+
+               trigger = *(uint8_t *)data;
                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);
-               return 0;
-       }
 
-       /* Handle SET_DATA_EVENTFD */
-       if (fd == -1) {
-               if (*ctx)
-                       eventfd_ctx_put(*ctx);
-               *ctx = NULL;
                return 0;
-       } else if (fd >= 0) {
-               struct eventfd_ctx *efdctx;
-               efdctx = eventfd_ctx_fdget(fd);
-               if (IS_ERR(efdctx))
-                       return PTR_ERR(efdctx);
-               if (*ctx)
-                       eventfd_ctx_put(*ctx);
-               *ctx = efdctx;
+       } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+               int32_t fd;
+
+               if (!count)
+                       return -EINVAL;
+
+               fd = *(int32_t *)data;
+               if (fd == -1) {
+                       if (*ctx)
+                               eventfd_ctx_put(*ctx);
+                       *ctx = NULL;
+               } else if (fd >= 0) {
+                       struct eventfd_ctx *efdctx;
+
+                       efdctx = eventfd_ctx_fdget(fd);
+                       if (IS_ERR(efdctx))
+                               return PTR_ERR(efdctx);
+
+                       if (*ctx)
+                               eventfd_ctx_put(*ctx);
+
+                       *ctx = efdctx;
+               }
                return 0;
-       } else
-               return -EINVAL;
+       }
+
+       return -EINVAL;
 }
 
 static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
 {
-       if (index != VFIO_PCI_ERR_IRQ_INDEX)
+       if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
                return -EINVAL;
 
-       /*
-        * We should sanitize start & count, but that wasn't caught
-        * originally, so this IRQ index must forever ignore them :-(
-        */
-
-       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+                                              count, flags, data);
 }
 
 static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
                                    unsigned index, unsigned start,
                                    unsigned count, uint32_t flags, void *data)
 {
-       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
                return -EINVAL;
 
-       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+                                              count, flags, data);
 }
 
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
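
With the rework above, the error and request IRQ indexes validate start and count, and count == 0 with DATA_NONE now tears the eventfd trigger down, while count == 1 still loops back a test signal. A hedged user-space sketch of disabling a previously registered error trigger; obtaining the VFIO device fd is omitted:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Sketch: tear down the error-IRQ eventfd trigger on an open VFIO device fd. */
    static int vfio_disable_err_trigger(int device_fd)
    {
            struct vfio_irq_set irq_set;

            memset(&irq_set, 0, sizeof(irq_set));
            irq_set.argsz = sizeof(irq_set);
            irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
            irq_set.index = VFIO_PCI_ERR_IRQ_INDEX;
            irq_set.start = 0;
            irq_set.count = 0;      /* count == 0 now means "remove the trigger" */

            return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
    }
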
index 0ddf3a2dbfc490a58d150039a57136460e9a1e08..e3b30ea9ece5945c935791798ab27ed8f6c3dd11 100644 (file)
@@ -307,6 +307,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 
        vhost_disable_notify(&vsock->dev, vq);
        for (;;) {
+               u32 len;
+
                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
@@ -334,13 +336,15 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                        continue;
                }
 
+               len = pkt->len;
+
                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);
 
-               vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+               vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
                added = true;
        }
 
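
The vhost-vsock change above snapshots pkt->len before virtio_transport_recv_pkt() takes ownership of (and may free) the packet, so vhost_add_used() no longer reads freed memory. A generic sketch of the pattern, with illustrative names only:

    #include <stdlib.h>

    struct pkt { unsigned int len; };

    /* Takes ownership of 'p' and may free it. */
    static void consume(struct pkt *p)
    {
            free(p);
    }

    static unsigned int handle(struct pkt *p)
    {
            unsigned int len = p->len;  /* snapshot before ownership moves */

            consume(p);                 /* 'p' must not be dereferenced after this */
            return len;                 /* safe: uses the saved copy */
    }
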
index 114a0c88afb8bfad71dc7b4612ccf9ed66f59d54..e383ecdaca594ce0786c321af35ee59b1807007e 100644 (file)
@@ -327,6 +327,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
                 * host should service the ring ASAP. */
                if (out_sgs)
                        vq->notify(&vq->vq);
+               if (indirect)
+                       kfree(desc);
                END_USE(vq);
                return -ENOSPC;
        }
@@ -426,6 +428,7 @@ unmap_release:
        if (indirect)
                kfree(desc);
 
+       END_USE(vq);
        return -EIO;
 }
 
index b6d210e7a993fd67634b3523aa3e61a1121d31bd..d9ddcfc18c91f8acd0ac977171116e3659aa2242 100644 (file)
@@ -862,33 +862,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
-                                    struct btrfs_trans_handle *trans,
-                                    u64 ref_root, u64 bytenr, u64 num_bytes)
-{
-       struct btrfs_delayed_ref_root *delayed_refs;
-       struct btrfs_delayed_ref_head *ref_head;
-       int ret = 0;
-
-       if (!fs_info->quota_enabled || !is_fstree(ref_root))
-               return 0;
-
-       delayed_refs = &trans->transaction->delayed_refs;
-
-       spin_lock(&delayed_refs->lock);
-       ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
-       if (!ref_head) {
-               ret = -ENOENT;
-               goto out;
-       }
-       WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
-       ref_head->qgroup_ref_root = ref_root;
-       ref_head->qgroup_reserved = num_bytes;
-out:
-       spin_unlock(&delayed_refs->lock);
-       return ret;
-}
-
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
index 5fca9534a2712b0b4dec9e9b15a1e024f272bb2f..43f3629760e90f186730842b0b1c609f799ae256 100644 (file)
@@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
-                                    struct btrfs_trans_handle *trans,
-                                    u64 ref_root, u64 bytenr, u64 num_bytes);
 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
index 9404121fd5f7b44f165c6f76c856548cf5722aff..5842423f8f47b6a7146240c6b47ea1ead637f984 100644 (file)
@@ -2033,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                 */
                clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                          &BTRFS_I(inode)->runtime_flags);
+               /*
+                * An ordered extent might have started before and completed
+                * already with io errors, in which case the inode was not
+                * updated and we end up here. So check the inode's mapping
+                * flags for any errors that might have happened while doing
+                * writeback of file data.
+                */
+               ret = btrfs_inode_check_errors(inode);
                inode_unlock(inode);
                goto out;
        }
index 2f5975954ccf198737e07b29c8706024114a78ae..08dfc57e22705363f1159def79316263a9f4293a 100644 (file)
@@ -3435,10 +3435,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                found_key.offset = 0;
                inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
                ret = PTR_ERR_OR_ZERO(inode);
-               if (ret && ret != -ESTALE)
+               if (ret && ret != -ENOENT)
                        goto out;
 
-               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+               if (ret == -ENOENT && root == root->fs_info->tree_root) {
                        struct btrfs_root *dead_root;
                        struct btrfs_fs_info *fs_info = root->fs_info;
                        int is_dead_root = 0;
@@ -3474,7 +3474,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
                 */
-               if (ret == -ESTALE) {
+               if (ret == -ENOENT) {
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
@@ -3633,7 +3633,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
 /*
  * read an inode from the btree into the in-memory inode
  */
-static void btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode)
 {
        struct btrfs_path *path;
        struct extent_buffer *leaf;
@@ -3652,14 +3652,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
                filled = true;
 
        path = btrfs_alloc_path();
-       if (!path)
+       if (!path) {
+               ret = -ENOMEM;
                goto make_bad;
+       }
 
        memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
        ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
-       if (ret)
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
                goto make_bad;
+       }
 
        leaf = path->nodes[0];
 
@@ -3812,11 +3817,12 @@ cache_acl:
        }
 
        btrfs_update_iflags(inode);
-       return;
+       return 0;
 
 make_bad:
        btrfs_free_path(path);
        make_bad_inode(inode);
+       return ret;
 }
 
 /*
@@ -4204,6 +4210,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        int err = 0;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_trans_handle *trans;
+       u64 last_unlink_trans;
 
        if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
@@ -4226,11 +4233,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (err)
                goto out;
 
+       last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+
        /* now the directory is empty */
        err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
                                 dentry->d_name.name, dentry->d_name.len);
-       if (!err)
+       if (!err) {
                btrfs_i_size_write(inode, 0);
+               /*
+                * Propagate the last_unlink_trans value of the deleted dir to
+                * its parent directory. This is to prevent an unrecoverable
+                * log tree in the case we do something like this:
+                * 1) create dir foo
+                * 2) create snapshot under dir foo
+                * 3) delete the snapshot
+                * 4) rmdir foo
+                * 5) mkdir foo
+                * 6) fsync foo or some file inside foo
+                */
+               if (last_unlink_trans >= trans->transid)
+                       BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+       }
 out:
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
@@ -5606,7 +5629,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                return ERR_PTR(-ENOMEM);
 
        if (inode->i_state & I_NEW) {
-               btrfs_read_locked_inode(inode);
+               int ret;
+
+               ret = btrfs_read_locked_inode(inode);
                if (!is_bad_inode(inode)) {
                        inode_tree_add(inode);
                        unlock_new_inode(inode);
@@ -5615,7 +5640,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                } else {
                        unlock_new_inode(inode);
                        iput(inode);
-                       inode = ERR_PTR(-ESTALE);
+                       ASSERT(ret < 0);
+                       inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
                }
        }
 
index b71dd298385c1b5cfb3c00761db0c8ee674e01e7..efe129fe26788c1078d737a2bc238373c2efcbac 100644 (file)
@@ -231,7 +231,6 @@ struct pending_dir_move {
        u64 parent_ino;
        u64 ino;
        u64 gen;
-       bool is_orphan;
        struct list_head update_refs;
 };
 
@@ -274,6 +273,39 @@ struct name_cache_entry {
        char name[];
 };
 
+static void inconsistent_snapshot_error(struct send_ctx *sctx,
+                                       enum btrfs_compare_tree_result result,
+                                       const char *what)
+{
+       const char *result_string;
+
+       switch (result) {
+       case BTRFS_COMPARE_TREE_NEW:
+               result_string = "new";
+               break;
+       case BTRFS_COMPARE_TREE_DELETED:
+               result_string = "deleted";
+               break;
+       case BTRFS_COMPARE_TREE_CHANGED:
+               result_string = "updated";
+               break;
+       case BTRFS_COMPARE_TREE_SAME:
+               ASSERT(0);
+               result_string = "unchanged";
+               break;
+       default:
+               ASSERT(0);
+               result_string = "unexpected";
+       }
+
+       btrfs_err(sctx->send_root->fs_info,
+                 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
+                 result_string, what, sctx->cmp_key->objectid,
+                 sctx->send_root->root_key.objectid,
+                 (sctx->parent_root ?
+                  sctx->parent_root->root_key.objectid : 0));
+}
+
 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 
 static struct waiting_dir_move *
@@ -1861,7 +1893,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
         * was already unlinked/moved, so we can safely assume that we will not
         * overwrite anything at this point in time.
         */
-       if (other_inode > sctx->send_progress) {
+       if (other_inode > sctx->send_progress ||
+           is_waiting_for_move(sctx, other_inode)) {
                ret = get_inode_info(sctx->parent_root, other_inode, NULL,
                                who_gen, NULL, NULL, NULL, NULL);
                if (ret < 0)
@@ -2502,6 +2535,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+       if (ret > 0)
+               ret = -ENOENT;
        if (ret < 0)
                goto out;
 
@@ -2947,6 +2982,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
                }
 
                if (loc.objectid > send_progress) {
+                       struct orphan_dir_info *odi;
+
+                       odi = get_orphan_dir_info(sctx, dir);
+                       free_orphan_dir_info(sctx, odi);
                        ret = 0;
                        goto out;
                }
@@ -3047,7 +3086,6 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        pm->parent_ino = parent_ino;
        pm->ino = ino;
        pm->gen = ino_gen;
-       pm->is_orphan = is_orphan;
        INIT_LIST_HEAD(&pm->list);
        INIT_LIST_HEAD(&pm->update_refs);
        RB_CLEAR_NODE(&pm->node);
@@ -3113,6 +3151,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
        return NULL;
 }
 
+static int path_loop(struct send_ctx *sctx, struct fs_path *name,
+                    u64 ino, u64 gen, u64 *ancestor_ino)
+{
+       int ret = 0;
+       u64 parent_inode = 0;
+       u64 parent_gen = 0;
+       u64 start_ino = ino;
+
+       *ancestor_ino = 0;
+       while (ino != BTRFS_FIRST_FREE_OBJECTID) {
+               fs_path_reset(name);
+
+               if (is_waiting_for_rm(sctx, ino))
+                       break;
+               if (is_waiting_for_move(sctx, ino)) {
+                       if (*ancestor_ino == 0)
+                               *ancestor_ino = ino;
+                       ret = get_first_ref(sctx->parent_root, ino,
+                                           &parent_inode, &parent_gen, name);
+               } else {
+                       ret = __get_cur_name_and_parent(sctx, ino, gen,
+                                                       &parent_inode,
+                                                       &parent_gen, name);
+                       if (ret > 0) {
+                               ret = 0;
+                               break;
+                       }
+               }
+               if (ret < 0)
+                       break;
+               if (parent_inode == start_ino) {
+                       ret = 1;
+                       if (*ancestor_ino == 0)
+                               *ancestor_ino = ino;
+                       break;
+               }
+               ino = parent_inode;
+               gen = parent_gen;
+       }
+       return ret;
+}
+
 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
 {
        struct fs_path *from_path = NULL;
@@ -3123,6 +3203,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        u64 parent_ino, parent_gen;
        struct waiting_dir_move *dm = NULL;
        u64 rmdir_ino = 0;
+       u64 ancestor;
+       bool is_orphan;
        int ret;
 
        name = fs_path_alloc();
@@ -3135,9 +3217,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        dm = get_waiting_dir_move(sctx, pm->ino);
        ASSERT(dm);
        rmdir_ino = dm->rmdir_ino;
+       is_orphan = dm->orphanized;
        free_waiting_dir_move(sctx, dm);
 
-       if (pm->is_orphan) {
+       if (is_orphan) {
                ret = gen_unique_name(sctx, pm->ino,
                                      pm->gen, from_path);
        } else {
@@ -3155,6 +3238,24 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                goto out;
 
        sctx->send_progress = sctx->cur_ino + 1;
+       ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
+       if (ret < 0)
+               goto out;
+       if (ret) {
+               LIST_HEAD(deleted_refs);
+               ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
+               ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
+                                          &pm->update_refs, &deleted_refs,
+                                          is_orphan);
+               if (ret < 0)
+                       goto out;
+               if (rmdir_ino) {
+                       dm = get_waiting_dir_move(sctx, pm->ino);
+                       ASSERT(dm);
+                       dm->rmdir_ino = rmdir_ino;
+               }
+               goto out;
+       }
        fs_path_reset(name);
        to_path = name;
        name = NULL;
@@ -3174,7 +3275,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                        /* already deleted */
                        goto finish;
                }
-               ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
+               ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
                if (ret < 0)
                        goto out;
                if (!ret)
@@ -3204,8 +3305,18 @@ finish:
         * and old parent(s).
         */
        list_for_each_entry(cur, &pm->update_refs, list) {
-               if (cur->dir == rmdir_ino)
+               /*
+                * The parent inode might have been deleted in the send snapshot
+                */
+               ret = get_inode_info(sctx->send_root, cur->dir, NULL,
+                                    NULL, NULL, NULL, NULL, NULL);
+               if (ret == -ENOENT) {
+                       ret = 0;
                        continue;
+               }
+               if (ret < 0)
+                       goto out;
+
                ret = send_utimes(sctx, cur->dir, cur->dir_gen);
                if (ret < 0)
                        goto out;
@@ -3325,6 +3436,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
        u64 left_gen;
        u64 right_gen;
        int ret = 0;
+       struct waiting_dir_move *wdm;
 
        if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
                return 0;
@@ -3383,7 +3495,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
                goto out;
        }
 
-       if (is_waiting_for_move(sctx, di_key.objectid)) {
+       wdm = get_waiting_dir_move(sctx, di_key.objectid);
+       if (wdm && !wdm->orphanized) {
                ret = add_pending_dir_move(sctx,
                                           sctx->cur_ino,
                                           sctx->cur_inode_gen,
@@ -3470,7 +3583,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
                        ret = is_ancestor(sctx->parent_root,
                                          sctx->cur_ino, sctx->cur_inode_gen,
                                          ino, path_before);
-                       break;
+                       if (ret)
+                               break;
                }
 
                fs_path_reset(path_before);
@@ -3643,11 +3757,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                                goto out;
                        if (ret) {
                                struct name_cache_entry *nce;
+                               struct waiting_dir_move *wdm;
 
                                ret = orphanize_inode(sctx, ow_inode, ow_gen,
                                                cur->full_path);
                                if (ret < 0)
                                        goto out;
+
+                               /*
+                                * If ow_inode has its rename operation delayed
+                                * make sure that its orphanized name is used in
+                                * the source path when performing its rename
+                                * operation.
+                                */
+                               if (is_waiting_for_move(sctx, ow_inode)) {
+                                       wdm = get_waiting_dir_move(sctx,
+                                                                  ow_inode);
+                                       ASSERT(wdm);
+                                       wdm->orphanized = true;
+                               }
+
                                /*
                                 * Make sure we clear our orphanized inode's
                                 * name from the name cache. This is because the
@@ -3663,6 +3792,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                                        name_cache_delete(sctx, nce);
                                        kfree(nce);
                                }
+
+                               /*
+                                * ow_inode might currently be an ancestor of
+                                * cur_ino, therefore compute valid_path (the
+                                * current path of cur_ino) again because it
+                                * might contain the pre-orphanization name of
+                                * ow_inode, which is no longer valid.
+                                */
+                               fs_path_reset(valid_path);
+                               ret = get_cur_path(sctx, sctx->cur_ino,
+                                          sctx->cur_inode_gen, valid_path);
+                               if (ret < 0)
+                                       goto out;
                        } else {
                                ret = send_unlink(sctx, cur->full_path);
                                if (ret < 0)
@@ -5602,7 +5744,10 @@ static int changed_ref(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "reference");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen &&
            sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
@@ -5627,7 +5772,10 @@ static int changed_xattr(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "xattr");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result == BTRFS_COMPARE_TREE_NEW)
@@ -5651,7 +5799,10 @@ static int changed_extent(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+       if (sctx->cur_ino != sctx->cmp_key->objectid) {
+               inconsistent_snapshot_error(sctx, result, "extent");
+               return -EIO;
+       }
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result != BTRFS_COMPARE_TREE_DELETED)
index d31a0c4f56bed436e0eb933cceb592fdc498eb53..fff3f3efa43602e0c04f9e5e019bf0e85a239d6f 100644 (file)
@@ -4469,7 +4469,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
                                         const int slot,
                                         const struct btrfs_key *key,
-                                        struct inode *inode)
+                                        struct inode *inode,
+                                        u64 *other_ino)
 {
        int ret;
        struct btrfs_path *search_path;
@@ -4528,7 +4529,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
                                           search_path, parent,
                                           name, this_name_len, 0);
                if (di && !IS_ERR(di)) {
-                       ret = 1;
+                       struct btrfs_key di_key;
+
+                       btrfs_dir_item_key_to_cpu(search_path->nodes[0],
+                                                 di, &di_key);
+                       if (di_key.type == BTRFS_INODE_ITEM_KEY) {
+                               ret = 1;
+                               *other_ino = di_key.objectid;
+                       } else {
+                               ret = -EAGAIN;
+                       }
                        goto out;
                } else if (IS_ERR(di)) {
                        ret = PTR_ERR(di);
@@ -4722,16 +4732,71 @@ again:
                if ((min_key.type == BTRFS_INODE_REF_KEY ||
                     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
                    BTRFS_I(inode)->generation == trans->transid) {
+                       u64 other_ino = 0;
+
                        ret = btrfs_check_ref_name_override(path->nodes[0],
                                                            path->slots[0],
-                                                           &min_key, inode);
+                                                           &min_key, inode,
+                                                           &other_ino);
                        if (ret < 0) {
                                err = ret;
                                goto out_unlock;
                        } else if (ret > 0) {
-                               err = 1;
-                               btrfs_set_log_full_commit(root->fs_info, trans);
-                               goto out_unlock;
+                               struct btrfs_key inode_key;
+                               struct inode *other_inode;
+
+                               if (ins_nr > 0) {
+                                       ins_nr++;
+                               } else {
+                                       ins_nr = 1;
+                                       ins_start_slot = path->slots[0];
+                               }
+                               ret = copy_items(trans, inode, dst_path, path,
+                                                &last_extent, ins_start_slot,
+                                                ins_nr, inode_only,
+                                                logged_isize);
+                               if (ret < 0) {
+                                       err = ret;
+                                       goto out_unlock;
+                               }
+                               ins_nr = 0;
+                               btrfs_release_path(path);
+                               inode_key.objectid = other_ino;
+                               inode_key.type = BTRFS_INODE_ITEM_KEY;
+                               inode_key.offset = 0;
+                               other_inode = btrfs_iget(root->fs_info->sb,
+                                                        &inode_key, root,
+                                                        NULL);
+                               /*
+                                * If the other inode that had a conflicting dir
+                                * entry was deleted in the current transaction,
+                                * we don't need to do more work nor fall back to
+                                * a transaction commit.
+                                */
+                               if (IS_ERR(other_inode) &&
+                                   PTR_ERR(other_inode) == -ENOENT) {
+                                       goto next_key;
+                               } else if (IS_ERR(other_inode)) {
+                                       err = PTR_ERR(other_inode);
+                                       goto out_unlock;
+                               }
+                               /*
+                                * We are safe logging the other inode without
+                                * acquiring its i_mutex as long as we log with
+                                * the LOG_INODE_EXISTS mode. We're safe against
+                                * concurrent renames of the other inode as well
+                                * because during a rename we pin the log and
+                                * update the log with the new name before we
+                                * unpin it.
+                                */
+                               err = btrfs_log_inode(trans, root, other_inode,
+                                                     LOG_INODE_EXISTS,
+                                                     0, LLONG_MAX, ctx);
+                               iput(other_inode);
+                               if (err)
+                                       goto out_unlock;
+                               else
+                                       goto next_key;
                        }
                }
 
@@ -4799,7 +4864,7 @@ next_slot:
                        ins_nr = 0;
                }
                btrfs_release_path(path);
-
+next_key:
                if (min_key.offset < (u64)-1) {
                        min_key.offset++;
                } else if (min_key.type < max_key.type) {
@@ -4993,8 +5058,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
                if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
                        break;
 
-               if (IS_ROOT(parent))
+               if (IS_ROOT(parent)) {
+                       inode = d_inode(parent);
+                       if (btrfs_must_commit_transaction(trans, inode))
+                               ret = 1;
                        break;
+               }
 
                parent = dget_parent(parent);
                dput(old_parent);
index 99115cae1652ac1661d37a5a286da7180b6d9d94..16e6ded0b7f281bf72e8074b9d2896713fe4aef2 100644 (file)
@@ -1347,9 +1347,12 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
-       struct ceph_mds_session *session = *psession;
+       struct ceph_mds_session *session = NULL;
        int mds;
+
        dout("ceph_flush_snaps %p\n", inode);
+       if (psession)
+               session = *psession;
 retry:
        spin_lock(&ci->i_ceph_lock);
        if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
index fa59a85226b262f2fe086ec5dfc1bf6813711986..f72d4ae303b273a98ee2631d8ed3dde21a71e796 100644 (file)
@@ -2759,6 +2759,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        } else {
                path = NULL;
                pathlen = 0;
+               pathbase = 0;
        }
 
        spin_lock(&ci->i_ceph_lock);
index 4d09d4441e3ee4fb7a2939a5c2cabbfa9e08070e..05713a5da0834233edad0049d7443dd63e6c1078 100644 (file)
@@ -1949,6 +1949,12 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
        struct backing_dev_info *bdi;
 
+       /*
+        * If we are expecting writeback progress we must submit plugged IO.
+        */
+       if (blk_needs_flush_plug(current))
+               blk_schedule_flush_plug(current);
+
        if (!nr_pages)
                nr_pages = get_nr_dirty_pages();
 
index 33da841a21bb2871f753fb38a72dd76ce2725ded..6f47527348042dc83f5b97f48870eed6d1afcf41 100644 (file)
@@ -338,6 +338,8 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
        case 0:
                break;
        case -NFS4ERR_EXPIRED:
+       case -NFS4ERR_ADMIN_REVOKED:
+       case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_BAD_STATEID:
index 324bfdc212504de591347da77c1ad3f1db595974..9bf64eacba5bd6d47a04ca3c9ac66b74912bdc72 100644 (file)
@@ -396,6 +396,10 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
 extern void nfs4_renew_state(struct work_struct *);
+extern void nfs4_set_lease_period(struct nfs_client *clp,
+               unsigned long lease,
+               unsigned long lastrenewed);
+
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
index a036e93bdf9656813abec3a3565e26fca3b28838..1949bbd806ebd4381ec54ea06ea97fa2f5619bed 100644 (file)
@@ -4237,12 +4237,9 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
                err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
                trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
                if (err == 0) {
-                       struct nfs_client *clp = server->nfs_client;
-
-                       spin_lock(&clp->cl_lock);
-                       clp->cl_lease_time = fsinfo->lease_time * HZ;
-                       clp->cl_last_renewal = now;
-                       spin_unlock(&clp->cl_lock);
+                       nfs4_set_lease_period(server->nfs_client,
+                                       fsinfo->lease_time * HZ,
+                                       now);
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
index e1ba58c3d1ad305ab28d932a5b90ac269092f98b..82e77198d17efdf656315f39c3d50c4d9aa568a9 100644 (file)
@@ -136,6 +136,26 @@ nfs4_kill_renewd(struct nfs_client *clp)
        cancel_delayed_work_sync(&clp->cl_renewd);
 }
 
+/**
+ * nfs4_set_lease_period - Sets the lease period on a nfs_client
+ *
+ * @clp: pointer to nfs_client
+ * @lease: new value for lease period
+ * @lastrenewed: time at which lease was last renewed
+ */
+void nfs4_set_lease_period(struct nfs_client *clp,
+               unsigned long lease,
+               unsigned long lastrenewed)
+{
+       spin_lock(&clp->cl_lock);
+       clp->cl_lease_time = lease;
+       clp->cl_last_renewal = lastrenewed;
+       spin_unlock(&clp->cl_lock);
+
+       /* Cap maximum reconnect timeout at 1/2 lease period */
+       rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1);
+}
+
 /*
  * Local variables:
  *   c-basic-offset: 8
index 834b875900d62addf6db7f6eb590ce5c2d1b09bc..cada00aa5096d7dbd57a0b8717d1a6583abbbfa7 100644 (file)
@@ -277,20 +277,17 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 {
        int status;
        struct nfs_fsinfo fsinfo;
+       unsigned long now;
 
        if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
                nfs4_schedule_state_renewal(clp);
                return 0;
        }
 
+       now = jiffies;
        status = nfs4_proc_get_lease_time(clp, &fsinfo);
        if (status == 0) {
-               /* Update lease time and schedule renewal */
-               spin_lock(&clp->cl_lock);
-               clp->cl_lease_time = fsinfo.lease_time * HZ;
-               clp->cl_last_renewal = jiffies;
-               spin_unlock(&clp->cl_lock);
-
+               nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
                nfs4_schedule_state_renewal(clp);
        }
 
index 8410ca275db1aecf0a1f8a022b92cdc1597ff258..a204d7e109d4d63a76d01a31198b00f3f1cd09be 100644 (file)
@@ -4903,6 +4903,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        return nfs_ok;
 }
 
+static __be32
+nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
+{
+       struct nfs4_ol_stateid *stp = openlockstateid(s);
+       __be32 ret;
+
+       mutex_lock(&stp->st_mutex);
+
+       ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+       if (ret)
+               goto out;
+
+       ret = nfserr_locks_held;
+       if (check_for_locks(stp->st_stid.sc_file,
+                           lockowner(stp->st_stateowner)))
+               goto out;
+
+       release_lock_stateid(stp);
+       ret = nfs_ok;
+
+out:
+       mutex_unlock(&stp->st_mutex);
+       nfs4_put_stid(s);
+       return ret;
+}
+
 __be32
 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                   struct nfsd4_free_stateid *free_stateid)
@@ -4910,7 +4936,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        stateid_t *stateid = &free_stateid->fr_stateid;
        struct nfs4_stid *s;
        struct nfs4_delegation *dp;
-       struct nfs4_ol_stateid *stp;
        struct nfs4_client *cl = cstate->session->se_client;
        __be32 ret = nfserr_bad_stateid;
 
@@ -4929,18 +4954,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
-               ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
-               if (ret)
-                       break;
-               stp = openlockstateid(s);
-               ret = nfserr_locks_held;
-               if (check_for_locks(stp->st_stid.sc_file,
-                                   lockowner(stp->st_stateowner)))
-                       break;
-               WARN_ON(!unhash_lock_stateid(stp));
+               atomic_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
-               nfs4_put_stid(s);
-               ret = nfs_ok;
+               ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
        case NFS4_REVOKED_DELEG_STID:
                dp = delegstateid(s);
@@ -5507,7 +5523,7 @@ static __be32
 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                            struct nfs4_ol_stateid *ost,
                            struct nfsd4_lock *lock,
-                           struct nfs4_ol_stateid **lst, bool *new)
+                           struct nfs4_ol_stateid **plst, bool *new)
 {
        __be32 status;
        struct nfs4_file *fi = ost->st_stid.sc_file;
@@ -5515,7 +5531,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
        struct nfs4_client *cl = oo->oo_owner.so_client;
        struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
        struct nfs4_lockowner *lo;
+       struct nfs4_ol_stateid *lst;
        unsigned int strhashval;
+       bool hashed;
 
        lo = find_lockowner_str(cl, &lock->lk_new_owner);
        if (!lo) {
@@ -5531,12 +5549,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                        goto out;
        }
 
-       *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
-       if (*lst == NULL) {
+retry:
+       lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
+       if (lst == NULL) {
                status = nfserr_jukebox;
                goto out;
        }
+
+       mutex_lock(&lst->st_mutex);
+
+       /* See if it's still hashed to avoid race with FREE_STATEID */
+       spin_lock(&cl->cl_lock);
+       hashed = !list_empty(&lst->st_perfile);
+       spin_unlock(&cl->cl_lock);
+
+       if (!hashed) {
+               mutex_unlock(&lst->st_mutex);
+               nfs4_put_stid(&lst->st_stid);
+               goto retry;
+       }
        status = nfs_ok;
+       *plst = lst;
 out:
        nfs4_put_stateowner(&lo->lo_owner);
        return status;
@@ -5603,8 +5636,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        goto out;
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
                                                        &lock_stp, &new);
-               if (status == nfs_ok)
-                       mutex_lock(&lock_stp->st_mutex);
        } else {
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
index ba944123167b92f3a7460d8acc66b02dc7c53575..ff476e654b8f8044b84808b1c92c54055e4ca393 100644 (file)
@@ -1252,10 +1252,13 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (IS_ERR(dchild))
                return nfserrno(host_err);
        err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
-       if (err) {
-               dput(dchild);
+       /*
+        * We unconditionally drop our ref to dchild as fh_compose will have
+        * already grabbed its own ref for it.
+        */
+       dput(dchild);
+       if (err)
                return err;
-       }
        return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
                                        rdev, resfhp);
 }
index 4b32928f542661f5e04730ca8c8cb134e59ea531..4ebe6b2e5217c2e26c7185bcf4254d8af3b55872 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -144,10 +144,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
        struct page *page = buf->page;
 
        if (page_count(page) == 1) {
-               if (memcg_kmem_enabled()) {
+               if (memcg_kmem_enabled())
                        memcg_kmem_uncharge(page, 0);
-                       __ClearPageKmemcg(page);
-               }
                __SetPageLocked(page);
                return 0;
        }
index 09e18fdf61e5b48dd67234b19972a10dd5131662..b9a8c813e5e66b5e751080e1bd7b11b7e8d87634 100644 (file)
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                cached = 0;
 
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-               pages[lru] = global_page_state(NR_LRU_BASE + lru);
+               pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
        available = si_mem_available();
 
index 54a8e65e18b622edf10df6d07ca0d84fea9e49ae..7d026bf277131f7bc4c79529cc26488e3a8281ee 100644 (file)
 #include <asm-generic/qrwlock_types.h>
 
 /*
- * Writer states & reader shift and bias
+ * Writer states & reader shift and bias.
+ *
+ *       | +0 | +1 | +2 | +3 |
+ *   ----+----+----+----+----+
+ *    LE | 78 | 56 | 34 | 12 | 0x12345678
+ *   ----+----+----+----+----+
+ *       | wr |      rd      |
+ *       +----+----+----+----+
+ *
+ *   ----+----+----+----+----+
+ *    BE | 12 | 34 | 56 | 78 | 0x12345678
+ *   ----+----+----+----+----+
+ *       |      rd      | wr |
+ *       +----+----+----+----+
  */
 #define        _QW_WAITING     1               /* A writer is waiting     */
 #define        _QW_LOCKED      0xff            /* A writer holds the lock */
@@ -133,13 +146,23 @@ static inline void queued_read_unlock(struct qrwlock *lock)
        (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
+/**
+ * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: the write byte address of a queue rwlock
+ */
+static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
+{
+       return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
+}
+
 /**
  * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-       smp_store_release((u8 *)&lock->cnts, 0);
+       smp_store_release(__qrwlock_write_byte(lock), 0);
 }
 
 /*
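For readers following the qrwlock change above: the new __qrwlock_write_byte() helper returns the address of the writer byte, which sits at offset 0 on little-endian and offset 3 on big-endian, so it always aliases the least-significant byte of cnts, exactly as the layout diagram shows. A minimal user-space sketch of that arithmetic (not kernel code; the fake_qrwlock type is a made-up stand-in):

    /* Demonstrates the offset computed by __qrwlock_write_byte(): the
     * writer byte is the LSB of cnts on either byte order. */
    #include <stdint.h>
    #include <stdio.h>

    struct fake_qrwlock { uint32_t cnts; };        /* stand-in for the real lock */

    static uint8_t *write_byte(struct fake_qrwlock *lock, int big_endian)
    {
            return (uint8_t *)lock + 3 * big_endian;
    }

    int main(void)
    {
            struct fake_qrwlock lock = { .cnts = 0x12345678 };
            uint32_t probe = 1;
            int big_endian = (*(uint8_t *)&probe == 0);     /* runtime byte-order check */

            /* Prints 0x78 on both byte orders: the wr byte is always the LSB. */
            printf("writer byte holds 0x%02x\n", *write_byte(&lock, big_endian));
            return 0;
    }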
index 4348d6d5877a213b95c4b4e546287c9032b44a1a..99c6d01d24f2439f2c8134d546bf287c2da0496e 100644 (file)
@@ -962,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -976,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait_gpu,
+                          bool evict, bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem);
 
 /**
index 701b64a3b7c5e3e94b0487f0ec73801d06715817..89b65b82d98f5c5e77c34f967e856dcc6028dabe 100644 (file)
@@ -74,7 +74,8 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
                  "Attempted to advance past end of bvec iter\n");
 
        while (bytes) {
-               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+               unsigned iter_len = bvec_iter_len(bv, *iter);
+               unsigned len = min(bytes, iter_len);
 
                bytes -= len;
                iter->bi_size -= len;
index 420b837f2aa7b1e29f100a8e94e683d31bf71e2f..1f0be7213e6dfd6a03da165da1e32d93eab1e7df 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/device.h>
 #include <linux/types.h>
-#include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
@@ -16,6 +15,7 @@ struct of_phandle_args;
 struct device_node;
 struct seq_file;
 struct gpio_device;
+struct module;
 
 #ifdef CONFIG_GPIOLIB
 
index 01e908ac4a39a7ed65a68a59f097b302a80c3b57..9c28b4d4c90b137ac72acad323ae2785e3d4dcce 100644 (file)
@@ -1113,8 +1113,20 @@ struct kvm_device {
 /* create, destroy, and name are mandatory */
 struct kvm_device_ops {
        const char *name;
+
+       /*
+        * create is called holding kvm->lock and any operations not suitable
+        * to do while holding the lock should be deferred to init (see
+        * below).
+        */
        int (*create)(struct kvm_device *dev, u32 type);
 
+       /*
+        * init is called after create if create is successful and is called
+        * outside of holding kvm->lock.
+        */
+       void (*init)(struct kvm_device *dev);
+
        /*
         * Destroy is responsible for freeing dev.
         *
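A hedged sketch of a device backend adopting the new hook: work that is not safe to do under kvm->lock moves from create() to init(), which runs only after a successful create() and without the lock held. The "example_dev" names are illustrative, not code from this series:

    /* Sketch only; assumes <linux/kvm_host.h> and <linux/slab.h>. */
    static int example_dev_create(struct kvm_device *dev, u32 type)
    {
            /* Called with kvm->lock held: keep this to cheap, lock-safe setup. */
            return 0;
    }

    static void example_dev_init(struct kvm_device *dev)
    {
            /* Called outside kvm->lock, only after create() succeeded. */
    }

    static void example_dev_destroy(struct kvm_device *dev)
    {
            kfree(dev);     /* destroy is responsible for freeing dev */
    }

    static struct kvm_device_ops example_dev_ops = {
            .name    = "example-dev",
            .create  = example_dev_create,
            .init    = example_dev_init,
            .destroy = example_dev_destroy,
    };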
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
new file mode 100644 (file)
index 0000000..83b1bd7
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * Functions to access LP873X power management chip.
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_LP873X_H
+#define __LINUX_MFD_LP873X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/* LP873x chip id list */
+#define LP873X                 0x00
+
+/* All register addresses */
+#define LP873X_REG_DEV_REV             0X00
+#define LP873X_REG_OTP_REV             0X01
+#define LP873X_REG_BUCK0_CTRL_1                0X02
+#define LP873X_REG_BUCK0_CTRL_2                0X03
+#define LP873X_REG_BUCK1_CTRL_1                0X04
+#define LP873X_REG_BUCK1_CTRL_2                0X05
+#define LP873X_REG_BUCK0_VOUT          0X06
+#define LP873X_REG_BUCK1_VOUT          0X07
+#define LP873X_REG_LDO0_CTRL           0X08
+#define LP873X_REG_LDO1_CTRL            0X09
+#define LP873X_REG_LDO0_VOUT           0X0A
+#define LP873X_REG_LDO1_VOUT           0X0B
+#define LP873X_REG_BUCK0_DELAY         0X0C
+#define LP873X_REG_BUCK1_DELAY         0X0D
+#define LP873X_REG_LDO0_DELAY          0X0E
+#define LP873X_REG_LDO1_DELAY          0X0F
+#define LP873X_REG_GPO_DELAY           0X10
+#define LP873X_REG_GPO2_DELAY          0X11
+#define LP873X_REG_GPO_CTRL            0X12
+#define LP873X_REG_CONFIG              0X13
+#define LP873X_REG_PLL_CTRL            0X14
+#define LP873X_REG_PGOOD_CTRL1         0X15
+#define LP873X_REG_PGOOD_CTRL2         0X16
+#define LP873X_REG_PG_FAULT            0X17
+#define LP873X_REG_RESET               0X18
+#define LP873X_REG_INT_TOP_1           0X19
+#define LP873X_REG_INT_TOP_2           0X1A
+#define LP873X_REG_INT_BUCK            0X1B
+#define LP873X_REG_INT_LDO             0X1C
+#define LP873X_REG_TOP_STAT            0X1D
+#define LP873X_REG_BUCK_STAT           0X1E
+#define LP873X_REG_LDO_STAT            0x1F
+#define LP873X_REG_TOP_MASK_1          0x20
+#define LP873X_REG_TOP_MASK_2          0x21
+#define LP873X_REG_BUCK_MASK           0x22
+#define LP873X_REG_LDO_MASK            0x23
+#define LP873X_REG_SEL_I_LOAD          0x24
+#define LP873X_REG_I_LOAD_2            0x25
+#define LP873X_REG_I_LOAD_1            0x26
+
+#define LP873X_REG_MAX                 LP873X_REG_I_LOAD_1
+
+/* Register field definitions */
+#define LP873X_DEV_REV_DEV_ID                  0xC0
+#define LP873X_DEV_REV_ALL_LAYER               0x30
+#define LP873X_DEV_REV_METAL_LAYER             0x0F
+
+#define LP873X_OTP_REV_OTP_ID                  0xFF
+
+#define LP873X_BUCK0_CTRL_1_BUCK0_FPWM         BIT(3)
+#define LP873X_BUCK0_CTRL_1_BUCK0_RDIS_EN      BIT(2)
+#define LP873X_BUCK0_CTRL_1_BUCK0_EN_PIN_CTRL  BIT(1)
+#define LP873X_BUCK0_CTRL_1_BUCK0_EN           BIT(0)
+
+#define LP873X_BUCK0_CTRL_2_BUCK0_ILIM         0x38
+#define LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE    0x07
+
+#define LP873X_BUCK1_CTRL_1_BUCK1_FPWM         BIT(3)
+#define LP873X_BUCK1_CTRL_1_BUCK1_RDIS_EN      BIT(2)
+#define LP873X_BUCK1_CTRL_1_BUCK1_EN_PIN_CTRL  BIT(1)
+#define LP873X_BUCK1_CTRL_1_BUCK1_EN           BIT(0)
+
+#define LP873X_BUCK1_CTRL_2_BUCK1_ILIM         0x38
+#define LP873X_BUCK1_CTRL_2_BUCK1_SLEW_RATE    0x07
+
+#define LP873X_BUCK0_VOUT_BUCK0_VSET           0xFF
+
+#define LP873X_BUCK1_VOUT_BUCK1_VSET           0xFF
+
+#define LP873X_LDO0_CTRL_LDO0_RDIS_EN          BIT(2)
+#define LP873X_LDO0_CTRL_LDO0_EN_PIN_CTRL      BIT(1)
+#define LP873X_LDO0_CTRL_LDO0_EN               BIT(0)
+
+#define LP873X_LDO1_CTRL_LDO1_RDIS_EN          BIT(2)
+#define LP873X_LDO1_CTRL_LDO1_EN_PIN_CTRL      BIT(1)
+#define LP873X_LDO1_CTRL_LDO1_EN               BIT(0)
+
+#define LP873X_LDO0_VOUT_LDO0_VSET             0x1F
+
+#define LP873X_LDO1_VOUT_LDO1_VSET             0x1F
+
+#define LP873X_BUCK0_DELAY_BUCK0_SD_DELAY      0xF0
+#define LP873X_BUCK0_DELAY_BUCK0_SU_DELAY      0x0F
+
+#define LP873X_BUCK1_DELAY_BUCK1_SD_DELAY      0xF0
+#define LP873X_BUCK1_DELAY_BUCK1_SU_DELAY      0x0F
+
+#define LP873X_LDO0_DELAY_LDO0_SD_DELAY        0xF0
+#define LP873X_LDO0_DELAY_LDO0_SU_DELAY        0x0F
+
+#define LP873X_LDO1_DELAY_LDO1_SD_DELAY        0xF0
+#define LP873X_LDO1_DELAY_LDO1_SU_DELAY        0x0F
+
+#define LP873X_GPO_DELAY_GPO_SD_DELAY          0xF0
+#define LP873X_GPO_DELAY_GPO_SU_DELAY          0x0F
+
+#define LP873X_GPO2_DELAY_GPO2_SD_DELAY        0xF0
+#define LP873X_GPO2_DELAY_GPO2_SU_DELAY        0x0F
+
+#define LP873X_GPO_CTRL_GPO2_OD                BIT(6)
+#define LP873X_GPO_CTRL_GPO2_EN_PIN_CTRL       BIT(5)
+#define LP873X_GPO_CTRL_GPO2_EN                BIT(4)
+#define LP873X_GPO_CTRL_GPO_OD                 BIT(2)
+#define LP873X_GPO_CTRL_GPO_EN_PIN_CTRL        BIT(1)
+#define LP873X_GPO_CTRL_GPO_EN                 BIT(0)
+
+#define LP873X_CONFIG_SU_DELAY_SEL             BIT(6)
+#define LP873X_CONFIG_SD_DELAY_SEL             BIT(5)
+#define LP873X_CONFIG_CLKIN_PIN_SEL            BIT(4)
+#define LP873X_CONFIG_CLKIN_PD                 BIT(3)
+#define LP873X_CONFIG_EN_PD                    BIT(2)
+#define LP873X_CONFIG_TDIE_WARN_LEVEL          BIT(1)
+#define LP873X_EN_SPREAD_SPEC                  BIT(0)
+
+#define LP873X_PLL_CTRL_EN_PLL                 BIT(6)
+#define LP873X_EXT_CLK_FREQ                    0x1F
+
+#define LP873X_PGOOD_CTRL1_PGOOD_POL           BIT(7)
+#define LP873X_PGOOD_CTRL1_PGOOD_OD            BIT(6)
+#define LP873X_PGOOD_CTRL1_PGOOD_WINDOW_LDO    BIT(5)
+#define LP873X_PGOOD_CTRL1_PGOOD_WINDOWN_BUCK  BIT(4)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO1 BIT(3)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO0 BIT(2)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK1        BIT(1)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK0        BIT(0)
+
+#define LP873X_PGOOD_CTRL2_EN_PGOOD_TWARN      BIT(2)
+#define LP873X_PGOOD_CTRL2_EN_PG_FAULT_GATE    BIT(1)
+#define LP873X_PGOOD_CTRL2_PGOOD_MODE          BIT(0)
+
+#define LP873X_PG_FAULT_PG_FAULT_LDO1          BIT(3)
+#define LP873X_PG_FAULT_PG_FAULT_LDO0          BIT(2)
+#define LP873X_PG_FAULT_PG_FAULT_BUCK1         BIT(1)
+#define LP873X_PG_FAULT_PG_FAULT_BUCK0         BIT(0)
+
+#define LP873X_RESET_SW_RESET                  BIT(0)
+
+#define LP873X_INT_TOP_1_PGOOD_INT             BIT(7)
+#define LP873X_INT_TOP_1_LDO_INT               BIT(6)
+#define LP873X_INT_TOP_1_BUCK_INT              BIT(5)
+#define LP873X_INT_TOP_1_SYNC_CLK_INT          BIT(4)
+#define LP873X_INT_TOP_1_TDIE_SD_INT           BIT(3)
+#define LP873X_INT_TOP_1_TDIE_WARN_INT         BIT(2)
+#define LP873X_INT_TOP_1_OVP_INT               BIT(1)
+#define LP873X_INT_TOP_1_I_MEAS_INT            BIT(0)
+
+#define LP873X_INT_TOP_2_RESET_REG_INT         BIT(0)
+
+#define LP873X_INT_BUCK_BUCK1_PG_INT           BIT(6)
+#define LP873X_INT_BUCK_BUCK1_SC_INT           BIT(5)
+#define LP873X_INT_BUCK_BUCK1_ILIM_INT         BIT(4)
+#define LP873X_INT_BUCK_BUCK0_PG_INT           BIT(2)
+#define LP873X_INT_BUCK_BUCK0_SC_INT           BIT(1)
+#define LP873X_INT_BUCK_BUCK0_ILIM_INT         BIT(0)
+
+#define LP873X_INT_LDO_LDO1_PG_INT             BIT(6)
+#define LP873X_INT_LDO_LDO1_SC_INT             BIT(5)
+#define LP873X_INT_LDO_LDO1_ILIM_INT           BIT(4)
+#define LP873X_INT_LDO_LDO0_PG_INT             BIT(2)
+#define LP873X_INT_LDO_LDO0_SC_INT             BIT(1)
+#define LP873X_INT_LDO_LDO0_ILIM_INT           BIT(0)
+
+#define LP873X_TOP_STAT_PGOOD_STAT             BIT(7)
+#define LP873X_TOP_STAT_SYNC_CLK_STAT          BIT(4)
+#define LP873X_TOP_STAT_TDIE_SD_STAT           BIT(3)
+#define LP873X_TOP_STAT_TDIE_WARN_STAT         BIT(2)
+#define LP873X_TOP_STAT_OVP_STAT               BIT(1)
+
+#define LP873X_BUCK_STAT_BUCK1_STAT            BIT(7)
+#define LP873X_BUCK_STAT_BUCK1_PG_STAT         BIT(6)
+#define LP873X_BUCK_STAT_BUCK1_ILIM_STAT       BIT(4)
+#define LP873X_BUCK_STAT_BUCK0_STAT            BIT(3)
+#define LP873X_BUCK_STAT_BUCK0_PG_STAT         BIT(2)
+#define LP873X_BUCK_STAT_BUCK0_ILIM_STAT       BIT(0)
+
+#define LP873X_LDO_STAT_LDO1_STAT              BIT(7)
+#define LP873X_LDO_STAT_LDO1_PG_STAT           BIT(6)
+#define LP873X_LDO_STAT_LDO1_ILIM_STAT         BIT(4)
+#define LP873X_LDO_STAT_LDO0_STAT              BIT(3)
+#define LP873X_LDO_STAT_LDO0_PG_STAT           BIT(2)
+#define LP873X_LDO_STAT_LDO0_ILIM_STAT         BIT(0)
+
+#define LP873X_TOP_MASK_1_PGOOD_INT_MASK       BIT(7)
+#define LP873X_TOP_MASK_1_SYNC_CLK_MASK        BIT(4)
+#define LP873X_TOP_MASK_1_TDIE_WARN_MASK       BIT(2)
+#define LP873X_TOP_MASK_1_I_MEAS_MASK          BIT(0)
+
+#define LP873X_TOP_MASK_2_RESET_REG_MASK       BIT(0)
+
+#define LP873X_BUCK_MASK_BUCK1_PGF_MASK        BIT(7)
+#define LP873X_BUCK_MASK_BUCK1_PGR_MASK        BIT(6)
+#define LP873X_BUCK_MASK_BUCK1_ILIM_MASK       BIT(4)
+#define LP873X_BUCK_MASK_BUCK0_PGF_MASK        BIT(3)
+#define LP873X_BUCK_MASK_BUCK0_PGR_MASK        BIT(2)
+#define LP873X_BUCK_MASK_BUCK0_ILIM_MASK       BIT(0)
+
+#define LP873X_LDO_MASK_LDO1_PGF_MASK          BIT(7)
+#define LP873X_LDO_MASK_LDO1_PGR_MASK          BIT(6)
+#define LP873X_LDO_MASK_LDO1_ILIM_MASK         BIT(4)
+#define LP873X_LDO_MASK_LDO0_PGF_MASK          BIT(3)
+#define LP873X_LDO_MASK_LDO0_PGR_MASK          BIT(2)
+#define LP873X_LDO_MASK_LDO0_ILIM_MASK         BIT(0)
+
+#define LP873X_SEL_I_LOAD_CURRENT_BUCK_SELECT  BIT(0)
+
+#define LP873X_I_LOAD_2_BUCK_LOAD_CURRENT      BIT(0)
+
+#define LP873X_I_LOAD_1_BUCK_LOAD_CURRENT      0xFF
+
+#define LP873X_MAX_REG_ID              LP873X_LDO_1
+
+/* Number of step-down converters available */
+#define LP873X_NUM_BUCK                2
+/* Number of LDO voltage regulators available */
+#define LP873X_NUM_LDO         2
+/* Number of total regulators available */
+#define LP873X_NUM_REGULATOR           (LP873X_NUM_BUCK + LP873X_NUM_LDO)
+
+enum lp873x_regulator_id {
+       /* BUCK's */
+       LP873X_BUCK_0,
+       LP873X_BUCK_1,
+       /* LDOs */
+       LP873X_LDO_0,
+       LP873X_LDO_1,
+};
+
+/**
+ * struct lp873x - state holder for the lp873x driver
+ * @dev: struct device pointer for MFD device
+ * @rev: revision of the lp873x
+ * @lock: lock guarding the data structure
+ * @regmap: register map of the lp873x PMIC
+ *
+ * Device data may be used to access the LP873X chip
+ */
+struct lp873x {
+       struct device *dev;
+       u8 rev;
+       struct mutex lock;      /* lock guarding the data structure */
+       struct regmap *regmap;
+};
+#endif /* __LINUX_MFD_LP873X_H */
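A hedged sketch of how a consumer could combine the register and field definitions above with the regmap held in struct lp873x; lp873x_read_rev() is a made-up helper, not part of this header, and the use of the metal-layer mask for the rev field is an assumption:

    #include <linux/regmap.h>
    #include <linux/mfd/lp873x.h>

    /* Sketch only: read DEV_REV and keep the metal-layer revision bits. */
    static int lp873x_read_rev(struct lp873x *lp)
    {
            unsigned int val;
            int ret;

            ret = regmap_read(lp->regmap, LP873X_REG_DEV_REV, &val);
            if (ret)
                    return ret;

            lp->rev = val & LP873X_DEV_REV_METAL_LAYER;
            return 0;
    }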
index de748bc7525e56b9555bd41c35cb737e32929f48..4a827af17e598792264662beb2ed86260dfbe3eb 100644 (file)
@@ -26,6 +26,7 @@ enum stmpe_partnum {
        STMPE610,
        STMPE801,
        STMPE811,
+       STMPE1600,
        STMPE1601,
        STMPE1801,
        STMPE2401,
@@ -39,22 +40,42 @@ enum stmpe_partnum {
  */
 enum {
        STMPE_IDX_CHIP_ID,
+       STMPE_IDX_SYS_CTRL,
+       STMPE_IDX_SYS_CTRL2,
        STMPE_IDX_ICR_LSB,
        STMPE_IDX_IER_LSB,
+       STMPE_IDX_IER_MSB,
        STMPE_IDX_ISR_LSB,
        STMPE_IDX_ISR_MSB,
        STMPE_IDX_GPMR_LSB,
+       STMPE_IDX_GPMR_CSB,
+       STMPE_IDX_GPMR_MSB,
        STMPE_IDX_GPSR_LSB,
+       STMPE_IDX_GPSR_CSB,
+       STMPE_IDX_GPSR_MSB,
        STMPE_IDX_GPCR_LSB,
+       STMPE_IDX_GPCR_CSB,
+       STMPE_IDX_GPCR_MSB,
        STMPE_IDX_GPDR_LSB,
+       STMPE_IDX_GPDR_CSB,
+       STMPE_IDX_GPDR_MSB,
+       STMPE_IDX_GPEDR_LSB,
+       STMPE_IDX_GPEDR_CSB,
        STMPE_IDX_GPEDR_MSB,
        STMPE_IDX_GPRER_LSB,
+       STMPE_IDX_GPRER_CSB,
+       STMPE_IDX_GPRER_MSB,
        STMPE_IDX_GPFER_LSB,
+       STMPE_IDX_GPFER_CSB,
+       STMPE_IDX_GPFER_MSB,
        STMPE_IDX_GPPUR_LSB,
        STMPE_IDX_GPPDR_LSB,
        STMPE_IDX_GPAFR_U_MSB,
        STMPE_IDX_IEGPIOR_LSB,
+       STMPE_IDX_IEGPIOR_CSB,
+       STMPE_IDX_IEGPIOR_MSB,
        STMPE_IDX_ISGPIOR_LSB,
+       STMPE_IDX_ISGPIOR_CSB,
        STMPE_IDX_ISGPIOR_MSB,
        STMPE_IDX_MAX,
 };
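The STMPE_IDX_* values above index a per-variant register table rather than naming fixed addresses, which lets one driver serve parts whose register maps differ; the new CSB/MSB entries cover variants with wider GPIO banks. A hedged sketch of the lookup pattern, assuming the driver's existing stmpe_reg_read() accessor and per-variant regs[] table:

    /* Sketch only: the variant-specific address is looked up through the
     * index, then read with the generic register accessor. */
    static int example_read_chip_id(struct stmpe *stmpe)
    {
            return stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_CHIP_ID]);
    }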
index f2e4e90621ec25c237703bc5b1ecba814d1afce9..d572b78b65e14709d5fb8d8a60ee9d71147d0d81 100644 (file)
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
 #  define is_migrate_cma(migratetype) false
+#  define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
index 4f0bfe5912b2f1eb9be7a5c25f4e1ec64c2fafbe..e8c81fbd5f9cd11d4ac01adb929c7d1eee86e69e 100644 (file)
@@ -270,6 +270,8 @@ enum {
        MSI_FLAG_MULTI_PCI_MSI          = (1 << 2),
        /* Support PCI MSIX interrupts */
        MSI_FLAG_PCI_MSIX               = (1 << 3),
+       /* Needs early activate, required for PCI */
+       MSI_FLAG_ACTIVATE_EARLY         = (1 << 4),
 };
 
 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
index 8ed4326164cc843b41da6fbfe69d85cee2d61232..2b6b43cc0dd5121d8d4f6024f8ec67f862ff3328 100644 (file)
@@ -743,7 +743,9 @@ struct perf_event_context {
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
+#ifdef CONFIG_CGROUP_PERF
        int                             nr_cgroups;      /* cgroup evts */
+#endif
        void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
 };
@@ -769,7 +771,9 @@ struct perf_cpu_context {
        unsigned int                    hrtimer_active;
 
        struct pmu                      *unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp;
+#endif
 };
 
 struct perf_output_handle {
diff --git a/include/linux/platform_data/gpio-lpc32xx.h b/include/linux/platform_data/gpio-lpc32xx.h
deleted file mode 100644 (file)
index a544e96..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_GPIO_LPC32XX_H
-#define __MACH_GPIO_LPC32XX_H
-
-/*
- * Note!
- * Muxed GP pins need to be setup to the GP state in the board level
- * code prior to using this driver.
- * GPI pins : 28xP3 group
- * GPO pins : 24xP3 group
- * GPIO pins: 8xP0 group, 24xP1 group, 13xP2 group, 6xP3 group
- */
-
-#define LPC32XX_GPIO_P0_MAX 8
-#define LPC32XX_GPIO_P1_MAX 24
-#define LPC32XX_GPIO_P2_MAX 13
-#define LPC32XX_GPIO_P3_MAX 6
-#define LPC32XX_GPI_P3_MAX 29
-#define LPC32XX_GPO_P3_MAX 24
-
-#define LPC32XX_GPIO_P0_GRP 0
-#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
-#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
-#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
-#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
-#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
-
-/*
- * A specific GPIO can be selected with this macro
- * ie, GPIO_05 can be selected with LPC32XX_GPIO(LPC32XX_GPIO_P3_GRP, 5)
- * See the LPC32x0 User's guide for GPIO group numbers
- */
-#define LPC32XX_GPIO(x, y) ((x) + (y))
-
-#endif /* __MACH_GPIO_LPC32XX_H */
index 8dc155dab3ed4950ef7f2ce6bfb737b3617b0e68..696a56be7d3e2ba0f54d92ac059941470b228ccc 100644 (file)
@@ -266,39 +266,21 @@ extern asmlinkage void dump_stack(void) __cold;
  * and other debug macros are compiled out unless either DEBUG is defined
  * or CONFIG_DYNAMIC_DEBUG is set.
  */
-
-#ifdef CONFIG_PRINTK
-
-asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...);
-
-#define pr_emerg(fmt, ...)     __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)     __pr_alert(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)      __pr_crit(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)       __pr_err(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)      __pr_warn(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)    __pr_notice(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)      __pr_info(pr_fmt(fmt), ##__VA_ARGS__)
-
-#else
-
-#define pr_emerg(fmt, ...)     printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)     printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)      printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)       printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)      printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)    printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)      printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-#endif
-
-#define pr_warning pr_warn
-
+#define pr_emerg(fmt, ...) \
+       printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+       printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+       printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+       printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+       printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+       printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+       printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /*
  * Like KERN_CONT, pr_cont() should only be used when continuing
  * a line with no newline ('\n') enclosed. Otherwise it defaults
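For readers of the printk.h revert above: pr_fmt() still wraps every format string, so a file-local prefix keeps working with the plain printk() expansion. A hedged sketch with a made-up "mydrv" prefix and function name:

    /* pr_fmt() must be defined before printk.h is pulled in (usually via
     * kernel.h); every pr_info()/pr_err() in this file then carries the
     * "mydrv: " prefix. Names here are illustrative only. */
    #define pr_fmt(fmt) "mydrv: " fmt
    #include <linux/printk.h>

    static int mydrv_show_example(int channels)
    {
            pr_info("probed, %d channels\n", channels); /* "mydrv: probed, 4 channels" */
            return 0;
    }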
index 1a4ea551aae51c08fd70e4dd4693e97ca56f01de..4293808d8cfb5d4a473f220735e53da87390998b 100644 (file)
@@ -155,6 +155,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+                                             unsigned long n,
+                                             struct page *page)
+{
+       return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
index b6810c92b8bb14d9ef8c9d1036a0b25d40530ac3..5c02b0691587797e303758eb74a4cad7d5e577ff 100644 (file)
@@ -195,6 +195,8 @@ int         rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
                                struct rpc_xprt *,
                                void *),
                        void *data);
+void           rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+                       unsigned long timeo);
 
 const char *rpc_proc_name(const struct rpc_task *task);
 #endif /* __KERNEL__ */
index 5e3e1b63dbb3c97f0145bd15a23dba7d4e916528..a16070dd03eefe9281476183ff4b5a0692523a09 100644 (file)
@@ -218,7 +218,8 @@ struct rpc_xprt {
        struct work_struct      task_cleanup;
        struct timer_list       timer;
        unsigned long           last_used,
-                               idle_timeout;
+                               idle_timeout,
+                               max_reconnect_timeout;
 
        /*
         * Send stuff
index 352b1542f5cc21953c037f499d9b48f03ee98314..cbd8990e2e77e6ffa49f45611eb6710980fddd23 100644 (file)
@@ -105,6 +105,30 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 
 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+                                       bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{
+       __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
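check_object_size() above is the hook the hardened-usercopy work expects user-copy paths to call on the kernel-side buffer; with CONFIG_HARDENED_USERCOPY disabled it compiles away. A hedged sketch of the calling pattern (the helper name and the raw __copy_from_user() call illustrate the idea, they are not code from this series):

    #include <linux/thread_info.h>
    #include <linux/uaccess.h>

    /* Sketch only: validate the kernel object before the raw copy.
     * to_user is false because data flows from user space into the
     * kernel buffer 'to'. */
    static inline unsigned long
    example_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            check_object_size(to, n, false);
            return __copy_from_user(to, from, n);
    }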
index 349557825428e9b3d815e0bc2781d5d79b172d47..f30c187ed785366231e318a7beab151e0bba64b6 100644 (file)
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin() do { } while (0)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
 #endif
 
 #endif         /* __LINUX_UACCESS_H__ */
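The fallback definitions above show the new calling convention: unsafe_get_user() and unsafe_put_user() now take an error label and jump to it on a faulting access instead of returning a value. A hedged sketch of a caller, with illustrative names:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Sketch only: reads two ints from user space inside a
     * user_access_begin()/user_access_end() section, bailing out through
     * the label that the unsafe_*_user() macros jump to on a fault. */
    static int read_pair_example(const int __user *uptr, int *a, int *b)
    {
            int ret = -EFAULT;

            user_access_begin();
            unsafe_get_user(*a, uptr, efault);
            unsafe_get_user(*b, uptr + 1, efault);
            ret = 0;
    efault:
            user_access_end();
            return ret;
    }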
index 51440131d3372feb1f09b7139362db66119f0e2a..28c5da6fdfac762e64f3d7ae26edcc9adbefe9ab 100644 (file)
@@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire,
 #ifdef CONFIG_NO_HZ_COMMON
 
 #define TICK_DEP_NAMES                                 \
-               tick_dep_name(NONE)                     \
+               tick_dep_mask_name(NONE)                \
                tick_dep_name(POSIX_TIMER)              \
                tick_dep_name(PERF_EVENTS)              \
                tick_dep_name(SCHED)                    \
                tick_dep_name_end(CLOCK_UNSTABLE)
 
 #undef tick_dep_name
+#undef tick_dep_mask_name
 #undef tick_dep_name_end
 
-#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
-#define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* The MASK macros expand to their bit values and those need to be processed too */
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+       TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+       TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* NONE only has a mask defined for it */
+#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
 
 TICK_DEP_NAMES
 
 #undef tick_dep_name
+#undef tick_dep_mask_name
 #undef tick_dep_name_end
 
 #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
 #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
 
 #define show_tick_dep_name(val)                                \
index 6b011c19b50f969d66d3ec0b26d7036d6bac7525..1d57ed3d84d2c3d10d02dce56d728b9cf0ece17b 100644 (file)
@@ -32,7 +32,7 @@
  */
 
 #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H
-#define _UAPI_LINUX_VIRTIO_VOSCK_H
+#define _UAPI_LINUX_VIRTIO_VSOCK_H
 
 #include <linux/types.h>
 #include <linux/virtio_ids.h>
index cbae529b7ce0999684c42d7906df3de3febdc209..180d526a55c3ab5b54befbb264279e61d7e5a519 100644 (file)
@@ -136,8 +136,8 @@ struct cxl_event_afu_driver_reserved {
         *
         * Of course the contents will be ABI, but that's up to the AFU driver.
         */
-       size_t data_size;
-       u8 data[];
+       __u32 data_size;
+       __u8 data[];
 };
 
 struct cxl_event {
index 69886493ff1e3d31daa02d909447c744352b38ff..cac3f096050d5b27a9bd5de8600469f3c5aa0ada 100644 (file)
@@ -1761,6 +1761,7 @@ choice
 
 config SLAB
        bool "SLAB"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
          The regular slab allocator that is established and known to work
          well in all environments. It organizes cache hot objects in
@@ -1768,6 +1769,7 @@ config SLAB
 
 config SLUB
        bool "SLUB (Unqueued Allocator)"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
           SLUB is a slab allocator that minimizes cache line usage
           instead of managing queues of cached objects (SLAB approach).
index a19550d80ab1724d03ac1b0799aad54a6f1cf823..1903b8f3a7057b5687f67bbe191758951d209968 100644 (file)
@@ -843,6 +843,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
                }
        }
 }
+
+/*
+ * Update cpuctx->cgrp so that it is set when first cgroup event is added and
+ * cleared when last cgroup event is removed.
+ */
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+                        struct perf_event_context *ctx, bool add)
+{
+       struct perf_cpu_context *cpuctx;
+
+       if (!is_cgroup_event(event))
+               return;
+
+       if (add && ctx->nr_cgroups++)
+               return;
+       else if (!add && --ctx->nr_cgroups)
+               return;
+       /*
+        * Because cgroup events are always per-cpu events,
+        * this will always be called from the right CPU.
+        */
+       cpuctx = __get_cpu_context(ctx);
+       cpuctx->cgrp = add ? event->cgrp : NULL;
+}
+
 #else /* !CONFIG_CGROUP_PERF */
 
 static inline bool
@@ -920,6 +946,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
                         struct perf_event_context *ctx)
 {
 }
+
+static inline void
+list_update_cgroup_event(struct perf_event *event,
+                        struct perf_event_context *ctx, bool add)
+{
+}
+
 #endif
 
 /*
@@ -1392,6 +1425,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+
        lockdep_assert_held(&ctx->lock);
 
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1446,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                list_add_tail(&event->group_entry, list);
        }
 
-       if (is_cgroup_event(event))
-               ctx->nr_cgroups++;
+       list_update_cgroup_event(event, ctx, true);
 
        list_add_rcu(&event->event_entry, &ctx->event_list);
        ctx->nr_events++;
@@ -1581,8 +1614,6 @@ static void perf_group_attach(struct perf_event *event)
 static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-       struct perf_cpu_context *cpuctx;
-
        WARN_ON_ONCE(event->ctx != ctx);
        lockdep_assert_held(&ctx->lock);
 
@@ -1594,20 +1625,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
        event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-       if (is_cgroup_event(event)) {
-               ctx->nr_cgroups--;
-               /*
-                * Because cgroup events are always per-cpu events, this will
-                * always be called from the right CPU.
-                */
-               cpuctx = __get_cpu_context(ctx);
-               /*
-                * If there are no more cgroup events then clear cgrp to avoid
-                * stale pointer in update_cgrp_time_from_cpuctx().
-                */
-               if (!ctx->nr_cgroups)
-                       cpuctx->cgrp = NULL;
-       }
+       list_update_cgroup_event(event, ctx, false);
 
        ctx->nr_events--;
        if (event->attr.inherit_stat)
@@ -1716,8 +1734,8 @@ static inline int pmu_filter_match(struct perf_event *event)
 static inline int
 event_filter_match(struct perf_event *event)
 {
-       return (event->cpu == -1 || event->cpu == smp_processor_id())
-           && perf_cgroup_match(event) && pmu_filter_match(event);
+       return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
+              perf_cgroup_match(event) && pmu_filter_match(event);
 }
 
 static void
@@ -1737,8 +1755,8 @@ event_sched_out(struct perf_event *event,
         * maintained, otherwise bogus information is returned
         * via read() for time_enabled, time_running:
         */
-       if (event->state == PERF_EVENT_STATE_INACTIVE
-           && !event_filter_match(event)) {
+       if (event->state == PERF_EVENT_STATE_INACTIVE &&
+           !event_filter_match(event)) {
                delta = tstamp - event->tstamp_stopped;
                event->tstamp_running += delta;
                event->tstamp_stopped = tstamp;
@@ -2236,10 +2254,15 @@ perf_install_in_context(struct perf_event_context *ctx,
 
        lockdep_assert_held(&ctx->mutex);
 
-       event->ctx = ctx;
        if (event->cpu != -1)
                event->cpu = cpu;
 
+       /*
+        * Ensures that if we can observe event->ctx, both the event and ctx
+        * will be 'complete'. See perf_iterate_sb_cpu().
+        */
+       smp_store_release(&event->ctx, ctx);
+
        if (!task) {
                cpu_function_call(cpu, __perf_install_in_context, event);
                return;
@@ -5969,6 +5992,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
        struct perf_event *event;
 
        list_for_each_entry_rcu(event, &pel->list, sb_list) {
+               /*
+                * Skip events that are not fully formed yet; ensure that
+                * if we observe event->ctx, both event and ctx will be
+                * complete enough. See perf_install_in_context().
+                */
+               if (!smp_load_acquire(&event->ctx))
+                       continue;
+
                if (event->state < PERF_EVENT_STATE_INACTIVE)
                        continue;
                if (!event_filter_match(event))
index 33664f70e2d25e880efdbc8695fcc12989871150..46cb3a301bc1555a84607bab7b2aea8a2bcaf7e6 100644 (file)
@@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled;
  * Futex flags used to encode options to functions and preserve them across
  * restarts.
  */
-#define FLAGS_SHARED           0x01
+#ifdef CONFIG_MMU
+# define FLAGS_SHARED          0x01
+#else
+/*
+ * NOMMU does not have per process address space. Let the compiler optimize
+ * code away.
+ */
+# define FLAGS_SHARED          0x00
+#endif
 #define FLAGS_CLOCKRT          0x02
 #define FLAGS_HAS_TIMEOUT      0x04
 
@@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key)
        if (!key->both.ptr)
                return;
 
+       /*
+        * On MMU-less systems futexes are always "private" as there is no
+        * per-process address space. We need the smp wmb nevertheless - yes,
+        * arch/blackfin has MMU-less SMP ...
+        */
+       if (!IS_ENABLED(CONFIG_MMU)) {
+               smp_mb(); /* explicit smp_mb(); (B) */
+               return;
+       }
+
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                ihold(key->shared.inode); /* implies smp_mb(); (B) */
@@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key)
                return;
        }
 
+       if (!IS_ENABLED(CONFIG_MMU))
+               return;
+
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
                iput(key->shared.inode);
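The point of making FLAGS_SHARED 0x00 on !MMU is that every shared-futex branch becomes a constant-false test the compiler can discard; an illustration (handle_shared() is invented):

/*
 * Sketch only: with FLAGS_SHARED == 0x00,
 *
 *      if (flags & FLAGS_SHARED)       becomes         if (flags & 0)
 *              handle_shared();
 *
 * which is always false, so the shared-futex handling is dead code and
 * drops out of the NOMMU build entirely.
 */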
index 54999350162cbc89326a67a3728814e17871d784..19e9dfbe97fa53f732edd375cdd50341327750ac 100644 (file)
@@ -359,6 +359,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                else
                        dev_dbg(dev, "irq [%d-%d] for MSI\n",
                                virq, virq + desc->nvec_used - 1);
+               /*
+                * This flag is set by the PCI layer as we need to activate
+                * the MSI entries before the PCI layer enables MSI in the
+                * card. Otherwise the card latches a random msi message.
+                */
+               if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
+                       struct irq_data *irq_data;
+
+                       irq_data = irq_domain_get_irq_data(domain, desc->irq);
+                       irq_domain_activate_irq(irq_data);
+               }
        }
 
        return 0;
index 37649e69056cf974e27d0137260f8ff46ad688df..8a99abf58080be21fbb954777b48aca24d4342b5 100644 (file)
@@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                                goto gotlock;
                        }
                }
-               WRITE_ONCE(pn->state, vcpu_halted);
+               WRITE_ONCE(pn->state, vcpu_hashed);
                qstat_inc(qstat_pv_wait_head, true);
                qstat_inc(qstat_pv_wait_again, waitcnt);
                pv_wait(&l->locked, _Q_SLOW_VAL);
index 22e02530984574a6fee718aad26720516c34d497..b9d0315162540d1236e5e1268f184531d8259114 100644 (file)
@@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
                 */
                if ((counter == qstat_pv_latency_kick) ||
                    (counter == qstat_pv_latency_wake)) {
-                       stat = 0;
                        if (kicks)
                                stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
                }
index a881c6a7ba74020db9d4fee663445112af0edcc8..33c79b6105c55fdc4f228bb0348fbc2b92d68f0f 100644 (file)
@@ -300,12 +300,12 @@ static int create_image(int platform_mode)
        save_processor_state();
        trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
        error = swsusp_arch_suspend();
+       /* Restore control flow magically appears here */
+       restore_processor_state();
        trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
        if (error)
                printk(KERN_ERR "PM: Error %d creating hibernation image\n",
                        error);
-       /* Restore control flow magically appears here */
-       restore_processor_state();
        if (!in_suspend)
                events_check_enabled = false;
 
index 5d4505f30083550e59337fe5d90cf14f783573c0..7fd2838fa41748006cebf14ed7432739f3138301 100644 (file)
  */
 #include <linux/percpu.h>
 
-typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt,
-                                           va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
 
-__printf(2, 0)
-int vprintk_default(int level, const char *fmt, va_list args);
+int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
 
 #ifdef CONFIG_PRINTK_NMI
 
@@ -33,10 +31,9 @@ extern raw_spinlock_t logbuf_lock;
  * via per-CPU variable.
  */
 DECLARE_PER_CPU(printk_func_t, printk_func);
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-       return this_cpu_read(printk_func)(level, fmt, args);
+       return this_cpu_read(printk_func)(fmt, args);
 }
 
 extern atomic_t nmi_message_lost;
@@ -47,10 +44,9 @@ static inline int get_nmi_message_lost(void)
 
 #else /* CONFIG_PRINTK_NMI */
 
-__printf(2, 0)
-static inline int vprintk_func(int level, const char *fmt, va_list args)
+static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
-       return vprintk_default(level, fmt, args);
+       return vprintk_default(fmt, args);
 }
 
 static inline int get_nmi_message_lost(void)
index bc3eeb1ae6da6b58a1aed09372f70fc8c3fda088..b69eb8a2876fc2ec7ba1af9e5cbc000737cd47fd 100644 (file)
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(int level, const char *fmt, va_list args)
+static int vprintk_nmi(const char *fmt, va_list args)
 {
        struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
        int add = 0;
@@ -79,16 +79,7 @@ again:
        if (!len)
                smp_rmb();
 
-       if (level != LOGLEVEL_DEFAULT) {
-               add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
-                               KERN_SOH "%c", '0' + level);
-               add += vsnprintf(s->buffer + len + add,
-                                sizeof(s->buffer) - len - add,
-                                fmt, args);
-       } else {
-               add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
-                               fmt, args);
-       }
+       add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
 
        /*
         * Do it once again if the buffer has been flushed in the meantime.
index a5ef95ca18c9d945e7d8a826dacd27c7f7ebe008..eea6dbc2d8cf6ffc4d71729eb270a659b0a8c77d 100644 (file)
@@ -1930,28 +1930,7 @@ asmlinkage int printk_emit(int facility, int level,
 }
 EXPORT_SYMBOL(printk_emit);
 
-#ifdef CONFIG_PRINTK
-#define define_pr_level(func, loglevel)                                \
-asmlinkage __visible void func(const char *fmt, ...)           \
-{                                                              \
-       va_list args;                                           \
-                                                               \
-       va_start(args, fmt);                                    \
-       vprintk_default(loglevel, fmt, args);                   \
-       va_end(args);                                           \
-}                                                              \
-EXPORT_SYMBOL(func)
-
-define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
-define_pr_level(__pr_alert, LOGLEVEL_ALERT);
-define_pr_level(__pr_crit, LOGLEVEL_CRIT);
-define_pr_level(__pr_err, LOGLEVEL_ERR);
-define_pr_level(__pr_warn, LOGLEVEL_WARNING);
-define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
-define_pr_level(__pr_info, LOGLEVEL_INFO);
-#endif
-
-int vprintk_default(int level, const char *fmt, va_list args)
+int vprintk_default(const char *fmt, va_list args)
 {
        int r;
 
@@ -1961,7 +1940,7 @@ int vprintk_default(int level, const char *fmt, va_list args)
                return r;
        }
 #endif
-       r = vprintk_emit(0, level, NULL, 0, fmt, args);
+       r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 
        return r;
 }
@@ -1994,7 +1973,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
        int r;
 
        va_start(args, fmt);
-       r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args);
+       r = vprintk_func(fmt, args);
        va_end(args);
 
        return r;
index 5c883fe8e44016df1109e8f66dd73377dfecb5e9..2a906f20fba7c4ba63d89fe87ac3527607be453a 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
 #include <linux/frame.h>
+#include <linux/prefetch.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2971,6 +2972,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
+/*
+ * The function fair_sched_class.update_curr accesses the struct curr
+ * and its field curr->exec_start; when called from task_sched_runtime(),
+ * we observe a high rate of cache misses in practice.
+ * Prefetching this data results in improved performance.
+ */
+static inline void prefetch_curr_exec_start(struct task_struct *p)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+#else
+       struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+#endif
+       prefetch(curr);
+       prefetch(&curr->exec_start);
+}
+
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * thread, breaking clock_gettime().
         */
        if (task_current(rq, p) && task_on_rq_queued(p)) {
+               prefetch_curr_exec_start(p);
                update_rq_clock(rq);
                p->sched_class->update_curr(rq);
        }
index 5be58820465cced6c0d1dc06c9de146bddcf664f..d4184498c9f5e3c8674015f97fe04da2417dafbd 100644 (file)
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 
        if (old_idx == IDX_INVALID) {
                cp->size++;
-               cp->elements[cp->size - 1].dl = 0;
+               cp->elements[cp->size - 1].dl = dl;
                cp->elements[cp->size - 1].cpu = cpu;
                cp->elements[cpu].idx = cp->size - 1;
                cpudl_change_key(cp, cp->size - 1, dl);
index 1934f658c03604272e5809f32fee1a6a3c928990..9858266fb0b32b07158b2b9e8f66e7fa2f482eb7 100644 (file)
@@ -508,13 +508,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+       cputime_t cputime, steal;
 
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }
 
-       account_idle_time(jiffies_to_cputime(ticks));
+       cputime = jiffies_to_cputime(ticks);
+       steal = steal_account_process_time(cputime);
+
+       if (steal >= cputime)
+               return;
+
+       cputime -= steal;
+       account_idle_time(cputime);
 }
 
 /*
index fcb7f0217ff48610cca9bd5bd078f2f05df79164..1ce8867283dcde6e35ef74a72a1bca968decb918 100644 (file)
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
         *
         * XXX figure out if select_task_rq_dl() deals with offline cpus.
         */
-       if (unlikely(!rq->online))
+       if (unlikely(!rq->online)) {
+               lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
+               rf.cookie = lockdep_pin_lock(&rq->lock);
+       }
 
        /*
         * Queueing this task back might have overloaded rq, check if we need
index 4088eedea7637859844c777dfa56dfb23136c142..039de34f15216d19f61386b6d6c66744660516c9 100644 (file)
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
        pcfs_rq = tg->parent->cfs_rq[cpu];
 
        cfs_rq->throttle_count = pcfs_rq->throttle_count;
-       pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+       cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
index 555670a5143c61bed5e7015f4f13849240be37b9..32bf6f75a8fec255c6d5fbf38e9fecd9e1e848fa 100644 (file)
@@ -1496,6 +1496,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
+       bool is_max_delta;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1506,6 +1507,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
        spin_lock(&base->lock);
        nextevt = __next_timer_interrupt(base);
+       is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
        base->next_expiry = nextevt;
        /*
         * We have a fresh next event. Check whether we can forward the base:
@@ -1519,7 +1521,8 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
                expires = basem;
                base->is_idle = false;
        } else {
-               expires = basem + (nextevt - basej) * TICK_NSEC;
+               if (!is_max_delta)
+                       expires = basem + (nextevt - basej) * TICK_NSEC;
                /*
                 * If we expect to sleep more than a tick, mark the base idle:
                 */
index 33f655ef48cd1668604a1f60c4a97e4acbf52508..9c5fe81104135364bca9f2b0da47f4e2a1ed51fc 100644 (file)
@@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                unsigned long c, data;
 
                /* Fall back to byte-at-a-time if we get a page fault */
-               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                       break;
+               unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
                *(unsigned long *)(dst+res) = c;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
@@ -56,8 +56,7 @@ byte_at_a_time:
        while (max) {
                char c;
 
-               if (unlikely(unsafe_get_user(c,src+res)))
-                       return -EFAULT;
+               unsafe_get_user(c,src+res, efault);
                dst[res] = c;
                if (!c)
                        return res;
@@ -76,6 +75,7 @@ byte_at_a_time:
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's an EFAULT.
         */
+efault:
        return -EFAULT;
 }
 
index 2625943625d7fb229e6e2cf104e5d84c95246ffa..8e105ed4df12bb6bb0a170afff54d979c15d73c0 100644 (file)
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        src -= align;
        max += align;
 
-       if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
-               return 0;
+       unsafe_get_user(c, (unsigned long __user *)src, efault);
        c |= aligned_byte_mask(align);
 
        for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
-               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-                       return 0;
+               unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
        }
        res -= align;
 
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's 0.
         */
+efault:
        return 0;
 }
 
index fc059666c760e179db0a070926a1e8204f5e9c92..2ca1faf3fa09038feaeea4fb4adbe5ea6717df30 100644 (file)
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := gup.o highmem.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
index b9aa1b0b38b0ecdb769cd33a8d77c0ffda2621bf..87e11d8ad536b8c360740ca9ce96461b0daeaaee 100644 (file)
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
                list_del(&page->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
+               h->max_huge_pages--;
                update_and_free_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
index b6728a33a4aca104fde8022b90fdf2df5630af31..baabaad4a4aaa89bb13fc691cf5df58af46c8b3b 100644 (file)
@@ -217,11 +217,8 @@ void quarantine_reduce(void)
        new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
                QUARANTINE_FRACTION;
        percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
-       if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
-               "Too little memory, disabling global KASAN quarantine.\n"))
-               new_quarantine_size = 0;
-       else
-               new_quarantine_size -= percpu_quarantines;
+       new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
+               0 : new_quarantine_size - percpu_quarantines;
        WRITE_ONCE(quarantine_size, new_quarantine_size);
 
        last = global_quarantine.head;
index 66beca1ad92ffcc16c3636c4e7e54c46bc89cc3f..2ff0289ad061322298b472edba9eebb9abb302a7 100644 (file)
@@ -2337,8 +2337,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
                return 0;
 
        memcg = get_mem_cgroup_from_mm(current->mm);
-       if (!mem_cgroup_is_root(memcg))
+       if (!mem_cgroup_is_root(memcg)) {
                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+               if (!ret)
+                       __SetPageKmemcg(page);
+       }
        css_put(&memcg->css);
        return ret;
 }
@@ -2365,6 +2368,11 @@ void memcg_kmem_uncharge(struct page *page, int order)
                page_counter_uncharge(&memcg->memsw, nr_pages);
 
        page->mem_cgroup = NULL;
+
+       /* slab pages do not have PageKmemcg flag set */
+       if (PageKmemcg(page))
+               __ClearPageKmemcg(page);
+
        css_put_many(&memcg->css, nr_pages);
 }
 #endif /* !CONFIG_SLOB */
@@ -4069,14 +4077,32 @@ static struct cftype mem_cgroup_legacy_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
-       atomic_inc(&memcg->id.ref);
+       atomic_add(n, &memcg->id.ref);
 }
 
-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
 {
-       if (atomic_dec_and_test(&memcg->id.ref)) {
+       while (!atomic_inc_not_zero(&memcg->id.ref)) {
+               /*
+                * The root cgroup cannot be destroyed, so its refcount must
+                * always be >= 1.
+                */
+               if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+                       VM_BUG_ON(1);
+                       break;
+               }
+               memcg = parent_mem_cgroup(memcg);
+               if (!memcg)
+                       memcg = root_mem_cgroup;
+       }
+       return memcg;
+}
+
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+{
+       if (atomic_sub_and_test(n, &memcg->id.ref)) {
                idr_remove(&mem_cgroup_idr, memcg->id.id);
                memcg->id.id = 0;
 
@@ -4085,6 +4111,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
        }
 }
 
+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+       mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+       mem_cgroup_id_put_many(memcg, 1);
+}
+
 /**
  * mem_cgroup_from_id - look up a memcg from a memcg id
  * @id: the memcg id to look up
@@ -4719,6 +4755,8 @@ static void __mem_cgroup_clear_mc(void)
                if (!mem_cgroup_is_root(mc.from))
                        page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
+               mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
                /*
                 * we charged both to->memory and to->memsw, so we
                 * should uncharge to->memory.
@@ -4726,9 +4764,9 @@ static void __mem_cgroup_clear_mc(void)
                if (!mem_cgroup_is_root(mc.to))
                        page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-               css_put_many(&mc.from->css, mc.moved_swap);
+               mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+               css_put_many(&mc.to->css, mc.moved_swap);
 
-               /* we've already done css_get(mc.to) */
                mc.moved_swap = 0;
        }
        memcg_oom_recover(from);
@@ -5537,8 +5575,10 @@ static void uncharge_list(struct list_head *page_list)
                        else
                                nr_file += nr_pages;
                        pgpgout++;
-               } else
+               } else {
                        nr_kmem += 1 << compound_order(page);
+                       __ClearPageKmemcg(page);
+               }
 
                page->mem_cgroup = NULL;
        } while (next != page_list);
@@ -5790,7 +5830,7 @@ subsys_initcall(mem_cgroup_init);
  */
 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 {
-       struct mem_cgroup *memcg;
+       struct mem_cgroup *memcg, *swap_memcg;
        unsigned short oldid;
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5805,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!memcg)
                return;
 
-       mem_cgroup_id_get(memcg);
-       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       /*
+        * In case the memcg owning these pages has been offlined and doesn't
+        * have an ID allocated to it anymore, charge the closest online
+        * ancestor for the swap instead and transfer the memory+swap charge.
+        */
+       swap_memcg = mem_cgroup_id_get_online(memcg);
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
        VM_BUG_ON_PAGE(oldid, page);
-       mem_cgroup_swap_statistics(memcg, true);
+       mem_cgroup_swap_statistics(swap_memcg, true);
 
        page->mem_cgroup = NULL;
 
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
 
+       if (memcg != swap_memcg) {
+               if (!mem_cgroup_is_root(swap_memcg))
+                       page_counter_charge(&swap_memcg->memsw, 1);
+               page_counter_uncharge(&memcg->memsw, 1);
+       }
+
        /*
         * Interrupts should be disabled here because the caller holds the
         * mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5853,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
        if (!memcg)
                return 0;
 
+       memcg = mem_cgroup_id_get_online(memcg);
+
        if (!mem_cgroup_is_root(memcg) &&
-           !page_counter_try_charge(&memcg->swap, 1, &counter))
+           !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+               mem_cgroup_id_put(memcg);
                return -ENOMEM;
+       }
 
-       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
index 3894b65b155555f11076f0cae90f71e2475b6929..41266dc29f33fb1278d7e4e9d6fd2efab69380a1 100644 (file)
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 
        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
+       pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
 
        /*
         * The node we allocated has no zone fallback lists. For avoiding
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 {
        arch_refresh_nodedata(nid, NULL);
+       free_percpu(pgdat->per_cpu_nodestats);
        arch_free_nodedata(pgdat);
        return;
 }
index 7d0a275df822e9e14c55e5d472cfc473ac3ae173..d53a9aa00977cbd0f81970e9e8a30b011cc73f31 100644 (file)
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task)
 {
        struct mm_struct *mm = task->mm;
        struct task_struct *p;
-       bool ret;
+       bool ret = true;
 
        /*
         * Skip tasks without mm because it might have passed its exit_mm and
index fb975cec351821151a422fb34171121f67459228..3fbe73a6fe4b6869dcd44a45de43928d4c08fe0c 100644 (file)
@@ -1008,10 +1008,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
        }
        if (PageMappingFlags(page))
                page->mapping = NULL;
-       if (memcg_kmem_enabled() && PageKmemcg(page)) {
+       if (memcg_kmem_enabled() && PageKmemcg(page))
                memcg_kmem_uncharge(page, order);
-               __ClearPageKmemcg(page);
-       }
        if (check_free)
                bad += free_pages_check(page);
        if (bad)
@@ -3756,12 +3754,10 @@ no_zone:
        }
 
 out:
-       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
-               if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
-                       __free_pages(page, order);
-                       page = NULL;
-               } else
-                       __SetPageKmemcg(page);
+       if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
+           unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+               __free_pages(page, order);
+               page = NULL;
        }
 
        if (kmemcheck_enabled && page)
@@ -4064,7 +4060,7 @@ long si_mem_available(void)
        int lru;
 
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-               pages[lru] = global_page_state(NR_LRU_BASE + lru);
+               pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
        for_each_zone(zone)
                wmark_low += zone->watermark[WMARK_LOW];
@@ -4761,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else  /* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5882,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
                zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
                zone->node = nid;
-               pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-                                               / 100;
-               pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
                zone->name = zone_names[j];
                zone->zone_pgdat = pgdat;
@@ -6805,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
        setup_per_zone_wmarks();
        refresh_zone_stat_thresholds();
        setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+       setup_min_unmapped_ratio();
+       setup_min_slab_ratio();
+#endif
+
        return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6846,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
+static void setup_min_unmapped_ratio(void)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+
+       for_each_online_pgdat(pgdat)
+               pgdat->min_unmapped_pages = 0;
+
+       for_each_zone(zone)
+               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
+                               sysctl_min_unmapped_ratio) / 100;
+}
+
+
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       struct pglist_data *pgdat;
-       struct zone *zone;
        int rc;
 
        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
+       setup_min_unmapped_ratio();
+
+       return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+
        for_each_online_pgdat(pgdat)
                pgdat->min_slab_pages = 0;
 
        for_each_zone(zone)
-               zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
-                               sysctl_min_unmapped_ratio) / 100;
-       return 0;
+               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
+                               sysctl_min_slab_ratio) / 100;
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       struct pglist_data *pgdat;
-       struct zone *zone;
        int rc;
 
        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
-       for_each_online_pgdat(pgdat)
-               pgdat->min_slab_pages = 0;
+       setup_min_slab_ratio();
 
-       for_each_zone(zone)
-               zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
-                               sysctl_min_slab_ratio) / 100;
        return 0;
 }
 #endif
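As a worked example of the per-node arithmetic these helpers centralize (the numbers are invented):

/*
 * Sketch only: a node with two zones of 1,000,000 and 200,000 managed
 * pages and sysctl_min_unmapped_ratio = 1 gets
 *
 *      min_unmapped_pages = 1000000 * 1 / 100 + 200000 * 1 / 100
 *                         = 10000 + 2000 = 12000 pages
 *
 * accumulated zone by zone; setup_min_slab_ratio() does the same with
 * sysctl_min_slab_ratio.
 */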
index 709bc83703b1bfef419fa674ef5b5e28f5d70f05..1ef36404e7b2d7daeef2061ff8f79524d7750bb9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
                __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
        } else {
-               if (PageTransCompound(page)) {
-                       VM_BUG_ON_PAGE(!PageLocked(page), page);
+               if (PageTransCompound(page) && page_mapping(page)) {
+                       VM_WARN_ON_ONCE(!PageLocked(page));
+
                        SetPageDoubleMap(compound_head(page));
                        if (PageMlocked(page))
                                clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 {
        int i, nr = 1;
 
-       VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+       VM_BUG_ON_PAGE(compound && !PageHead(page), page);
        lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
index 7f7748a0f9e1f738fd1ffcaceccdee2ae54d8d35..fd8b2b5741b141a7bc4457d93929c100d988a639 100644 (file)
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 
 struct kobj_attribute shmem_enabled_attr =
        __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
 
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 bool shmem_huge_enabled(struct vm_area_struct *vma)
 {
        struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
                        return false;
        }
 }
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #else /* !CONFIG_SHMEM */
 
index 261147ba156fb855c525975a0478d98096c7d0a2..b67271024135aff957f1361c3e327da41915b4c2 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4441,6 +4441,36 @@ static int __init slab_proc_init(void)
 module_init(slab_proc_init);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *cachep;
+       unsigned int objnr;
+       unsigned long offset;
+
+       /* Find and validate object. */
+       cachep = page->slab_cache;
+       objnr = obj_to_index(cachep, page, (void *)ptr);
+       BUG_ON(objnr >= cachep->num);
+
+       /* Find offset within object. */
+       offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+               return NULL;
+
+       return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
index 850737bdfbd82410dcd9e0e87d64ea808b0e39c7..9adae58462f8191b22659b1aa438ec637f6fc765 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
+       LIST_HEAD(discard);
        struct page *page, *h;
 
        BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
        list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
                        remove_partial(n, page);
-                       discard_slab(s, page);
+                       list_add(&page->lru, &discard);
                } else {
                        list_slab_objects(s, page,
                        "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
+
+       list_for_each_entry_safe(page, h, &discard, lru)
+               discard_slab(s, page);
 }
 
 /*
@@ -3764,6 +3768,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *s;
+       unsigned long offset;
+       size_t object_size;
+
+       /* Find object and usable object size. */
+       s = page->slab_cache;
+       object_size = slab_ksize(s);
+
+       /* Reject impossible pointers. */
+       if (ptr < page_address(page))
+               return s->name;
+
+       /* Find offset within object. */
+       offset = (ptr - page_address(page)) % s->size;
+
+       /* Adjust for redzone and reject if within the redzone. */
+       if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+               if (offset < s->red_left_pad)
+                       return s->name;
+               offset -= s->red_left_pad;
+       }
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= object_size && n <= object_size - offset)
+               return NULL;
+
+       return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
        struct page *page;
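To make the offset arithmetic in the SLUB __check_heap_object() concrete, here is a numeric example with an invented cache geometry:

/*
 * Sketch only: assume s->size = 256 (slot stride), slab_ksize(s) = 200
 * usable bytes, no red zone, and a pointer 64 bytes into its slot:
 *
 *      offset = (ptr - page_address(page)) % 256 = 64
 *
 * A copy of n = 120 bytes passes (64 <= 200 and 120 <= 200 - 64 = 136);
 * n = 160 fails the second test and the cache name is returned so the
 * caller can report the overflow.
 */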
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644 (file)
index 0000000..8ebae91
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+       BAD_STACK = -1,
+       NOT_STACK = 0,
+       GOOD_FRAME,
+       GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ *     NOT_STACK: not at all on the stack
+ *     GOOD_FRAME: fully within a valid stack frame
+ *     GOOD_STACK: fully on the stack (when can't do frame-checking)
+ *     BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+       const void * const stack = task_stack_page(current);
+       const void * const stackend = stack + THREAD_SIZE;
+       int ret;
+
+       /* Object is not on the stack at all. */
+       if (obj + len <= stack || stackend <= obj)
+               return NOT_STACK;
+
+       /*
+        * Reject: object partially overlaps the stack (passing the
+        * check above means at least one end is within the stack,
+        * so if this check fails, the other end is outside the stack).
+        */
+       if (obj < stack || stackend < obj + len)
+               return BAD_STACK;
+
+       /* Check if object is safely within a valid frame. */
+       ret = arch_within_stack_frames(stack, stackend, obj, len);
+       if (ret)
+               return ret;
+
+       return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+                           bool to_user, const char *type)
+{
+       pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+               to_user ? "exposure" : "overwrite",
+               to_user ? "from" : "to", ptr, type ? : "unknown", len);
+       /*
+        * For greater effect, it would be nice to do do_group_exit(),
+        * but BUG() actually hooks all the lock-breaking and per-arch
+        * Oops code, so that is used here instead.
+        */
+       BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+                    unsigned long high)
+{
+       unsigned long check_low = (uintptr_t)ptr;
+       unsigned long check_high = check_low + n;
+
+       /* Does not overlap if entirely above or entirely below. */
+       if (check_low >= high || check_high < low)
+               return false;
+
+       return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+                                                  unsigned long n)
+{
+       unsigned long textlow = (unsigned long)_stext;
+       unsigned long texthigh = (unsigned long)_etext;
+       unsigned long textlow_linear, texthigh_linear;
+
+       if (overlaps(ptr, n, textlow, texthigh))
+               return "<kernel text>";
+
+       /*
+        * Some architectures have virtual memory mappings with a secondary
+        * mapping of the kernel text, i.e. there is more than one virtual
+        * kernel address that points to the kernel image. This is usually the
+        * case when there is a separate linear physical memory mapping, where
+        * __pa() is not just the reverse of __va(). This can be detected
+        * and checked:
+        */
+       textlow_linear = (unsigned long)__va(__pa(textlow));
+       /* No different mapping: we're done. */
+       if (textlow_linear == textlow)
+               return NULL;
+
+       /* Check the secondary mapping... */
+       texthigh_linear = (unsigned long)__va(__pa(texthigh));
+       if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+               return "<linear kernel text>";
+
+       return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+       /* Reject if object wraps past end of memory. */
+       if (ptr + n < ptr)
+               return "<wrapped address>";
+
+       /* Reject if NULL or ZERO-allocation. */
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                           bool to_user)
+{
+       struct page *page, *endpage;
+       const void *end = ptr + n - 1;
+       bool is_reserved, is_cma;
+
+       /*
+        * Some architectures (arm64) return true for virt_addr_valid() on
+        * vmalloced addresses. Work around this by checking for vmalloc
+        * first.
+        */
+       if (is_vmalloc_addr(ptr))
+               return NULL;
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       /* Check slab allocator for flags and size. */
+       if (PageSlab(page))
+               return __check_heap_object(ptr, n, page);
+
+       /*
+        * Sometimes the kernel data regions are not marked Reserved (see
+        * check below). And sometimes [_sdata,_edata) does not cover
+        * rodata and/or bss, so check each range explicitly.
+        */
+
+       /* Allow reads of kernel rodata region (if not marked as Reserved). */
+       if (ptr >= (const void *)__start_rodata &&
+           end <= (const void *)__end_rodata) {
+               if (!to_user)
+                       return "<rodata>";
+               return NULL;
+       }
+
+       /* Allow kernel data region (if not marked as Reserved). */
+       if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+               return NULL;
+
+       /* Allow kernel bss region (if not marked as Reserved). */
+       if (ptr >= (const void *)__bss_start &&
+           end <= (const void *)__bss_stop)
+               return NULL;
+
+       /* Is the object wholly within one base page? */
+       if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+                  ((unsigned long)end & (unsigned long)PAGE_MASK)))
+               return NULL;
+
+       /* Allow if start and end are inside the same compound page. */
+       endpage = virt_to_head_page(end);
+       if (likely(endpage == page))
+               return NULL;
+
+       /*
+        * Reject if range is entirely either Reserved (i.e. special or
+        * device memory), or CMA. Otherwise, reject since the object spans
+        * several independently allocated pages.
+        */
+       is_reserved = PageReserved(page);
+       is_cma = is_migrate_cma_page(page);
+       if (!is_reserved && !is_cma)
+               goto reject;
+
+       for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+               page = virt_to_head_page(ptr);
+               if (is_reserved && !PageReserved(page))
+                       goto reject;
+               if (is_cma && !is_migrate_cma_page(page))
+                       goto reject;
+       }
+
+       return NULL;
+
+reject:
+       return "<spans multiple pages>";
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+       const char *err;
+
+       /* Skip all tests if size is zero. */
+       if (!n)
+               return;
+
+       /* Check for invalid addresses. */
+       err = check_bogus_address(ptr, n);
+       if (err)
+               goto report;
+
+       /* Check for bad heap object. */
+       err = check_heap_object(ptr, n, to_user);
+       if (err)
+               goto report;
+
+       /* Check for bad stack object. */
+       switch (check_stack_object(ptr, n)) {
+       case NOT_STACK:
+               /* Object is not touching the current process stack. */
+               break;
+       case GOOD_FRAME:
+       case GOOD_STACK:
+               /*
+                * Object is either in the correct frame (when it
+                * is possible to check) or just generally on the
+                * process stack (when frame checking not available).
+                */
+               return;
+       default:
+               err = "<process stack>";
+               goto report;
+       }
+
+       /* Check for object in kernel to avoid text exposure. */
+       err = check_kernel_text_object(ptr, n);
+       if (!err)
+               return;
+
+report:
+       report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
index 4acb1d5417aaf980bc7797c817eb9a9a350ecbf5..f24b25c25106fb55fb713b7308a8d43413a2143b 100644 (file)
@@ -507,8 +507,8 @@ err_out:
                /* wakeup anybody waiting for slots to pin pages */
                wake_up(&vp_wq);
        }
-       kfree(in_pages);
-       kfree(out_pages);
+       kvfree(in_pages);
+       kvfree(out_pages);
        return err;
 }
 
index c83326c5ba580480b877079d2a465430ff408cf5..ef34a02719d73147f4a4af29f3d861b4dc34391e 100644 (file)
@@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req)
        put_generic_request(req);
 }
 
-void cancel_generic_request(struct ceph_mon_generic_request *req)
+static void cancel_generic_request(struct ceph_mon_generic_request *req)
 {
        struct ceph_mon_client *monc = req->monc;
        struct ceph_mon_generic_request *lookup_req;
index b5ec09612ff71daeb1b95ed4b6f939a172cc7545..a97e7b506612b4255f4b99de76d74c46a1b3896d 100644 (file)
@@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
-               if (!pages) {
+               if (IS_ERR(pages)) {
                        ceph_msg_put(m);
                        return NULL;
                }
index ca53c8319209469a25011b15d26af951a09d392d..22fb96efcf3467713a9e5430057ead684326e3db 100644 (file)
@@ -84,12 +84,6 @@ retry:
 }
 EXPORT_SYMBOL(ceph_find_or_create_string);
 
-static void ceph_free_string(struct rcu_head *head)
-{
-       struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
-       kfree(cs);
-}
-
 void ceph_release_string(struct kref *ref)
 {
        struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
@@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref)
        }
        spin_unlock(&string_tree_lock);
 
-       call_rcu(&cs->rcu, ceph_free_string);
+       kfree_rcu(cs, rcu);
 }
 EXPORT_SYMBOL(ceph_release_string);
 
index 23c8e7c3965651ad5ee03ee617ad92d06646802f..976c7812bbd520e51d34eb542b15f0e4730034b9 100644 (file)
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
 }
 
 static struct gss_upcall_msg *
-__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
+__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
 {
        struct gss_upcall_msg *pos;
        list_for_each_entry(pos, &pipe->in_downcall, list) {
                if (!uid_eq(pos->uid, uid))
                        continue;
+               if (auth && pos->auth->service != auth->service)
+                       continue;
                atomic_inc(&pos->count);
                dprintk("RPC:       %s found msg %p\n", __func__, pos);
                return pos;
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
        struct gss_upcall_msg *old;
 
        spin_lock(&pipe->lock);
-       old = __gss_find_upcall(pipe, gss_msg->uid);
+       old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
        if (old == NULL) {
                atomic_inc(&gss_msg->count);
                list_add(&gss_msg->list, &pipe->in_downcall);
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
        err = -ENOENT;
        /* Find a matching upcall */
        spin_lock(&pipe->lock);
-       gss_msg = __gss_find_upcall(pipe, uid);
+       gss_msg = __gss_find_upcall(pipe, uid, NULL);
        if (gss_msg == NULL) {
                spin_unlock(&pipe->lock);
                goto err_put_ctx;
index cb49898a5a58aacfadceda27a07ceb45eb88a8d3..7f79fb7dc6a00d6cc4082815d35fab4c60be1943 100644 (file)
@@ -2638,6 +2638,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
 {
        struct rpc_xprt_switch *xps;
        struct rpc_xprt *xprt;
+       unsigned long reconnect_timeout;
        unsigned char resvport;
        int ret = 0;
 
@@ -2649,6 +2650,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
                return -EAGAIN;
        }
        resvport = xprt->resvport;
+       reconnect_timeout = xprt->max_reconnect_timeout;
        rcu_read_unlock();
 
        xprt = xprt_create_transport(xprtargs);
@@ -2657,6 +2659,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
                goto out_put_switch;
        }
        xprt->resvport = resvport;
+       xprt->max_reconnect_timeout = reconnect_timeout;
 
        rpc_xprt_switch_set_roundrobin(xps);
        if (setup) {
@@ -2673,6 +2676,27 @@ out_put_switch:
 }
 EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
 
+static int
+rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+               struct rpc_xprt *xprt,
+               void *data)
+{
+       unsigned long timeout = *((unsigned long *)data);
+
+       if (timeout < xprt->max_reconnect_timeout)
+               xprt->max_reconnect_timeout = timeout;
+       return 0;
+}
+
+void
+rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo)
+{
+       rpc_clnt_iterate_for_each_xprt(clnt,
+                       rpc_xprt_cap_max_reconnect_timeout,
+                       &timeo);
+}
+EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 static void rpc_show_header(void)
 {
index 8313960cac524dd36d220f9b55d124435400f25a..ea244b29138b0b86cf7860ce5c1e4605ade86a2a 100644 (file)
@@ -680,6 +680,20 @@ out:
        spin_unlock_bh(&xprt->transport_lock);
 }
 
+static bool
+xprt_has_timer(const struct rpc_xprt *xprt)
+{
+       return xprt->idle_timeout != 0;
+}
+
+static void
+xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
+       __must_hold(&xprt->transport_lock)
+{
+       if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
+               mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
+}
+
 static void
 xprt_init_autodisconnect(unsigned long data)
 {
@@ -688,6 +702,8 @@ xprt_init_autodisconnect(unsigned long data)
        spin_lock(&xprt->transport_lock);
        if (!list_empty(&xprt->recv))
                goto out_abort;
+       /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
+       xprt->last_used = jiffies;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                goto out_abort;
        spin_unlock(&xprt->transport_lock);
@@ -725,6 +741,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
                goto out;
        xprt->snd_task =NULL;
        xprt->ops->release_xprt(xprt, NULL);
+       xprt_schedule_autodisconnect(xprt);
 out:
        spin_unlock_bh(&xprt->transport_lock);
        wake_up_bit(&xprt->state, XPRT_LOCKED);
@@ -888,11 +905,6 @@ static void xprt_timer(struct rpc_task *task)
        spin_unlock_bh(&xprt->transport_lock);
 }
 
-static inline int xprt_has_timer(struct rpc_xprt *xprt)
-{
-       return xprt->idle_timeout != 0;
-}
-
 /**
  * xprt_prepare_transmit - reserve the transport before sending a request
  * @task: RPC task about to send a request
@@ -1280,9 +1292,7 @@ void xprt_release(struct rpc_task *task)
        if (!list_empty(&req->rq_list))
                list_del(&req->rq_list);
        xprt->last_used = jiffies;
-       if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
-               mod_timer(&xprt->timer,
-                               xprt->last_used + xprt->idle_timeout);
+       xprt_schedule_autodisconnect(xprt);
        spin_unlock_bh(&xprt->transport_lock);
        if (req->rq_buffer)
                xprt->ops->buf_free(req->rq_buffer);
index 111767ab124aa4037dfe8c7040866d7196343292..8ede3bc52481b73c82834aa684111013c6d40cad 100644 (file)
@@ -177,7 +177,6 @@ static struct ctl_table sunrpc_table[] = {
  * increase over time if the server is down or not responding.
  */
 #define XS_TCP_INIT_REEST_TO   (3U * HZ)
-#define XS_TCP_MAX_REEST_TO    (5U * 60 * HZ)
 
 /*
  * TCP idle timeout; client drops the transport socket if it is idle
@@ -2173,6 +2172,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                write_unlock_bh(&sk->sk_callback_lock);
        }
        xs_udp_do_set_buffer_size(xprt);
+
+       xprt->stat.connect_start = jiffies;
 }
 
 static void xs_udp_setup_socket(struct work_struct *work)
@@ -2236,6 +2237,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                unsigned int keepcnt = xprt->timeout->to_retries + 1;
                unsigned int opt_on = 1;
                unsigned int timeo;
+               unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
 
                /* TCP Keepalive options */
                kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
@@ -2247,6 +2249,16 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
                                (char *)&keepcnt, sizeof(keepcnt));
 
+               /* Avoid temporary addresses; they are bad for long-lived
+                * connections such as NFS mounts.
+                * RFC4941, section 3.6 suggests that:
+                *    Individual applications, which have specific
+                *    knowledge about the normal duration of connections,
+                *    MAY override this as appropriate.
+                */
+               kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
+                               (char *)&addr_pref, sizeof(addr_pref));
+
                /* TCP user timeout (see RFC5482) */
                timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
                        (xprt->timeout->to_retries + 1);
@@ -2295,6 +2307,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                /* SYN_SENT! */
                if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+               break;
+       case -EADDRNOTAVAIL:
+               /* Source port number is unavailable. Try a new one! */
+               transport->srcport = 0;
        }
 out:
        return ret;
@@ -2369,6 +2385,25 @@ out:
        xprt_wake_pending_tasks(xprt, status);
 }
 
+static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
+{
+       unsigned long start, now = jiffies;
+
+       start = xprt->stat.connect_start + xprt->reestablish_timeout;
+       if (time_after(start, now))
+               return start - now;
+       return 0;
+}
+
+static void xs_reconnect_backoff(struct rpc_xprt *xprt)
+{
+       xprt->reestablish_timeout <<= 1;
+       if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
+               xprt->reestablish_timeout = xprt->max_reconnect_timeout;
+       if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+               xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+}
+
 /**
  * xs_connect - connect a socket to a remote endpoint
  * @xprt: pointer to transport structure
@@ -2386,6 +2421,7 @@ out:
 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       unsigned long delay = 0;
 
        WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
 
@@ -2397,19 +2433,15 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
                /* Start by resetting any existing state */
                xs_reset_transport(transport);
 
-               queue_delayed_work(xprtiod_workqueue,
-                                  &transport->connect_worker,
-                                  xprt->reestablish_timeout);
-               xprt->reestablish_timeout <<= 1;
-               if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
-                       xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
-               if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
-                       xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
-       } else {
+               delay = xs_reconnect_delay(xprt);
+               xs_reconnect_backoff(xprt);
+
+       } else
                dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
-               queue_delayed_work(xprtiod_workqueue,
-                                  &transport->connect_worker, 0);
-       }
+
+       queue_delayed_work(xprtiod_workqueue,
+                       &transport->connect_worker,
+                       delay);
 }
 
 /**
@@ -2961,6 +2993,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
        xprt->ops = &xs_tcp_ops;
        xprt->timeout = &xs_tcp_default_timeout;
 
+       xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
+
        INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
        INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
 
index 15b196fc2f49b35f6663b6b359493f6a27564797..179219845dfcdfbeb586d12c5ec1296095d9fbf4 100644 (file)
@@ -108,16 +108,20 @@ as-option = $(call try-run,\
 as-instr = $(call try-run,\
        printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
 
 cc-option = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
 cc-option-yn = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-option-align
 # Prefix align with either -falign or -malign
@@ -127,7 +131,7 @@ cc-option-align = $(subst -functions=0,,\
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
 cc-disable-warning = $(call try-run,\
-       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-name
 # Expands to either gcc or clang
index 5e22b60589c1ea7b5155daf92ff9d59cf33019b3..61f0e6db909bbf0a7016d60f4b58a4c4bd3c0dba 100644 (file)
@@ -19,25 +19,42 @@ ifdef CONFIG_GCC_PLUGINS
     endif
   endif
 
-  GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y))
+  GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
 
-  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN
+  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
 
+  ifneq ($(PLUGINCC),)
+    # SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
+    GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
+  endif
+
+  KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+  GCC_PLUGIN := $(gcc-plugin-y)
+  GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
+endif
+
+# If plugins aren't supported, abort the build before hard-to-read compiler
+# errors start getting spewed by the main build.
+PHONY += gcc-plugins-check
+gcc-plugins-check: FORCE
+ifdef CONFIG_GCC_PLUGINS
   ifeq ($(PLUGINCC),)
     ifneq ($(GCC_PLUGINS_CFLAGS),)
       ifeq ($(call cc-ifversion, -ge, 0405, y), y)
-        PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)")
-        $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?)
+       $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true
+       @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1
       else
-        $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least)
+       @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1
       endif
     endif
-  else
-    # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
-    GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
   endif
+endif
+       @:
 
-  KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-  GCC_PLUGIN := $(gcc-plugin-y)
-
+# Actually do the build, if requested.
+PHONY += gcc-plugins
+gcc-plugins: scripts_basic gcc-plugins-check
+ifdef CONFIG_GCC_PLUGINS
+       $(Q)$(MAKE) $(build)=scripts/gcc-plugins
 endif
+       @:
index fb92075654711393cd19af2c40fb37dcf7605dd0..b65224bfb847302db905ed2ac33dc5d598b9b48b 100755 (executable)
@@ -1,5 +1,12 @@
 #!/bin/sh
 srctree=$(dirname "$0")
+
+SHOW_ERROR=
+if [ "$1" = "--show-error" ] ; then
+       SHOW_ERROR=1
+       shift || true
+fi
+
 gccplugins_dir=$($3 -print-file-name=plugin)
 plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
 #include "gcc-common.h"
@@ -13,6 +20,9 @@ EOF
 
 if [ $? -ne 0 ]
 then
+       if [ -n "$SHOW_ERROR" ] ; then
+               echo "${plugincc}" >&2
+       fi
        exit 1
 fi
 
@@ -48,4 +58,8 @@ then
        echo "$2"
        exit 0
 fi
+
+if [ -n "$SHOW_ERROR" ] ; then
+       echo "${plugincc}" >&2
+fi
 exit 1
index 88c8ec47232b1c8595992109fbdd8a503cc6cc09..8b29dc17c73cad2730531464d3528c27973cb659 100644 (file)
@@ -12,16 +12,18 @@ else
   export HOST_EXTRACXXFLAGS
 endif
 
-export GCCPLUGINS_DIR HOSTLIBS
-
 ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
   GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
 endif
 
-$(HOSTLIBS)-y := $(GCC_PLUGIN)
+export HOSTLIBS
+
+$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
 always := $($(HOSTLIBS)-y)
 
-cyc_complexity_plugin-objs := cyc_complexity_plugin.o
-sancov_plugin-objs := sancov_plugin.o
+$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
+
+subdir-y := $(GCC_PLUGIN_SUBDIR)
+subdir-  += $(GCC_PLUGIN_SUBDIR)
 
 clean-files += *.so
index 122fcdaf42c86cec7a5fbce08cc3b406f692f6c5..49a00d54b835f156745c27bfb12b1f64bcf9140c 100755 (executable)
@@ -432,7 +432,7 @@ foreach my $file (@ARGV) {
            die "$P: file '${file}' not found\n";
        }
     }
-    if ($from_filename || vcs_file_exists($file)) {
+    if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) {
        $file =~ s/^\Q${cur_path}\E//;  #strip any absolute path
        $file =~ s/^\Q${lk_path}\E//;   #or the path to the lk tree
        push(@files, $file);
index 176758cdfa577f4c25e3d4afdea4f6292a0be0c3..df28f2b6f3e1b47ab9a290c02f74b2da5f703196 100644 (file)
@@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR
          this low address space will need the permission specific to the
          systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+       bool
+       help
+         The heap allocator implements __check_heap_object() for
+         validating memory ranges against heap object sizes in
+         support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+       bool
+       help
+         The architecture supports CONFIG_HARDENED_USERCOPY by
+         calling check_object_size() just before performing the
+         userspace copies in the low level implementation of
+         copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+       bool "Harden memory copies between kernel and userspace"
+       depends on HAVE_ARCH_HARDENED_USERCOPY
+       select BUG
+       help
+         This option checks for obviously wrong memory regions when
+         copying memory to/from the kernel (via copy_to_user() and
+         copy_from_user() functions) by rejecting memory ranges that
+         are larger than the specified heap object, span multiple
+         separately allocated pages, are not on the process stack,
+         or are part of the kernel text. This kills entire classes
+         of heap overflow exploits and similar kernel memory exposures.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
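
HAVE_ARCH_HARDENED_USERCOPY above indicates the architecture calls check_object_size() just before its low-level user copies. A hedged sketch of what such a wrapper looks like; __arch_copy_from_user() is an illustrative name for the raw arch copy helper, not something this Kconfig defines:

#include <linux/thread_info.h>
#include <linux/uaccess.h>

static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        /* Validate the kernel-side buffer (heap object bounds, stack, text)
         * before copying; to_user is false because data flows into the kernel. */
        check_object_size(to, n, false);
        return __arch_copy_from_user(to, from, n);      /* illustrative raw copy */
}
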
index 89dacf9b4e6cbcdd7caaed257ce77a510083c3c4..160c7f71372289034f87953de4650fb08498298a 100644 (file)
@@ -906,20 +906,23 @@ static int azx_resume(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
        struct hda_intel *hda;
+       struct hdac_bus *bus;
 
        if (!card)
                return 0;
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
+       bus = azx_bus(chip);
        if (chip->disabled || hda->init_failed || !chip->running)
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
-               && hda->need_i915_power) {
-               snd_hdac_display_power(azx_bus(chip), true);
-               snd_hdac_i915_set_bclk(azx_bus(chip));
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+               snd_hdac_display_power(bus, true);
+               if (hda->need_i915_power)
+                       snd_hdac_i915_set_bclk(bus);
        }
+
        if (chip->msi)
                if (pci_enable_msi(pci) < 0)
                        chip->msi = 0;
@@ -929,6 +932,11 @@ static int azx_resume(struct device *dev)
 
        hda_intel_init_chip(chip, true);
 
+       /* power down again for link-controlled chips */
+       if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+           !hda->need_i915_power)
+               snd_hdac_display_power(bus, false);
+
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
        trace_azx_resume(chip);
@@ -1008,6 +1016,7 @@ static int azx_runtime_resume(struct device *dev)
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
+       bus = azx_bus(chip);
        if (chip->disabled || hda->init_failed)
                return 0;
 
@@ -1015,15 +1024,9 @@ static int azx_runtime_resume(struct device *dev)
                return 0;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
-               bus = azx_bus(chip);
-               if (hda->need_i915_power) {
-                       snd_hdac_display_power(bus, true);
+               snd_hdac_display_power(bus, true);
+               if (hda->need_i915_power)
                        snd_hdac_i915_set_bclk(bus);
-               } else {
-                       /* toggle codec wakeup bit for STATESTS read */
-                       snd_hdac_set_codec_wakeup(bus, true);
-                       snd_hdac_set_codec_wakeup(bus, false);
-               }
        }
 
        /* Read STATESTS before controller reset */
@@ -1043,6 +1046,11 @@ static int azx_runtime_resume(struct device *dev)
        azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
                        ~STATESTS_INT_MASK);
 
+       /* power down again for link-controlled chips */
+       if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+           !hda->need_i915_power)
+               snd_hdac_display_power(bus, false);
+
        trace_azx_runtime_resume(chip);
        return 0;
 }
index 6adde457b602e08aedd1806e8b79863d65e006cc..6cf1f35974558053101e00351482574c2be6a988 100644 (file)
@@ -1128,6 +1128,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 {
        /* devices which do not support reading the sample rate. */
        switch (chip->usb_id) {
+       case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
        case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
@@ -1138,6 +1139,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+       case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
        case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
index 4a413485f9eb8ef58ec71c77ff2594f4300c8ea6..92a8308b96f64cb6ce845a8379ca06cb9a6a00d6 100644 (file)
 #define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT    ( 9*32+22) /* PCOMMIT instruction */
 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
 #define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
 #define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG       X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  */
 #define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
-
+#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR                X86_BUG(12) /* IPI required to wake up remote CPU */
 #endif /* _ASM_X86_CPUFEATURES_H */
index 911e9358ceb184b6b0b0f38b9c7b853fc4506fbe..85599ad4d0247863cef655d02b9a4b3f83c77fb7 100644 (file)
@@ -56,5 +56,7 @@
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
 #define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17        0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
index 4916144e3c42668a3e07af33859b4a1af3f2985b..fac9a5c0abe94b233b72b35bca8c7a665847b694 100644 (file)
@@ -99,5 +99,7 @@
 #define REQUIRED_MASK14        0
 #define REQUIRED_MASK15        0
 #define REQUIRED_MASK16        0
+#define REQUIRED_MASK17        0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
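
The new DISABLED_MASK_CHECK and REQUIRED_MASK_CHECK macros rely on BUILD_BUG_ON_ZERO(), which evaluates to 0 while its condition is false and forces a compile error (via a negative-width bitfield) when it is true, so the build breaks if NCAPINTS changes without these mask tables being updated. A small illustration with made-up names:

#include <linux/bug.h>

#define EXAMPLE_NCAPINTS        18
/* Expands to 0 while the counts agree; fails to compile if they ever diverge. */
#define EXAMPLE_MASK_CHECK      BUILD_BUG_ON_ZERO(EXAMPLE_NCAPINTS != 18)
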
index 5b15d94a33f818d04ee7ae2a0f5685125bd89a40..37fee272618f1de348a7d5961f1792debba72991 100644 (file)
@@ -78,7 +78,6 @@
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
-#define EXIT_REASON_PCOMMIT             65
 
 #define VMX_EXIT_REASONS \
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_INVVPID,               "INVVPID" }, \
        { EXIT_REASON_INVPCID,               "INVPCID" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
-       { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
-       { EXIT_REASON_PCOMMIT,               "PCOMMIT" }
+       { EXIT_REASON_XRSTORS,               "XRSTORS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
index 406459b935a27c4f9b518426a4fd60493432221d..da218fec605657ee415f8ad71a95d8851330a9de 100644 (file)
@@ -84,6 +84,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_PERCPU_HASH,
        BPF_MAP_TYPE_PERCPU_ARRAY,
        BPF_MAP_TYPE_STACK_TRACE,
+       BPF_MAP_TYPE_CGROUP_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -93,6 +94,7 @@ enum bpf_prog_type {
        BPF_PROG_TYPE_SCHED_CLS,
        BPF_PROG_TYPE_SCHED_ACT,
        BPF_PROG_TYPE_TRACEPOINT,
+       BPF_PROG_TYPE_XDP,
 };
 
 #define BPF_PSEUDO_MAP_FD      1
@@ -313,6 +315,66 @@ enum bpf_func_id {
         */
        BPF_FUNC_skb_get_tunnel_opt,
        BPF_FUNC_skb_set_tunnel_opt,
+
+       /**
+        * bpf_skb_change_proto(skb, proto, flags)
+        * Change the protocol of the skb. Currently supported are
+        * v4 -> v6 and v6 -> v4 transitions. The helper will also
+        * resize the skb. The eBPF program is expected to fill in the
+        * new headers via skb_store_bytes and lX_csum_replace.
+        * @skb: pointer to skb
+        * @proto: new skb->protocol type
+        * @flags: reserved
+        * Return: 0 on success or negative error
+        */
+       BPF_FUNC_skb_change_proto,
+
+       /**
+        * bpf_skb_change_type(skb, type)
+        * Change packet type of skb.
+        * @skb: pointer to skb
+        * @type: new skb->pkt_type type
+        * Return: 0 on success or negative error
+        */
+       BPF_FUNC_skb_change_type,
+
+       /**
+        * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+        * @skb: pointer to skb
+        * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+        * @index: index of the cgroup in the bpf_map
+        * Return:
+        *   == 0 skb failed the cgroup2 descendant test
+        *   == 1 skb succeeded the cgroup2 descendant test
+        *    < 0 error
+        */
+       BPF_FUNC_skb_in_cgroup,
+
+       /**
+        * bpf_get_hash_recalc(skb)
+        * Retrieve and possibly recalculate skb->hash.
+        * @skb: pointer to skb
+        * Return: hash
+        */
+       BPF_FUNC_get_hash_recalc,
+
+       /**
+        * u64 bpf_get_current_task(void)
+        * Returns current task_struct
+        * Return: current
+        */
+       BPF_FUNC_get_current_task,
+
+       /**
+        * bpf_probe_write_user(void *dst, void *src, int len)
+        * safely attempt to write to a location
+        * @dst: destination address in userspace
+        * @src: source address on stack
+        * @len: number of bytes to copy
+        * Return: 0 on success or negative error
+        */
+       BPF_FUNC_probe_write_user,
+
        __BPF_FUNC_MAX_ID,
 };
 
@@ -347,9 +409,11 @@ enum bpf_func_id {
 #define BPF_F_ZERO_CSUM_TX             (1ULL << 1)
 #define BPF_F_DONT_FRAGMENT            (1ULL << 2)
 
-/* BPF_FUNC_perf_event_output flags. */
+/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
 #define BPF_F_INDEX_MASK               0xffffffffULL
 #define BPF_F_CURRENT_CPU              BPF_F_INDEX_MASK
+/* BPF_FUNC_perf_event_output for sk_buff input context. */
+#define BPF_F_CTXLEN_MASK              (0xfffffULL << 32)
 
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
@@ -386,4 +450,24 @@ struct bpf_tunnel_key {
        __u32 tunnel_label;
 };
 
+/* User return codes for XDP prog type.
+ * A valid XDP program must return one of these defined values. All other
+ * return codes are reserved for future use. Unknown return codes will result
+ * in packet drop.
+ */
+enum xdp_action {
+       XDP_ABORTED = 0,
+       XDP_DROP,
+       XDP_PASS,
+       XDP_TX,
+};
+
+/* user accessible metadata for XDP packet hook
+ * new fields must be added to the end of this structure
+ */
+struct xdp_md {
+       __u32 data;
+       __u32 data_end;
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
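
The uapi additions above (BPF_PROG_TYPE_XDP, struct xdp_md, and the xdp_action codes) are what a minimal XDP program sees. A hedged sketch of such a program; it only bounds-checks the frame before passing it on, and in practice would be compiled with a BPF-targeting compiler and loaded as BPF_PROG_TYPE_XDP:

#include <linux/bpf.h>
#include <linux/if_ether.h>

int xdp_pass_example(struct xdp_md *ctx)
{
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        /* Drop frames too short to hold an Ethernet header. */
        if (data + sizeof(struct ethhdr) > data_end)
                return XDP_DROP;

        return XDP_PASS;        /* hand everything else to the stack */
}
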
index 736da44596e451fa1a14d9a045f7feda269a0779..b303bcdd8ed15fb9d140e0e7369388bc714aaace 100644 (file)
@@ -176,10 +176,18 @@ Each probe argument follows below syntax.
 
 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
 '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters.
-'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (see TYPES for details)
 
 On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
 
+TYPES
+-----
+Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. Prefixes 's' and 'u' mean those types are signed and unsigned, respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' to specify only signedness and leave its size auto-detected by perf probe.
+String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+Bitfield is another special type, which takes 3 parameters: bit-width, bit-offset, and container-size (usually 32). The syntax is:
+
+ b<bit-width>@<bit-offset>/<container-size>
+
 LINE SYNTAX
 -----------
 Line range is described by following syntax.
index 1f6c70594f0f79e378163c6430b9028642eee45d..053bbbd84ece30c673afe7e176328f8390a6bda9 100644 (file)
@@ -116,8 +116,8 @@ OPTIONS
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-       srcline, period, iregs, brstack, brstacksym, flags.
-        Field list can be prepended with the type, trace, sw or hw,
+        srcline, period, iregs, brstack, brstacksym, flags, bpf-output,
+        callindent. Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -F sw:comm,tid,time,ip,sym  and -F trace:time,cpu,trace
 
index c6d0f91731a14732333af62d0a40a3ea43fb4c99..8d4dc97d80baeeadd3bd2eb1d966f5fc8292bdd0 100644 (file)
@@ -54,10 +54,6 @@ int arch__compare_symbol_names(const char *namea, const char *nameb)
 #endif
 
 #if defined(_CALL_ELF) && _CALL_ELF == 2
-bool arch__prefers_symtab(void)
-{
-       return true;
-}
 
 #ifdef HAVE_LIBELF_SUPPORT
 void arch__sym_update(struct symbol *s, GElf_Sym *sym)
@@ -100,4 +96,27 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev,
                        tev->point.offset += lep_offset;
        }
 }
+
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+                                          int ntevs)
+{
+       struct probe_trace_event *tev;
+       struct map *map;
+       struct symbol *sym = NULL;
+       struct rb_node *tmp;
+       int i = 0;
+
+       map = get_target_map(pev->target, pev->uprobes);
+       if (!map || map__load(map, NULL) < 0)
+               return;
+
+       for (i = 0; i < ntevs; i++) {
+               tev = &pev->tevs[i];
+               map__for_each_symbol(map, sym, tmp) {
+                       if (map->unmap_ip(map, sym->start) == tev->point.address)
+                               arch__fix_tev_from_maps(pev, tev, map, sym);
+               }
+       }
+}
+
 #endif
index 971ff91b16cb3be52702cca780c3df818d52c51a..9c640a8081c70a48a1c9090c809fa4b96f918602 100644 (file)
@@ -2116,7 +2116,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                     "Valid types: hw,sw,trace,raw. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
                     "addr,symoff,period,iregs,brstack,brstacksym,flags,"
-                    "callindent", parse_output_fields),
+                    "bpf-output,callindent", parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
index 0c16d20d7e32fa2eb7377247c2e5542d2a5d076a..3c7452b39f57649b05d675db3d19395fb765df2d 100644 (file)
@@ -331,7 +331,7 @@ static int read_counter(struct perf_evsel *counter)
        return 0;
 }
 
-static void read_counters(bool close_counters)
+static void read_counters(void)
 {
        struct perf_evsel *counter;
 
@@ -341,11 +341,6 @@ static void read_counters(bool close_counters)
 
                if (perf_stat_process_counter(&stat_config, counter))
                        pr_warning("failed to process counter %s\n", counter->name);
-
-               if (close_counters) {
-                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
-                                            thread_map__nr(evsel_list->threads));
-               }
        }
 }
 
@@ -353,7 +348,7 @@ static void process_interval(void)
 {
        struct timespec ts, rs;
 
-       read_counters(false);
+       read_counters();
 
        clock_gettime(CLOCK_MONOTONIC, &ts);
        diff_timespec(&rs, &ts, &ref_time);
@@ -380,6 +375,17 @@ static void enable_counters(void)
                perf_evlist__enable(evsel_list);
 }
 
+static void disable_counters(void)
+{
+       /*
+        * If we don't have a tracee (we are attaching to a task or CPU), counters may
+        * still be running. To get accurate group ratios, we must stop groups
+        * from counting before reading their constituent counters.
+        */
+       if (!target__none(&target))
+               perf_evlist__disable(evsel_list);
+}
+
 static volatile int workload_exec_errno;
 
 /*
@@ -657,11 +663,20 @@ try_again:
                }
        }
 
+       disable_counters();
+
        t1 = rdclock();
 
        update_stats(&walltime_nsecs_stats, t1 - t0);
 
-       read_counters(true);
+       /*
+        * Closing a group leader splits the group, and as we only disable
+        * group leaders, this results in the remaining events becoming
+        * enabled. To avoid arbitrary skew, we must read all counters before
+        * closing any group leaders.
+        */
+       read_counters();
+       perf_evlist__close(evsel_list);
 
        return WEXITSTATUS(status);
 }
index 953dc1ab2ed7bd0be442c4491c9c8a4862aa3386..28733962cd80a63e1376b5b71f4771ab8f8857f7 100644 (file)
@@ -170,15 +170,17 @@ static struct map *kernel_get_module_map(const char *module)
                module = "kernel";
 
        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+               /* short_name is "[module]" */
                if (strncmp(pos->dso->short_name + 1, module,
-                           pos->dso->short_name_len - 2) == 0) {
+                           pos->dso->short_name_len - 2) == 0 &&
+                   module[pos->dso->short_name_len - 2] == '\0') {
                        return pos;
                }
        }
        return NULL;
 }
 
-static struct map *get_target_map(const char *target, bool user)
+struct map *get_target_map(const char *target, bool user)
 {
        /* Init maps of given executable or kernel */
        if (user)
@@ -385,7 +387,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
                if (uprobes)
                        address = sym->start;
                else
-                       address = map->unmap_ip(map, sym->start);
+                       address = map->unmap_ip(map, sym->start) - map->reloc;
                break;
        }
        if (!address) {
@@ -664,22 +666,14 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
        return ret;
 }
 
-/* Post processing the probe events */
-static int post_process_probe_trace_events(struct probe_trace_event *tevs,
-                                          int ntevs, const char *module,
-                                          bool uprobe)
+static int
+post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
+                                      int ntevs)
 {
        struct ref_reloc_sym *reloc_sym;
        char *tmp;
        int i, skipped = 0;
 
-       if (uprobe)
-               return add_exec_to_probe_trace_events(tevs, ntevs, module);
-
-       /* Note that currently ref_reloc_sym based probe is not for drivers */
-       if (module)
-               return add_module_to_probe_trace_events(tevs, ntevs, module);
-
        reloc_sym = kernel_get_ref_reloc_sym();
        if (!reloc_sym) {
                pr_warning("Relocated base symbol is not found!\n");
@@ -711,6 +705,34 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
        return skipped;
 }
 
+void __weak
+arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused,
+                                     int ntevs __maybe_unused)
+{
+}
+
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct perf_probe_event *pev,
+                                          struct probe_trace_event *tevs,
+                                          int ntevs, const char *module,
+                                          bool uprobe)
+{
+       int ret;
+
+       if (uprobe)
+               ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
+       else if (module)
+               /* Currently ref_reloc_sym based probe is not for drivers */
+               ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+       else
+               ret = post_process_kernel_probe_trace_events(tevs, ntevs);
+
+       if (ret >= 0)
+               arch__post_process_probe_trace_events(pev, ntevs);
+
+       return ret;
+}
+
 /* Try to find perf_probe_event with debuginfo */
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                          struct probe_trace_event **tevs)
@@ -749,7 +771,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("Found %d probe_trace_events.\n", ntevs);
-               ret = post_process_probe_trace_events(*tevs, ntevs,
+               ret = post_process_probe_trace_events(pev, *tevs, ntevs,
                                                pev->target, pev->uprobes);
                if (ret < 0 || ret == ntevs) {
                        clear_probe_trace_events(*tevs, ntevs);
@@ -2936,8 +2958,6 @@ errout:
        return err;
 }
 
-bool __weak arch__prefers_symtab(void) { return false; }
-
 /* Concatinate two arrays */
 static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
 {
@@ -3158,12 +3178,6 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
        if (ret > 0 || pev->sdt)        /* SDT can be found only in the cache */
                return ret == 0 ? -ENOENT : ret; /* Found in probe cache */
 
-       if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
-               ret = find_probe_trace_events_from_map(pev, tevs);
-               if (ret > 0)
-                       return ret; /* Found in symbol table */
-       }
-
        /* Convert perf_probe_event with debuginfo */
        ret = try_to_find_probe_trace_events(pev, tevs);
        if (ret != 0)
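
probe-event.c above adds a __weak no-op default for arch__post_process_probe_trace_events(), which an architecture overrides (as the powerpc hunk earlier in this series does) simply by providing a strong definition of the same symbol; the linker picks the strong one. A hedged illustration of the pattern with made-up names, the two definitions living in separate objects:

#include <linux/compiler.h>     /* in perf this comes from tools/include */

/* Generic code: weak default that does nothing. */
void __weak example_arch_hook(int nevents)
{
}

/*
 * Arch code (separate file): a strong definition with the same signature,
 *     void example_arch_hook(int nevents) { ... arch-specific fixups ... }
 * silently replaces the weak default at link time.
 */
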
index e18ea9fe63857cb7a9b382dac563fd2c8cdfbd85..f4f45db77c1c1ec59c3ee505f525f2b2561530ee 100644 (file)
@@ -158,7 +158,6 @@ int show_line_range(struct line_range *lr, const char *module, bool user);
 int show_available_vars(struct perf_probe_event *pevs, int npevs,
                        struct strfilter *filter);
 int show_available_funcs(const char *module, struct strfilter *filter, bool user);
-bool arch__prefers_symtab(void);
 void arch__fix_tev_from_maps(struct perf_probe_event *pev,
                             struct probe_trace_event *tev, struct map *map,
                             struct symbol *sym);
@@ -173,4 +172,9 @@ int e_snprintf(char *str, size_t size, const char *format, ...)
 int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
                            struct perf_probe_arg *pvar);
 
+struct map *get_target_map(const char *target, bool user);
+
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+                                          int ntevs);
+
 #endif /*_PROBE_EVENT_H */
index f2d9ff064e2de720247e77f9645faf94b6b4ce57..5c290c682afe7176607fe01f4d29742b1821f1a9 100644 (file)
@@ -297,10 +297,13 @@ static int convert_variable_type(Dwarf_Die *vr_die,
        char sbuf[STRERR_BUFSIZE];
        int bsize, boffs, total;
        int ret;
+       char sign;
 
        /* TODO: check all types */
-       if (cast && strcmp(cast, "string") != 0) {
+       if (cast && strcmp(cast, "string") != 0 &&
+           strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
                /* Non string type is OK */
+               /* and respect signedness cast */
                tvar->type = strdup(cast);
                return (tvar->type == NULL) ? -ENOMEM : 0;
        }
@@ -361,6 +364,13 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                return (tvar->type == NULL) ? -ENOMEM : 0;
        }
 
+       if (cast && (strcmp(cast, "u") == 0))
+               sign = 'u';
+       else if (cast && (strcmp(cast, "s") == 0))
+               sign = 's';
+       else
+               sign = die_is_signed_type(&type) ? 's' : 'u';
+
        ret = dwarf_bytesize(&type);
        if (ret <= 0)
                /* No size ... try to use default type */
@@ -373,8 +383,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                        dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
                ret = MAX_BASIC_TYPE_BITS;
        }
-       ret = snprintf(buf, 16, "%c%d",
-                      die_is_signed_type(&type) ? 's' : 'u', ret);
+       ret = snprintf(buf, 16, "%c%d", sign, ret);
 
 formatted:
        if (ret < 0 || ret >= 16) {
index 947d21f3839838c433430b01fe52165522f87295..3d3cb8392c86029bb488f737564730e0cd8995bc 100644 (file)
@@ -588,7 +588,11 @@ static char *get_trace_output(struct hist_entry *he)
        } else {
                pevent_event_info(&seq, evsel->tp_format, &rec);
        }
-       return seq.buffer;
+       /*
+        * Trim the buffer; it starts at 4KB and we're not going to
+        * add anything more to this buffer.
+        */
+       return realloc(seq.buffer, seq.len + 1);
 }
 
 static int64_t
index 5404efa578a3fcea18ce5bbab2a991e0c3d98b73..dd48f421844c7902773526d8c0845109ab9c5e55 100644 (file)
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 #include <linux/libnvdimm.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
@@ -1474,6 +1475,7 @@ static int nfit_test_probe(struct platform_device *pdev)
        if (nfit_test->setup != nfit_test0_setup)
                return 0;
 
+       flush_work(&acpi_desc->work);
        nfit_test->setup_hotplug = 1;
        nfit_test->setup(nfit_test);
 
index 3c40c9d0e6c70a87b83c9d92a61f0fc0f2f4570c..1cc6d64c39b709dd28f104ced47829d7427c435f 100644 (file)
@@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc)
 
 GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
 
-CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
 
 export CFLAGS
 
index fb4b0a79a9502ba9bcaad7acfdbbf692c5ed5208..83777c1cbae0693c14e1d3f23df927c5cae25fef 100644 (file)
@@ -73,12 +73,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
        int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;
 
-       mutex_lock(&kvm->lock);
-
-       if (irqchip_in_kernel(kvm)) {
-               ret = -EEXIST;
-               goto out;
-       }
+       if (irqchip_in_kernel(kvm))
+               return -EEXIST;
 
        /*
         * This function is also called by the KVM_CREATE_IRQCHIP handler,
@@ -87,10 +83,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
         * the proper checks already.
         */
        if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
-               !kvm_vgic_global_state.can_emulate_gicv2) {
-               ret = -ENODEV;
-               goto out;
-       }
+               !kvm_vgic_global_state.can_emulate_gicv2)
+               return -ENODEV;
 
        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
@@ -138,9 +132,6 @@ out_unlock:
                vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&vcpu->mutex);
        }
-
-out:
-       mutex_unlock(&kvm->lock);
        return ret;
 }
 
index cc081ccfcaa3743ca5e0f46dd350bda36bc3ec72..195078225aa5d0c3b3214fd40e0b5f441c5d7518 100644 (file)
@@ -696,6 +696,11 @@ static void kvm_destroy_devices(struct kvm *kvm)
 {
        struct kvm_device *dev, *tmp;
 
+       /*
+        * We do not need to take the kvm->lock here, because nobody else
+        * has a reference to the struct kvm at this point, so nothing else
+        * can access the devices list anyhow.
+        */
        list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
                list_del(&dev->vm_node);
                dev->ops->destroy(dev);
@@ -2832,19 +2837,28 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        dev->ops = ops;
        dev->kvm = kvm;
 
+       mutex_lock(&kvm->lock);
        ret = ops->create(dev, cd->type);
        if (ret < 0) {
+               mutex_unlock(&kvm->lock);
                kfree(dev);
                return ret;
        }
+       list_add(&dev->vm_node, &kvm->devices);
+       mutex_unlock(&kvm->lock);
+
+       if (ops->init)
+               ops->init(dev);
 
        ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
        if (ret < 0) {
                ops->destroy(dev);
+               mutex_lock(&kvm->lock);
+               list_del(&dev->vm_node);
+               mutex_unlock(&kvm->lock);
                return ret;
        }
 
-       list_add(&dev->vm_node, &kvm->devices);
        kvm_get_kvm(kvm);
        cd->fd = ret;
        return 0;