Merge tag 'char-misc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 17 Mar 2016 20:47:50 +0000 (13:47 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 17 Mar 2016 20:47:50 +0000 (13:47 -0700)
Pull char/misc updates from Greg KH:
 "Here is the big char/misc driver update for 4.6-rc1.

  The majority of the patches here are hwtracing and some new mic
  drivers, but there are a lot of other driver updates as well.  Full
  details in the shortlog.

  All have been in linux-next for a while with no reported issues"

* tag 'char-misc-4.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (238 commits)
  goldfish: Fix build error of missing ioremap on UM
  nvmem: mediatek: Fix later provider initialization
  nvmem: imx-ocotp: Fix return value of imx_ocotp_read
  nvmem: Fix dependencies for !HAS_IOMEM archs
  char: genrtc: replace blacklist with whitelist
  drivers/hwtracing: make coresight-etm-perf.c explicitly non-modular
  drivers: char: mem: fix IS_ERROR_VALUE usage
  char: xillybus: Fix internal data structure initialization
  pch_phub: return -ENODATA if ROM can't be mapped
  Drivers: hv: vmbus: Support kexec on ws2012 r2 and above
  Drivers: hv: vmbus: Support handling messages on multiple CPUs
  Drivers: hv: utils: Remove util transport handler from list if registration fails
  Drivers: hv: util: Pass the channel information during the init call
  Drivers: hv: vmbus: avoid unneeded compiler optimizations in vmbus_wait_for_unload()
  Drivers: hv: vmbus: remove code duplication in message handling
  Drivers: hv: vmbus: avoid wait_for_completion() on crash
  Drivers: hv: vmbus: don't lose HVMSG_TIMER_EXPIRED messages
  misc: at24: replace memory_accessor with nvmem_device_read
  eeprom: 93xx46: extend driver to plug into the NVMEM framework
  eeprom: at25: extend driver to plug into the NVMEM framework
  ...

184 files changed:
Documentation/ABI/stable/sysfs-bus-vmbus
Documentation/devicetree/bindings/goldfish/pipe.txt [new file with mode: 0644]
Documentation/devicetree/bindings/misc/eeprom-93xx46.txt [new file with mode: 0644]
Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt [new file with mode: 0644]
Documentation/devicetree/bindings/nvmem/mtk-efuse.txt [new file with mode: 0644]
Documentation/mic/mic_overview.txt
Documentation/mic/mpssd/mpss
Documentation/mic/mpssd/mpssd.c
Documentation/misc-devices/mei/mei.txt
MAINTAINERS
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/common.c
drivers/android/binder.c
drivers/base/firmware_class.c
drivers/char/Kconfig
drivers/char/mem.c
drivers/char/nvram.c
drivers/char/nwbutton.c
drivers/char/ppdev.c
drivers/char/raw.c
drivers/char/xillybus/xillybus_core.c
drivers/extcon/extcon-arizona.c
drivers/extcon/extcon-gpio.c
drivers/extcon/extcon-max14577.c
drivers/extcon/extcon-max77693.c
drivers/extcon/extcon-max77843.c
drivers/extcon/extcon-max8997.c
drivers/extcon/extcon-palmas.c
drivers/extcon/extcon-rt8973a.c
drivers/extcon/extcon-sm5502.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/connection.c
drivers/hv/hv.c
drivers/hv/hv_fcopy.c
drivers/hv/hv_kvp.c
drivers/hv/hv_snapshot.c
drivers/hv/hv_util.c
drivers/hv/hv_utils_transport.c
drivers/hv/hyperv_vmbus.h
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/hwtracing/coresight/Kconfig
drivers/hwtracing/coresight/Makefile
drivers/hwtracing/coresight/coresight-etb10.c
drivers/hwtracing/coresight/coresight-etm-perf.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm-perf.h [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm.h
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c [new file with mode: 0644]
drivers/hwtracing/coresight/coresight-etm3x.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/coresight/coresight-funnel.c
drivers/hwtracing/coresight/coresight-priv.h
drivers/hwtracing/coresight/coresight-replicator-qcom.c
drivers/hwtracing/coresight/coresight-replicator.c
drivers/hwtracing/coresight/coresight-tmc.c
drivers/hwtracing/coresight/coresight-tpiu.c
drivers/hwtracing/coresight/coresight.c
drivers/hwtracing/coresight/of_coresight.c
drivers/hwtracing/intel_th/Kconfig
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/gth.c
drivers/hwtracing/intel_th/gth.h
drivers/hwtracing/intel_th/intel_th.h
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/intel_th/sth.c
drivers/hwtracing/stm/Kconfig
drivers/hwtracing/stm/Makefile
drivers/hwtracing/stm/core.c
drivers/hwtracing/stm/dummy_stm.c
drivers/hwtracing/stm/heartbeat.c [new file with mode: 0644]
drivers/hwtracing/stm/policy.c
drivers/hwtracing/stm/stm.h
drivers/misc/Kconfig
drivers/misc/ad525x_dpot.c
drivers/misc/apds990x.c
drivers/misc/arm-charlcd.c
drivers/misc/bh1770glc.c
drivers/misc/c2port/core.c
drivers/misc/cxl/sysfs.c
drivers/misc/eeprom/Kconfig
drivers/misc/eeprom/at24.c
drivers/misc/eeprom/at25.c
drivers/misc/eeprom/eeprom.c
drivers/misc/eeprom/eeprom_93xx46.c
drivers/misc/genwqe/card_sysfs.c
drivers/misc/ibmasm/ibmasm.h
drivers/misc/lis3lv02d/lis3lv02d_i2c.c
drivers/misc/lkdtm.c
drivers/misc/mei/Kconfig
drivers/misc/mei/Makefile
drivers/misc/mei/amthif.c
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/client.h
drivers/misc/mei/debugfs.c
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-txe.c
drivers/misc/mei/hw.h
drivers/misc/mei/init.c
drivers/misc/mei/interrupt.c
drivers/misc/mei/main.c
drivers/misc/mei/mei-trace.c
drivers/misc/mei/mei-trace.h
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/mei/pci-txe.c
drivers/misc/mei/wd.c [deleted file]
drivers/misc/mic/Kconfig
drivers/misc/mic/Makefile
drivers/misc/mic/bus/Makefile
drivers/misc/mic/bus/cosm_bus.h
drivers/misc/mic/bus/vop_bus.c [new file with mode: 0644]
drivers/misc/mic/bus/vop_bus.h [new file with mode: 0644]
drivers/misc/mic/card/Makefile
drivers/misc/mic/card/mic_device.c
drivers/misc/mic/card/mic_device.h
drivers/misc/mic/card/mic_virtio.c [deleted file]
drivers/misc/mic/card/mic_virtio.h [deleted file]
drivers/misc/mic/card/mic_x100.c
drivers/misc/mic/cosm/cosm_main.c
drivers/misc/mic/host/Makefile
drivers/misc/mic/host/mic_boot.c
drivers/misc/mic/host/mic_debugfs.c
drivers/misc/mic/host/mic_device.h
drivers/misc/mic/host/mic_fops.c [deleted file]
drivers/misc/mic/host/mic_fops.h [deleted file]
drivers/misc/mic/host/mic_main.c
drivers/misc/mic/host/mic_virtio.c [deleted file]
drivers/misc/mic/host/mic_virtio.h [deleted file]
drivers/misc/mic/host/mic_x100.c
drivers/misc/mic/scif/scif_dma.c
drivers/misc/mic/scif/scif_rma.c
drivers/misc/mic/vop/Makefile [new file with mode: 0644]
drivers/misc/mic/vop/vop_debugfs.c [new file with mode: 0644]
drivers/misc/mic/vop/vop_main.c [new file with mode: 0644]
drivers/misc/mic/vop/vop_main.h [new file with mode: 0644]
drivers/misc/mic/vop/vop_vringh.c [new file with mode: 0644]
drivers/misc/pch_phub.c
drivers/misc/ti-st/st_core.c
drivers/misc/vmw_vmci/vmci_driver.c
drivers/nvmem/Kconfig
drivers/nvmem/Makefile
drivers/nvmem/core.c
drivers/nvmem/imx-ocotp.c
drivers/nvmem/lpc18xx_eeprom.c [new file with mode: 0644]
drivers/nvmem/mtk-efuse.c [new file with mode: 0644]
drivers/nvmem/rockchip-efuse.c
drivers/nvmem/sunxi_sid.c
drivers/platform/Kconfig
drivers/platform/goldfish/Kconfig
drivers/platform/goldfish/Makefile
drivers/platform/goldfish/goldfish_pipe.c
drivers/spmi/spmi-pmic-arb.c
drivers/staging/goldfish/goldfish_audio.c
drivers/staging/goldfish/goldfish_nand.c
drivers/vme/bridges/vme_ca91cx42.c
drivers/w1/masters/omap_hdq.c
drivers/w1/w1.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/mei_wdt.c [new file with mode: 0644]
fs/compat_ioctl.c
include/linux/amba/bus.h
include/linux/coresight-pmu.h [new file with mode: 0644]
include/linux/coresight.h
include/linux/davinci_emac.h
include/linux/eeprom_93xx46.h
include/linux/hyperv.h
include/linux/memory.h
include/linux/mfd/palmas.h
include/linux/nvmem-provider.h
include/linux/platform_data/at24.h
include/linux/spi/eeprom.h
include/linux/stm.h
include/linux/vmw_vmci_defs.h
lib/devres.c
scripts/ver_linux
tools/hv/Makefile

index 636e938d5e33a4e9331a328a05a6b93a0b538e60..5d0125f7bcaf145088359bed69a2ba2cce07fa97 100644 (file)
@@ -27,3 +27,17 @@ Description: The mapping of which primary/sub channels are bound to which
                Virtual Processors.
                Format: <channel's child_relid:the bound cpu's number>
 Users:         tools/hv/lsvmbus
+
+What:          /sys/bus/vmbus/devices/vmbus_*/device
+Date:          Dec. 2015
+KernelVersion: 4.5
+Contact:       K. Y. Srinivasan <kys@microsoft.com>
+Description:   The 16 bit device ID of the device
+Users:         tools/hv/lsvmbus and user level RDMA libraries
+
+What:          /sys/bus/vmbus/devices/vmbus_*/vendor
+Date:          Dec. 2015
+KernelVersion: 4.5
+Contact:       K. Y. Srinivasan <kys@microsoft.com>
+Description:   The 16 bit vendor ID of the device
+Users:         tools/hv/lsvmbus and user level RDMA libraries
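
A minimal user-space sketch for reading the new attributes; the
vmbus_1 directory name below is an assumption, real entries follow
the /sys/bus/vmbus/devices/vmbus_* pattern documented above:

    /* Sketch: read the new vmbus "device" and "vendor" sysfs
     * attributes.  The vmbus_1 directory name is assumed. */
    #include <stdio.h>

    static void show(const char *path)
    {
            char buf[32];
            FILE *f = fopen(path, "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            show("/sys/bus/vmbus/devices/vmbus_1/device");
            show("/sys/bus/vmbus/devices/vmbus_1/vendor");
            return 0;
    }
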
diff --git a/Documentation/devicetree/bindings/goldfish/pipe.txt b/Documentation/devicetree/bindings/goldfish/pipe.txt
new file mode 100644 (file)
index 0000000..e417a31
--- /dev/null
@@ -0,0 +1,17 @@
+Android Goldfish QEMU Pipe
+
+Android pipe virtual device generated by the Android emulator.
+
+Required properties:
+
+- compatible : should contain "google,android-pipe" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+       android_pipe@a010000 {
+               compatible = "google,android-pipe";
+               reg = <0xff018000 0x2000>;
+               interrupts = <0x12>;
+       };
diff --git a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt
new file mode 100644 (file)
index 0000000..a8ebb46
--- /dev/null
@@ -0,0 +1,25 @@
+EEPROMs (SPI) compatible with Microchip Technology 93xx46 family.
+
+Required properties:
+- compatible : shall be one of:
+    "atmel,at93c46d"
+    "eeprom-93xx46"
+- data-size : number of data bits per word (either 8 or 16)
+
+Optional properties:
+- read-only : parameter-less property which disables writes to the EEPROM
+- select-gpios : if present, specifies the GPIO that will be asserted prior to
+  each access to the EEPROM (e.g. for SPI bus multiplexing)
+
+Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
+apply.  In particular, "reg" and "spi-max-frequency" properties must be given.
+
+Example:
+       eeprom@0 {
+               compatible = "eeprom-93xx46";
+               reg = <0>;
+               spi-max-frequency = <1000000>;
+               spi-cs-high;
+               data-size = <8>;
+               select-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>;
+       };
diff --git a/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt b/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt
new file mode 100644 (file)
index 0000000..809df68
--- /dev/null
@@ -0,0 +1,28 @@
+* NXP LPC18xx EEPROM memory NVMEM driver
+
+Required properties:
+  - compatible: Should be "nxp,lpc1857-eeprom"
+  - reg: Must contain an entry with the physical base address and length
+    for each entry in reg-names.
+  - reg-names: Must include the following entries.
+    - reg: EEPROM registers.
+    - mem: EEPROM address space.
+  - clocks: Must contain an entry for each entry in clock-names.
+  - clock-names: Must include the following entries.
+    - eeprom: EEPROM operating clock.
+  - resets: Should contain a reference to the reset controller asserting
+    the EEPROM in reset.
+  - interrupts: Should contain EEPROM interrupt.
+
+Example:
+
+  eeprom: eeprom@4000e000 {
+    compatible = "nxp,lpc1857-eeprom";
+    reg = <0x4000e000 0x1000>,
+          <0x20040000 0x4000>;
+    reg-names = "reg", "mem";
+    clocks = <&ccu1 CLK_CPU_EEPROM>;
+    clock-names = "eeprom";
+    resets = <&rgu 27>;
+    interrupts = <4>;
+  };
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
new file mode 100644 (file)
index 0000000..74cf529
--- /dev/null
@@ -0,0 +1,36 @@
+= Mediatek MTK-EFUSE device tree bindings =
+
+This binding is intended to represent MTK-EFUSE which is found in most Mediatek SOCs.
+
+Required properties:
+- compatible: should be "mediatek,mt8173-efuse" or "mediatek,efuse"
+- reg: Should contain registers location and length
+
+= Data cells =
+These are child nodes of MTK-EFUSE, whose bindings are described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+       efuse: efuse@10206000 {
+               compatible = "mediatek,mt8173-efuse";
+               reg        = <0 0x10206000 0 0x1000>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* Data cells */
+               thermal_calibration: calib@528 {
+                       reg = <0x528 0xc>;
+               };
+       };
+
+= Data consumers =
+These are device nodes which consume nvmem data cells.
+
+For example:
+
+       thermal {
+               ...
+               nvmem-cells = <&thermal_calibration>;
+               nvmem-cell-names = "calibration";
+       };
index 73f44fc3e71569f98bf14283239af7d1f310c9d9..074adbdf83a44205cf9378a953865df791d1e8fb 100644 (file)
@@ -12,10 +12,19 @@ for the X100 devices.
 
 Since it is a PCIe card, it does not have the ability to host hardware
 devices for networking, storage and console. We provide these devices
-on X100 coprocessors thus enabling a self-bootable equivalent environment
-for applications. A key benefit of our solution is that it leverages
-the standard virtio framework for network, disk and console devices,
-though in our case the virtio framework is used across a PCIe bus.
+on X100 coprocessors thus enabling a self-bootable equivalent
+environment for applications. A key benefit of our solution is that it
+leverages the standard virtio framework for network, disk and console
+devices, though in our case the virtio framework is used across a PCIe
+bus. A Virtio Over PCIe (VOP) driver allows creating user space
+backends or devices on the host which are used to probe virtio drivers
+for these devices on the MIC card. The existing VRINGH infrastructure
+in the kernel is used to access virtio rings from the host. The card
+VOP driver allows card virtio drivers to communicate with their user
+space backends on the host via a device page. Ring 3 apps on the host
+can add, remove and configure virtio devices. A thin MIC specific
+virtio_config_ops is implemented which is borrowed heavily from
+previous similar implementations in lguest and s390.
 
 MIC PCIe card has a dma controller with 8 channels. These channels are
 shared between the host s/w and the card s/w. 0 to 3 are used by host
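
Host user space reaches VOP through a per-card character device; the
mpssd change later in this merge opens /dev/vop_virtio<id> instead of
/dev/mic<id>.  A minimal sketch of that open path (card id 0 is
assumed for illustration):

    /* Sketch: open the per-card VOP device node that host user space
     * backends such as mpssd use to add and configure virtio devices. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int id = 0;             /* card id, assumed for illustration */
            char path[64];
            int fd;

            snprintf(path, sizeof(path), "/dev/vop_virtio%d", id);
            fd = open(path, O_RDWR);
            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            /* ... add/configure virtio devices via the VOP interface ... */
            close(fd);
            return 0;
    }
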
@@ -38,7 +47,6 @@ single threaded performance for the host compared to MIC, the ability of
 the host to initiate DMA's to/from the card using the MIC DMA engine and
 the fact that the virtio block storage backend can only be on the host.
 
-                                      |
                +----------+           |             +----------+
                | Card OS  |           |             | Host OS  |
                +----------+           |             +----------+
@@ -47,27 +55,25 @@ the fact that the virtio block storage backend can only be on the host.
         | Virtio| |Virtio  | |Virtio| | |Virtio   |  |Virtio  | |Virtio  |
         | Net   | |Console | |Block | | |Net      |  |Console | |Block   |
         | Driver| |Driver  | |Driver| | |backend  |  |backend | |backend |
-        +-------+ +--------+ +------+ | +---------+  +--------+ +--------+
+        +---+---+ +---+----+ +--+---+ | +---------+  +----+---+ +--------+
             |         |         |     |      |            |         |
             |         |         |     |User  |            |         |
-            |         |         |     |------|------------|---------|-------
-            +-------------------+     |Kernel +--------------------------+
-                      |               |       | Virtio over PCIe IOCTLs  |
-                      |               |       +--------------------------+
-+-----------+         |               |                   |  +-----------+
-| MIC DMA   |         |      +------+ | +------+ +------+ |  | MIC DMA   |
-| Driver    |         |      | SCIF | | | SCIF | | COSM | |  | Driver    |
-+-----------+         |      +------+ | +------+ +--+---+ |  +-----------+
-      |               |         |     |    |        |     |        |
-+---------------+     |      +------+ | +--+---+ +--+---+ | +----------------+
-|MIC virtual Bus|     |      |SCIF  | | |SCIF  | | COSM | | |MIC virtual Bus |
-+---------------+     |      |HW Bus| | |HW Bus| | Bus  | | +----------------+
-      |               |      +------+ | +--+---+ +------+ |              |
-      |               |         |     |       |     |     |              |
-      |   +-----------+---+     |     |       |    +---------------+     |
-      |   |Intel MIC      |     |     |       |    |Intel MIC      |     |
-      +---|Card Driver    |     |     |       |    |Host Driver    |     |
-          +------------+--------+     |       +----+---------------+-----+
+            |         |         |     |------|------------|--+------|-------
+            +---------+---------+     |Kernel                |
+                      |               |                      |
+  +---------+     +---+----+ +------+ | +------+ +------+ +--+---+  +-------+
+  |MIC DMA  |     |  VOP   | | SCIF | | | SCIF | | COSM | | VOP  |  |MIC DMA|
+  +---+-----+     +---+----+ +--+---+ | +--+---+ +--+---+ +------+  +----+--+
+      |               |         |     |    |        |                    |
+  +---+-----+     +---+----+ +--+---+ | +--+---+ +--+---+ +------+  +----+--+
+  |MIC      |     |  VOP   | |SCIF  | | |SCIF  | | COSM | | VOP  |  | MIC   |
+  |HW Bus   |     |  HW Bus| |HW Bus| | |HW Bus| | Bus  | |HW Bus|  |HW Bus |
+  +---------+     +--------+ +--+---+ | +--+---+ +------+ +------+  +-------+
+      |               |         |     |       |     |                    |
+      |   +-----------+--+      |     |       |    +---------------+     |
+      |   |Intel MIC     |      |     |       |    |Intel MIC      |     |
+      |   |Card Driver   |      |     |       |    |Host Driver    |     |
+      +---+--------------+------+     |       +----+---------------+-----+
                  |                    |                   |
              +-------------------------------------------------------------+
              |                                                             |
index 09ea90931649c68263aac41af827c9e329a9460e..5fcf9fa4b082184c6502bd8afb8fcba1d94dcf11 100755 (executable)
@@ -35,7 +35,7 @@
 
 exec=/usr/sbin/mpssd
 sysfs="/sys/class/mic"
-mic_modules="mic_host mic_x100_dma scif"
+mic_modules="mic_host mic_x100_dma scif vop"
 
 start()
 {
index 7ce1e53568df5c0dde85b5fe3ec6c33c2de0f380..30fb842a976d3ed3b9b9e1cb0bf5e1f64640dcc4 100644 (file)
@@ -926,7 +926,7 @@ add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd)
        char path[PATH_MAX];
        int fd, err;
 
-       snprintf(path, PATH_MAX, "/dev/mic%d", mic->id);
+       snprintf(path, PATH_MAX, "/dev/vop_virtio%d", mic->id);
        fd = open(path, O_RDWR);
        if (fd < 0) {
                mpsslog("Could not open %s %s\n", path, strerror(errno));
index 91c1fa34f48b4a5a662bb14b2d422437795fd474..2b80a0cd621f09554fb74f2dc100787f76a6176b 100644 (file)
@@ -231,15 +231,15 @@ IT knows when a platform crashes even when there is a hard failure on the host.
 The Intel AMT Watchdog is composed of two parts:
        1) Firmware feature - receives the heartbeats
           and sends an event when the heartbeats stop.
-       2) Intel MEI driver - connects to the watchdog feature, configures the
-          watchdog and sends the heartbeats.
+       2) Intel MEI iAMT watchdog driver - connects to the watchdog feature,
+          configures the watchdog and sends the heartbeats.
 
-The Intel MEI driver uses the kernel watchdog API to configure the Intel AMT
-Watchdog and to send heartbeats to it. The default timeout of the
+The Intel iAMT watchdog MEI driver uses the kernel watchdog API to configure
+the Intel AMT Watchdog and to send heartbeats to it. The default timeout of the
 watchdog is 120 seconds.
 
-If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
-the Intel MEI driver will disable the sending of heartbeats.
+If Intel AMT is not enabled in the firmware, the watchdog client won't
+enumerate on the ME client bus and the watchdog device won't be exposed.
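
Since the new mei_wdt driver registers through the kernel watchdog
API, user space drives it with the standard watchdog ioctls.  A
minimal sketch; the /dev/watchdog0 node name is an assumption:

    /* Sketch: operate a watchdog registered via the kernel watchdog
     * API, as the MEI iAMT watchdog driver is.  Node name assumed. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int timeout = 120;      /* the documented default */
            int fd = open("/dev/watchdog0", O_WRONLY);

            if (fd < 0)
                    return 1;
            ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
            ioctl(fd, WDIOC_KEEPALIVE, 0);  /* send one heartbeat */
            if (write(fd, "V", 1) < 0)      /* magic close: stop on close */
                    return 1;
            close(fd);
            return 0;
    }
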
 
 
 Supported Chipsets
index f3c688f87abb9c3317f023f893e12dbe1650ad62..543dd219de80350b45907a59635e073d02387584 100644 (file)
@@ -5765,6 +5765,7 @@ S:        Supported
 F:     include/uapi/linux/mei.h
 F:     include/linux/mei_cl_bus.h
 F:     drivers/misc/mei/*
+F:     drivers/watchdog/mei_wdt.c
 F:     Documentation/misc-devices/mei/*
 
 INTEL MIC DRIVERS (mic)
@@ -6598,6 +6599,11 @@ F:       samples/livepatch/
 L:     live-patching@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
 
+LINUX KERNEL DUMP TEST MODULE (LKDTM)
+M:     Kees Cook <keescook@chromium.org>
+S:     Maintained
+F:     drivers/misc/lkdtm.c
+
 LLC (802.2)
 M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 S:     Maintained
index a0986c65be0cfa71856947c58f57abe8df61c7fd..592e65c3a4e0cd4c2116537c84afc0cabe79cd61 100644 (file)
                extcon_usb2: tps659038_usb {
                        compatible = "ti,palmas-usb-vid";
                        ti,enable-vbus-detection;
-                       ti,enable-id-detection;
-                       id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+                       vbus-gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
                };
 
        };
index de1316bf643a56fc3689a830ad04b4f1b69206f1..62ebac51bab93c0d40edaf5358e46088ee5ebb9a 100644 (file)
@@ -115,13 +115,14 @@ static void mityomapl138_cpufreq_init(const char *partnum)
 static void mityomapl138_cpufreq_init(const char *partnum) { }
 #endif
 
-static void read_factory_config(struct memory_accessor *a, void *context)
+static void read_factory_config(struct nvmem_device *nvmem, void *context)
 {
        int ret;
        const char *partnum = NULL;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
 
-       ret = a->read(a, (char *)&factory_config, 0, sizeof(factory_config));
+       ret = nvmem_device_read(nvmem, 0, sizeof(factory_config),
+                               &factory_config);
        if (ret != sizeof(struct factory_config)) {
                pr_warn("Read Factory Config Failed: %d\n", ret);
                goto bad_config;
index a794f6d9d4440adbeebd8bb28a657999b37fafd5..f55ef2ef2f92eb88c9c7485e5bfda537af126d31 100644 (file)
@@ -28,13 +28,13 @@ EXPORT_SYMBOL(davinci_soc_info);
 void __iomem *davinci_intc_base;
 int davinci_intc_type;
 
-void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context)
+void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
 {
        char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
        off_t offset = (off_t)context;
 
        /* Read MAC addr from EEPROM */
-       if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN)
+       if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
                pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
 }
 
index 7d00b7a015ead0182feaace6231a8d7fad719b64..57f52a2afa356788d4ee18f02867361da2b20a26 100644 (file)
@@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc,
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        binder_size_t *offp, *off_end;
+       binder_size_t off_min;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
@@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc,
                goto err_bad_offset;
        }
        off_end = (void *)offp + tr->offsets_size;
+       off_min = 0;
        for (; offp < off_end; offp++) {
                struct flat_binder_object *fp;
 
                if (*offp > t->buffer->data_size - sizeof(*fp) ||
+                   *offp < off_min ||
                    t->buffer->data_size < sizeof(*fp) ||
                    !IS_ALIGNED(*offp, sizeof(u32))) {
-                       binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
-                                         proc->pid, thread->pid, (u64)*offp);
+                       binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+                                         proc->pid, thread->pid, (u64)*offp,
+                                         (u64)off_min,
+                                         (u64)(t->buffer->data_size -
+                                         sizeof(*fp)));
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_offset;
                }
                fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+               off_min = *offp + sizeof(struct flat_binder_object);
                switch (fp->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
@@ -3593,13 +3600,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
 
 static int binder_proc_show(struct seq_file *m, void *unused)
 {
+       struct binder_proc *itr;
        struct binder_proc *proc = m->private;
        int do_lock = !binder_debug_no_lock;
+       bool valid_proc = false;
 
        if (do_lock)
                binder_lock(__func__);
-       seq_puts(m, "binder proc state:\n");
-       print_binder_proc(m, proc, 1);
+
+       hlist_for_each_entry(itr, &binder_procs, proc_node) {
+               if (itr == proc) {
+                       valid_proc = true;
+                       break;
+               }
+       }
+       if (valid_proc) {
+               seq_puts(m, "binder proc state:\n");
+               print_binder_proc(m, proc, 1);
+       }
        if (do_lock)
                binder_unlock(__func__);
        return 0;
index f3f7215ad378d6a971fa643fe3a656ee064fe5fa..773fc30997697711c2ff926ab49ed88d272cdbcc 100644 (file)
@@ -258,7 +258,7 @@ static void __fw_free_buf(struct kref *ref)
                vunmap(buf->data);
                for (i = 0; i < buf->nr_pages; i++)
                        __free_page(buf->pages[i]);
-               kfree(buf->pages);
+               vfree(buf->pages);
        } else
 #endif
                vfree(buf->data);
@@ -635,7 +635,7 @@ static ssize_t firmware_loading_store(struct device *dev,
                if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
                        for (i = 0; i < fw_buf->nr_pages; i++)
                                __free_page(fw_buf->pages[i]);
-                       kfree(fw_buf->pages);
+                       vfree(fw_buf->pages);
                        fw_buf->pages = NULL;
                        fw_buf->page_array_size = 0;
                        fw_buf->nr_pages = 0;
@@ -746,8 +746,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
                                         buf->page_array_size * 2);
                struct page **new_pages;
 
-               new_pages = kmalloc(new_array_size * sizeof(void *),
-                                   GFP_KERNEL);
+               new_pages = vmalloc(new_array_size * sizeof(void *));
                if (!new_pages) {
                        fw_load_abort(fw_priv);
                        return -ENOMEM;
@@ -756,7 +755,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
                       buf->page_array_size * sizeof(void *));
                memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
                       (new_array_size - buf->page_array_size));
-               kfree(buf->pages);
+               vfree(buf->pages);
                buf->pages = new_pages;
                buf->page_array_size = new_array_size;
        }
index a043107da2af775d567126174107c23ff89a5afb..3ec0766ed5e97a80390f3e8dcbe009c4414c9cf2 100644 (file)
@@ -328,7 +328,8 @@ config JS_RTC
 
 config GEN_RTC
        tristate "Generic /dev/rtc emulation"
-       depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+       depends on RTC!=y
+       depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86
        ---help---
          If you say Y here and create a character special file /dev/rtc with
          major number 10 and minor number 135 using mknod ("man mknod"), you
index 4f6f94c43412c0773e176cf8d67acd73fb6ca274..71025c2f6bbb072ff27b1d21a4fdca544b5a2e97 100644 (file)
@@ -695,7 +695,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
                offset += file->f_pos;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
-               if (IS_ERR_VALUE((unsigned long long)offset)) {
+               if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
index 01292328a45677d7da51e662c3c94dec2c5e970c..678fa97e41fbe5583a52ad474de8eaab1ed8aaab 100644 (file)
@@ -496,12 +496,12 @@ static void pc_set_checksum(void)
 
 #ifdef CONFIG_PROC_FS
 
-static char *floppy_types[] = {
+static const char * const floppy_types[] = {
        "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
        "3.5'' 2.88M", "3.5'' 2.88M"
 };
 
-static char *gfx_types[] = {
+static const char * const gfx_types[] = {
        "EGA, VGA, ... (with BIOS)",
        "CGA (40 cols)",
        "CGA (80 cols)",
@@ -602,7 +602,7 @@ static void atari_set_checksum(void)
 
 static struct {
        unsigned char val;
-       char *name;
+       const char *name;
 } boot_prefs[] = {
        { 0x80, "TOS" },
        { 0x40, "ASV" },
@@ -611,7 +611,7 @@ static struct {
        { 0x00, "unspecified" }
 };
 
-static char *languages[] = {
+static const char * const languages[] = {
        "English (US)",
        "German",
        "French",
@@ -623,7 +623,7 @@ static char *languages[] = {
        "Swiss (German)"
 };
 
-static char *dateformat[] = {
+static const char * const dateformat[] = {
        "MM%cDD%cYY",
        "DD%cMM%cYY",
        "YY%cMM%cDD",
@@ -634,7 +634,7 @@ static char *dateformat[] = {
        "7 (undefined)"
 };
 
-static char *colors[] = {
+static const char * const colors[] = {
        "2", "4", "16", "256", "65536", "??", "??", "??"
 };
 
index 76c490fa051103b99ba6b1006f7b8c2ed2a575a2..0e184426db98ed956c9d25bfb9566f608711980d 100644 (file)
@@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount)
 
 static void button_sequence_finished (unsigned long parameters)
 {
-#ifdef CONFIG_NWBUTTON_REBOOT          /* Reboot using button is enabled */
-       if (button_press_count == reboot_count)
+       if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+           button_press_count == reboot_count)
                kill_cad_pid(SIGINT, 1);        /* Ask init to reboot us */
-#endif /* CONFIG_NWBUTTON_REBOOT */
        button_consume_callbacks (button_press_count);
        bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
        button_press_count = 0;         /* Reset the button press counter */
index ae0b42b66e55e80405a91a6ad3783ab2f157bd48..d23368874710f726d485917e4d3f1ef93f3e3843 100644 (file)
 #include <linux/ppdev.h>
 #include <linux/mutex.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #define PP_VERSION "ppdev: user-space parallel port driver"
 #define CHRDEV "ppdev"
 
 struct pp_struct {
-       struct pardevice * pdev;
+       struct pardevice *pdev;
        wait_queue_head_t irq_wait;
        atomic_t irqc;
        unsigned int flags;
@@ -98,18 +99,26 @@ struct pp_struct {
 #define ROUND_UP(x,y) (((x)+(y)-1)/(y))
 
 static DEFINE_MUTEX(pp_do_mutex);
-static inline void pp_enable_irq (struct pp_struct *pp)
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32    _IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32    _IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64    _IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64    _IOW(PP_IOCTL, 0x96, s64[2])
+
+static inline void pp_enable_irq(struct pp_struct *pp)
 {
        struct parport *port = pp->pdev->port;
-       port->ops->enable_irq (port);
+
+       port->ops->enable_irq(port);
 }
 
-static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
-                       loff_t * ppos)
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+                      loff_t *ppos)
 {
        unsigned int minor = iminor(file_inode(file));
        struct pp_struct *pp = file->private_data;
-       char * kbuffer;
+       char *kbuffer;
        ssize_t bytes_read = 0;
        struct parport *pport;
        int mode;
@@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
                return 0;
 
        kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
-       if (!kbuffer) {
+       if (!kbuffer)
                return -ENOMEM;
-       }
        pport = pp->pdev->port;
        mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
 
-       parport_set_timeout (pp->pdev,
-                            (file->f_flags & O_NONBLOCK) ?
-                            PARPORT_INACTIVITY_O_NONBLOCK :
-                            pp->default_inactivity);
+       parport_set_timeout(pp->pdev,
+                           (file->f_flags & O_NONBLOCK) ?
+                           PARPORT_INACTIVITY_O_NONBLOCK :
+                           pp->default_inactivity);
 
        while (bytes_read == 0) {
                ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
@@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
                        int flags = 0;
                        size_t (*fn)(struct parport *, void *, size_t, int);
 
-                       if (pp->flags & PP_W91284PIC) {
+                       if (pp->flags & PP_W91284PIC)
                                flags |= PARPORT_W91284PIC;
-                       }
-                       if (pp->flags & PP_FASTREAD) {
+                       if (pp->flags & PP_FASTREAD)
                                flags |= PARPORT_EPP_FAST;
-                       }
-                       if (pport->ieee1284.mode & IEEE1284_ADDR) {
+                       if (pport->ieee1284.mode & IEEE1284_ADDR)
                                fn = pport->ops->epp_read_addr;
-                       } else {
+                       else
                                fn = pport->ops->epp_read_data;
-                       }
                        bytes_read = (*fn)(pport, kbuffer, need, flags);
                } else {
-                       bytes_read = parport_read (pport, kbuffer, need);
+                       bytes_read = parport_read(pport, kbuffer, need);
                }
 
                if (bytes_read != 0)
@@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
                        break;
                }
 
-               if (signal_pending (current)) {
+               if (signal_pending(current)) {
                        bytes_read = -ERESTARTSYS;
                        break;
                }
@@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
                cond_resched();
        }
 
-       parport_set_timeout (pp->pdev, pp->default_inactivity);
+       parport_set_timeout(pp->pdev, pp->default_inactivity);
 
-       if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
+       if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
                bytes_read = -EFAULT;
 
-       kfree (kbuffer);
-       pp_enable_irq (pp);
+       kfree(kbuffer);
+       pp_enable_irq(pp);
        return bytes_read;
 }
 
-static ssize_t pp_write (struct file * file, const char __user * buf,
-                        size_t count, loff_t * ppos)
+static ssize_t pp_write(struct file *file, const char __user *buf,
+                       size_t count, loff_t *ppos)
 {
        unsigned int minor = iminor(file_inode(file));
        struct pp_struct *pp = file->private_data;
-       char * kbuffer;
+       char *kbuffer;
        ssize_t bytes_written = 0;
        ssize_t wrote;
        int mode;
@@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
        }
 
        kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
-       if (!kbuffer) {
+       if (!kbuffer)
                return -ENOMEM;
-       }
+
        pport = pp->pdev->port;
        mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
 
-       parport_set_timeout (pp->pdev,
-                            (file->f_flags & O_NONBLOCK) ?
-                            PARPORT_INACTIVITY_O_NONBLOCK :
-                            pp->default_inactivity);
+       parport_set_timeout(pp->pdev,
+                           (file->f_flags & O_NONBLOCK) ?
+                           PARPORT_INACTIVITY_O_NONBLOCK :
+                           pp->default_inactivity);
 
        while (bytes_written < count) {
                ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
 
-               if (copy_from_user (kbuffer, buf + bytes_written, n)) {
+               if (copy_from_user(kbuffer, buf + bytes_written, n)) {
                        bytes_written = -EFAULT;
                        break;
                }
@@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
                if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
                        /* do a fast EPP write */
                        if (pport->ieee1284.mode & IEEE1284_ADDR) {
-                               wrote = pport->ops->epp_write_addr (pport,
+                               wrote = pport->ops->epp_write_addr(pport,
                                        kbuffer, n, PARPORT_EPP_FAST);
                        } else {
-                               wrote = pport->ops->epp_write_data (pport,
+                               wrote = pport->ops->epp_write_data(pport,
                                        kbuffer, n, PARPORT_EPP_FAST);
                        }
                } else {
-                       wrote = parport_write (pp->pdev->port, kbuffer, n);
+                       wrote = parport_write(pp->pdev->port, kbuffer, n);
                }
 
                if (wrote <= 0) {
-                       if (!bytes_written) {
+                       if (!bytes_written)
                                bytes_written = wrote;
-                       }
                        break;
                }
 
@@ -251,67 +255,69 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
                        break;
                }
 
-               if (signal_pending (current))
+               if (signal_pending(current))
                        break;
 
                cond_resched();
        }
 
-       parport_set_timeout (pp->pdev, pp->default_inactivity);
+       parport_set_timeout(pp->pdev, pp->default_inactivity);
 
-       kfree (kbuffer);
-       pp_enable_irq (pp);
+       kfree(kbuffer);
+       pp_enable_irq(pp);
        return bytes_written;
 }
 
-static void pp_irq (void *private)
+static void pp_irq(void *private)
 {
        struct pp_struct *pp = private;
 
        if (pp->irqresponse) {
-               parport_write_control (pp->pdev->port, pp->irqctl);
+               parport_write_control(pp->pdev->port, pp->irqctl);
                pp->irqresponse = 0;
        }
 
-       atomic_inc (&pp->irqc);
-       wake_up_interruptible (&pp->irq_wait);
+       atomic_inc(&pp->irqc);
+       wake_up_interruptible(&pp->irq_wait);
 }
 
-static int register_device (int minor, struct pp_struct *pp)
+static int register_device(int minor, struct pp_struct *pp)
 {
        struct parport *port;
-       struct pardevice * pdev = NULL;
+       struct pardevice *pdev = NULL;
        char *name;
-       int fl;
+       struct pardev_cb ppdev_cb;
 
        name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
        if (name == NULL)
                return -ENOMEM;
 
-       port = parport_find_number (minor);
+       port = parport_find_number(minor);
        if (!port) {
-               printk (KERN_WARNING "%s: no associated port!\n", name);
-               kfree (name);
+               printk(KERN_WARNING "%s: no associated port!\n", name);
+               kfree(name);
                return -ENXIO;
        }
 
-       fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
-       pdev = parport_register_device (port, name, NULL,
-                                       NULL, pp_irq, fl, pp);
-       parport_put_port (port);
+       memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+       ppdev_cb.irq_func = pp_irq;
+       ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+       ppdev_cb.private = pp;
+       pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+       parport_put_port(port);
 
        if (!pdev) {
-               printk (KERN_WARNING "%s: failed to register device!\n", name);
-               kfree (name);
+               printk(KERN_WARNING "%s: failed to register device!\n", name);
+               kfree(name);
                return -ENXIO;
        }
 
        pp->pdev = pdev;
-       pr_debug("%s: registered pardevice\n", name);
+       dev_dbg(&pdev->dev, "registered pardevice\n");
        return 0;
 }
 
-static enum ieee1284_phase init_phase (int mode)
+static enum ieee1284_phase init_phase(int mode)
 {
        switch (mode & ~(IEEE1284_DEVICEID
                         | IEEE1284_ADDR)) {
@@ -322,11 +328,27 @@ static enum ieee1284_phase init_phase (int mode)
        return IEEE1284_PH_FWD_IDLE;
 }
 
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+       long to_jiffies;
+
+       if ((tv_sec < 0) || (tv_usec < 0))
+               return -EINVAL;
+
+       to_jiffies = usecs_to_jiffies(tv_usec);
+       to_jiffies += tv_sec * HZ;
+       if (to_jiffies <= 0)
+               return -EINVAL;
+
+       pdev->timeout = to_jiffies;
+       return 0;
+}
+
 static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        unsigned int minor = iminor(file_inode(file));
        struct pp_struct *pp = file->private_data;
-       struct parport * port;
+       struct parport *port;
        void __user *argp = (void __user *)arg;
 
        /* First handle the cases that don't take arguments. */
@@ -337,19 +359,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                int ret;
 
                if (pp->flags & PP_CLAIMED) {
-                       pr_debug(CHRDEV "%x: you've already got it!\n", minor);
+                       dev_dbg(&pp->pdev->dev, "you've already got it!\n");
                        return -EINVAL;
                }
 
                /* Deferred device registration. */
                if (!pp->pdev) {
-                       int err = register_device (minor, pp);
-                       if (err) {
+                       int err = register_device(minor, pp);
+
+                       if (err)
                                return err;
-                       }
                }
 
-               ret = parport_claim_or_block (pp->pdev);
+               ret = parport_claim_or_block(pp->pdev);
                if (ret < 0)
                        return ret;
 
@@ -357,7 +379,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                /* For interrupt-reporting to work, we need to be
                 * informed of each interrupt. */
-               pp_enable_irq (pp);
+               pp_enable_irq(pp);
 
                /* We may need to fix up the state machine. */
                info = &pp->pdev->port->ieee1284;
@@ -365,15 +387,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                pp->saved_state.phase = info->phase;
                info->mode = pp->state.mode;
                info->phase = pp->state.phase;
-               pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
-               parport_set_timeout (pp->pdev, pp->default_inactivity);
+               pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+               parport_set_timeout(pp->pdev, pp->default_inactivity);
 
                return 0;
            }
        case PPEXCL:
                if (pp->pdev) {
-                       pr_debug(CHRDEV "%x: too late for PPEXCL; "
-                               "already registered\n", minor);
+                       dev_dbg(&pp->pdev->dev,
+                               "too late for PPEXCL; already registered\n");
                        if (pp->flags & PP_EXCL)
                                /* But it's not really an error. */
                                return 0;
@@ -388,11 +410,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case PPSETMODE:
            {
                int mode;
-               if (copy_from_user (&mode, argp, sizeof (mode)))
+
+               if (copy_from_user(&mode, argp, sizeof(mode)))
                        return -EFAULT;
                /* FIXME: validate mode */
                pp->state.mode = mode;
-               pp->state.phase = init_phase (mode);
+               pp->state.phase = init_phase(mode);
 
                if (pp->flags & PP_CLAIMED) {
                        pp->pdev->port->ieee1284.mode = mode;
@@ -405,28 +428,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
            {
                int mode;
 
-               if (pp->flags & PP_CLAIMED) {
+               if (pp->flags & PP_CLAIMED)
                        mode = pp->pdev->port->ieee1284.mode;
-               } else {
+               else
                        mode = pp->state.mode;
-               }
-               if (copy_to_user (argp, &mode, sizeof (mode))) {
+
+               if (copy_to_user(argp, &mode, sizeof(mode)))
                        return -EFAULT;
-               }
                return 0;
            }
        case PPSETPHASE:
            {
                int phase;
-               if (copy_from_user (&phase, argp, sizeof (phase))) {
+
+               if (copy_from_user(&phase, argp, sizeof(phase)))
                        return -EFAULT;
-               }
+
                /* FIXME: validate phase */
                pp->state.phase = phase;
 
-               if (pp->flags & PP_CLAIMED) {
+               if (pp->flags & PP_CLAIMED)
                        pp->pdev->port->ieee1284.phase = phase;
-               }
 
                return 0;
            }
@@ -434,38 +456,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
            {
                int phase;
 
-               if (pp->flags & PP_CLAIMED) {
+               if (pp->flags & PP_CLAIMED)
                        phase = pp->pdev->port->ieee1284.phase;
-               } else {
+               else
                        phase = pp->state.phase;
-               }
-               if (copy_to_user (argp, &phase, sizeof (phase))) {
+               if (copy_to_user(argp, &phase, sizeof(phase)))
                        return -EFAULT;
-               }
                return 0;
            }
        case PPGETMODES:
            {
                unsigned int modes;
 
-               port = parport_find_number (minor);
+               port = parport_find_number(minor);
                if (!port)
                        return -ENODEV;
 
                modes = port->modes;
                parport_put_port(port);
-               if (copy_to_user (argp, &modes, sizeof (modes))) {
+               if (copy_to_user(argp, &modes, sizeof(modes)))
                        return -EFAULT;
-               }
                return 0;
            }
        case PPSETFLAGS:
            {
                int uflags;
 
-               if (copy_from_user (&uflags, argp, sizeof (uflags))) {
+               if (copy_from_user(&uflags, argp, sizeof(uflags)))
                        return -EFAULT;
-               }
                pp->flags &= ~PP_FLAGMASK;
                pp->flags |= (uflags & PP_FLAGMASK);
                return 0;
@@ -475,9 +493,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                int uflags;
 
                uflags = pp->flags & PP_FLAGMASK;
-               if (copy_to_user (argp, &uflags, sizeof (uflags))) {
+               if (copy_to_user(argp, &uflags, sizeof(uflags)))
                        return -EFAULT;
-               }
                return 0;
            }
        }       /* end switch() */
@@ -495,27 +512,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                unsigned char reg;
                unsigned char mask;
                int mode;
+               s32 time32[2];
+               s64 time64[2];
+               struct timespec64 ts;
                int ret;
-               struct timeval par_timeout;
-               long to_jiffies;
 
        case PPRSTATUS:
-               reg = parport_read_status (port);
-               if (copy_to_user (argp, &reg, sizeof (reg)))
+               reg = parport_read_status(port);
+               if (copy_to_user(argp, &reg, sizeof(reg)))
                        return -EFAULT;
                return 0;
        case PPRDATA:
-               reg = parport_read_data (port);
-               if (copy_to_user (argp, &reg, sizeof (reg)))
+               reg = parport_read_data(port);
+               if (copy_to_user(argp, &reg, sizeof(reg)))
                        return -EFAULT;
                return 0;
        case PPRCONTROL:
-               reg = parport_read_control (port);
-               if (copy_to_user (argp, &reg, sizeof (reg)))
+               reg = parport_read_control(port);
+               if (copy_to_user(argp, &reg, sizeof(reg)))
                        return -EFAULT;
                return 0;
        case PPYIELD:
-               parport_yield_blocking (pp->pdev);
+               parport_yield_blocking(pp->pdev);
                return 0;
 
        case PPRELEASE:
@@ -525,45 +543,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                pp->state.phase = info->phase;
                info->mode = pp->saved_state.mode;
                info->phase = pp->saved_state.phase;
-               parport_release (pp->pdev);
+               parport_release(pp->pdev);
                pp->flags &= ~PP_CLAIMED;
                return 0;
 
        case PPWCONTROL:
-               if (copy_from_user (&reg, argp, sizeof (reg)))
+               if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
-               parport_write_control (port, reg);
+               parport_write_control(port, reg);
                return 0;
 
        case PPWDATA:
-               if (copy_from_user (&reg, argp, sizeof (reg)))
+               if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
-               parport_write_data (port, reg);
+               parport_write_data(port, reg);
                return 0;
 
        case PPFCONTROL:
-               if (copy_from_user (&mask, argp,
-                                   sizeof (mask)))
+               if (copy_from_user(&mask, argp,
+                                  sizeof(mask)))
                        return -EFAULT;
-               if (copy_from_user (&reg, 1 + (unsigned char __user *) arg,
-                                   sizeof (reg)))
+               if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+                                  sizeof(reg)))
                        return -EFAULT;
-               parport_frob_control (port, mask, reg);
+               parport_frob_control(port, mask, reg);
                return 0;
 
        case PPDATADIR:
-               if (copy_from_user (&mode, argp, sizeof (mode)))
+               if (copy_from_user(&mode, argp, sizeof(mode)))
                        return -EFAULT;
                if (mode)
-                       port->ops->data_reverse (port);
+                       port->ops->data_reverse(port);
                else
-                       port->ops->data_forward (port);
+                       port->ops->data_forward(port);
                return 0;
 
        case PPNEGOT:
-               if (copy_from_user (&mode, argp, sizeof (mode)))
+               if (copy_from_user(&mode, argp, sizeof(mode)))
                        return -EFAULT;
-               switch ((ret = parport_negotiate (port, mode))) {
+               switch ((ret = parport_negotiate(port, mode))) {
                case 0: break;
                case -1: /* handshake failed, peripheral not IEEE 1284 */
                        ret = -EIO;
@@ -572,11 +590,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        ret = -ENXIO;
                        break;
                }
-               pp_enable_irq (pp);
+               pp_enable_irq(pp);
                return ret;
 
        case PPWCTLONIRQ:
-               if (copy_from_user (&reg, argp, sizeof (reg)))
+               if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
 
                /* Remember what to set the control lines to, for next
@@ -586,39 +604,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return 0;
 
        case PPCLRIRQ:
-               ret = atomic_read (&pp->irqc);
-               if (copy_to_user (argp, &ret, sizeof (ret)))
+               ret = atomic_read(&pp->irqc);
+               if (copy_to_user(argp, &ret, sizeof(ret)))
                        return -EFAULT;
-               atomic_sub (ret, &pp->irqc);
+               atomic_sub(ret, &pp->irqc);
                return 0;
 
-       case PPSETTIME:
-               if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
+       case PPSETTIME32:
+               if (copy_from_user(time32, argp, sizeof(time32)))
                        return -EFAULT;
-               }
-               /* Convert to jiffies, place in pp->pdev->timeout */
-               if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
-                       return -EINVAL;
-               }
-               to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
-               to_jiffies += par_timeout.tv_sec * (long)HZ;
-               if (to_jiffies <= 0) {
+
+               return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+       case PPSETTIME64:
+               if (copy_from_user(time64, argp, sizeof(time64)))
+                       return -EFAULT;
+
+               return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+       case PPGETTIME32:
+               jiffies_to_timespec64(pp->pdev->timeout, &ts);
+               time32[0] = ts.tv_sec;
+               time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+               if ((time32[0] < 0) || (time32[1] < 0))
                        return -EINVAL;
-               }
-               pp->pdev->timeout = to_jiffies;
+
+               if (copy_to_user(argp, time32, sizeof(time32)))
+                       return -EFAULT;
+
                return 0;
 
-       case PPGETTIME:
-               to_jiffies = pp->pdev->timeout;
-               memset(&par_timeout, 0, sizeof(par_timeout));
-               par_timeout.tv_sec = to_jiffies / HZ;
-               par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
-               if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
+       case PPGETTIME64:
+               jiffies_to_timespec64(pp->pdev->timeout, &ts);
+               time64[0] = ts.tv_sec;
+               time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+               if ((time64[0] < 0) || (time64[1] < 0))
+                       return -EINVAL;
+
+               if (copy_to_user(argp, time64, sizeof(time64)))
                        return -EFAULT;
+
                return 0;
 
        default:
-               pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
+               dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
                return -EINVAL;
        }
 
@@ -629,13 +658,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        long ret;
+
        mutex_lock(&pp_do_mutex);
        ret = pp_do_ioctl(file, cmd, arg);
        mutex_unlock(&pp_do_mutex);
        return ret;
 }
 
-static int pp_open (struct inode * inode, struct file * file)
+#ifdef CONFIG_COMPAT
+static long pp_compat_ioctl(struct file *file, unsigned int cmd,
+                           unsigned long arg)
+{
+       return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int pp_open(struct inode *inode, struct file *file)
 {
        unsigned int minor = iminor(inode);
        struct pp_struct *pp;
@@ -643,16 +681,16 @@ static int pp_open (struct inode * inode, struct file * file)
        if (minor >= PARPORT_MAX)
                return -ENXIO;
 
-       pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
+       pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
 
        pp->state.mode = IEEE1284_MODE_COMPAT;
-       pp->state.phase = init_phase (pp->state.mode);
+       pp->state.phase = init_phase(pp->state.mode);
        pp->flags = 0;
        pp->irqresponse = 0;
-       atomic_set (&pp->irqc, 0);
-       init_waitqueue_head (&pp->irq_wait);
+       atomic_set(&pp->irqc, 0);
+       init_waitqueue_head(&pp->irq_wait);
 
        /* Defer the actual device registration until the first claim.
         * That way, we know whether or not the driver wants to have
@@ -664,7 +702,7 @@ static int pp_open (struct inode * inode, struct file * file)
        return 0;
 }
 
-static int pp_release (struct inode * inode, struct file * file)
+static int pp_release(struct inode *inode, struct file *file)
 {
        unsigned int minor = iminor(inode);
        struct pp_struct *pp = file->private_data;
@@ -673,10 +711,10 @@ static int pp_release (struct inode * inode, struct file * file)
        compat_negot = 0;
        if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
            (pp->state.mode != IEEE1284_MODE_COMPAT)) {
-               struct ieee1284_info *info;
+               struct ieee1284_info *info;
 
                /* parport released, but not in compatibility mode */
-               parport_claim_or_block (pp->pdev);
+               parport_claim_or_block(pp->pdev);
                pp->flags |= PP_CLAIMED;
                info = &pp->pdev->port->ieee1284;
                pp->saved_state.mode = info->mode;
@@ -689,9 +727,9 @@ static int pp_release (struct inode * inode, struct file * file)
                compat_negot = 2;
        }
        if (compat_negot) {
-               parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
-               pr_debug(CHRDEV "%x: negotiated back to compatibility "
-                       "mode because user-space forgot\n", minor);
+               parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+               dev_dbg(&pp->pdev->dev,
+                       "negotiated back to compatibility mode because user-space forgot\n");
        }
 
        if (pp->flags & PP_CLAIMED) {
@@ -702,7 +740,7 @@ static int pp_release (struct inode * inode, struct file * file)
                pp->state.phase = info->phase;
                info->mode = pp->saved_state.mode;
                info->phase = pp->saved_state.phase;
-               parport_release (pp->pdev);
+               parport_release(pp->pdev);
                if (compat_negot != 1) {
                        pr_debug(CHRDEV "%x: released pardevice "
                                "because user-space forgot\n", minor);
@@ -711,25 +749,26 @@ static int pp_release (struct inode * inode, struct file * file)
 
        if (pp->pdev) {
                const char *name = pp->pdev->name;
-               parport_unregister_device (pp->pdev);
-               kfree (name);
+
+               parport_unregister_device(pp->pdev);
+               kfree(name);
                pp->pdev = NULL;
                pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
        }
 
-       kfree (pp);
+       kfree(pp);
 
        return 0;
 }
 
 /* No kernel lock held - fine */
-static unsigned int pp_poll (struct file * file, poll_table * wait)
+static unsigned int pp_poll(struct file *file, poll_table *wait)
 {
        struct pp_struct *pp = file->private_data;
        unsigned int mask = 0;
 
-       poll_wait (file, &pp->irq_wait, wait);
-       if (atomic_read (&pp->irqc))
+       poll_wait(file, &pp->irq_wait, wait);
+       if (atomic_read(&pp->irqc))
                mask |= POLLIN | POLLRDNORM;
 
        return mask;
@@ -744,6 +783,9 @@ static const struct file_operations pp_fops = {
        .write          = pp_write,
        .poll           = pp_poll,
        .unlocked_ioctl = pp_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = pp_compat_ioctl,
+#endif
        .open           = pp_open,
        .release        = pp_release,
 };
@@ -759,19 +801,32 @@ static void pp_detach(struct parport *port)
        device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
 }
 
+static int pp_probe(struct pardevice *par_dev)
+{
+       struct device_driver *drv = par_dev->dev.driver;
+       int len = strlen(drv->name);
+
+       if (strncmp(par_dev->name, drv->name, len))
+               return -ENODEV;
+
+       return 0;
+}
+
 static struct parport_driver pp_driver = {
        .name           = CHRDEV,
-       .attach         = pp_attach,
+       .probe          = pp_probe,
+       .match_port     = pp_attach,
        .detach         = pp_detach,
+       .devmodel       = true,
 };
 
-static int __init ppdev_init (void)
+static int __init ppdev_init(void)
 {
        int err = 0;
 
-       if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
-               printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
-                       PP_MAJOR);
+       if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+               printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
+                      PP_MAJOR);
                return -EIO;
        }
        ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -781,11 +836,11 @@ static int __init ppdev_init (void)
        }
        err = parport_register_driver(&pp_driver);
        if (err < 0) {
-               printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
+               printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
                goto out_class;
        }
 
-       printk (KERN_INFO PP_VERSION "\n");
+       printk(KERN_INFO PP_VERSION "\n");
        goto out;
 
 out_class:
@@ -796,12 +851,12 @@ out:
        return err;
 }
 
-static void __exit ppdev_cleanup (void)
+static void __exit ppdev_cleanup(void)
 {
        /* Clean up all parport stuff */
        parport_unregister_driver(&pp_driver);
        class_destroy(ppdev_class);
-       unregister_chrdev (PP_MAJOR, CHRDEV);
+       unregister_chrdev(PP_MAJOR, CHRDEV);
 }
 
 module_init(ppdev_init);
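
The ppdev hunks above replace the old timeval-based PPSETTIME/PPGETTIME handling with explicit
32-bit and 64-bit variants routed through a common pp_set_timeout() helper, so the port timeout
keeps working once user space moves to a 64-bit time_t. Below is a minimal user-space sketch of
that timeout path; it is an illustration only, assuming a port at /dev/parport0 and using the
long-standing ioctl names from <linux/ppdev.h>, which resolve to the 32- or 64-bit handler
depending on the layout of struct timeval.

/*
 * Hedged user-space sketch, not part of the patch: set and read back the
 * ppdev timeout. Assumes a parallel port device at /dev/parport0.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/ppdev.h>

int main(void)
{
        struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 }; /* 1.5 s */
        int fd = open("/dev/parport0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, PPCLAIM) < 0 ||           /* claim the port; most ppdev ioctls need this */
            ioctl(fd, PPSETTIME, &tv) < 0 ||    /* converted to jiffies by pp_set_timeout() */
            ioctl(fd, PPGETTIME, &tv) < 0) {    /* read back, rounded to jiffy resolution */
                perror("ioctl");
                close(fd);
                return 1;
        }
        printf("timeout now %ld.%06ld s\n", (long)tv.tv_sec, (long)tv.tv_usec);
        ioctl(fd, PPRELEASE);
        close(fd);
        return 0;
}
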
index 9b9809b709a55f19f8fc2e5187a26eb47df7f807..e83b2adc014ad92c374a1e136a158612d3fd1883 100644 (file)
@@ -334,10 +334,8 @@ static int __init raw_init(void)
 
        cdev_init(&raw_cdev, &raw_fops);
        ret = cdev_add(&raw_cdev, dev, max_raw_minors);
-       if (ret) {
+       if (ret)
                goto error_region;
-       }
-
        raw_class = class_create(THIS_MODULE, "raw");
        if (IS_ERR(raw_class)) {
                printk(KERN_ERR "Error creating raw class.\n");
index 77d6c127e691a4a8a6417bd4cd0760fed0d513e8..dcd19f3f182e569e4e8e991653afdfb3b5af14be 100644 (file)
@@ -509,7 +509,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
                        channel->log2_element_size = ((format > 2) ?
                                                      2 : format);
 
-                       bytebufsize = channel->rd_buf_size = bufsize *
+                       bytebufsize = bufsize *
                                (1 << channel->log2_element_size);
 
                        buffers = devm_kcalloc(dev, bufnum,
@@ -523,6 +523,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
 
                if (!is_writebuf) {
                        channel->num_rd_buffers = bufnum;
+                       channel->rd_buf_size = bytebufsize;
                        channel->rd_allow_partial = allowpartial;
                        channel->rd_synchronous = synchronous;
                        channel->rd_exclusive_open = exclusive_open;
@@ -533,6 +534,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
                                                   bufnum, bytebufsize);
                } else if (channelnum > 0) {
                        channel->num_wr_buffers = bufnum;
+                       channel->wr_buf_size = bytebufsize;
 
                        channel->seekable = seekable;
                        channel->wr_supports_nonempty = supports_nonempty;
index c121d01a5cd6f24f1e9b452ee927fcd102eda757..1d8e0a57bd51bbe84a8ecd29d53717f2465922de 100644 (file)
@@ -185,7 +185,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
                break;
        };
 
-       mutex_lock(&arizona->dapm->card->dapm_mutex);
+       snd_soc_dapm_mutex_lock(arizona->dapm);
 
        arizona->hpdet_clamp = clamp;
 
@@ -227,7 +227,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
                                 ret);
        }
 
-       mutex_unlock(&arizona->dapm->card->dapm_mutex);
+       snd_soc_dapm_mutex_unlock(arizona->dapm);
 }
 
 static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
index 279ff8f6637da8beb62cd92bbbe63b8759c896a6..d023789f0fdac94a2fd8b6bfc90618793a2bda37 100644 (file)
@@ -126,7 +126,7 @@ static int gpio_extcon_probe(struct platform_device *pdev)
        INIT_DELAYED_WORK(&data->work, gpio_extcon_work);
 
        /*
-        * Request the interrput of gpio to detect whether external connector
+        * Request the interrupt of gpio to detect whether external connector
         * is attached or detached.
         */
        ret = devm_request_any_context_irq(&pdev->dev, data->irq,
index b30ab97ce75f779d5be85ba0b14074e81a153ea0..852a7112f451990243c20c8a746f7d2e7d167cad 100644 (file)
@@ -150,6 +150,7 @@ enum max14577_muic_acc_type {
 
 static const unsigned int max14577_extcon_cable[] = {
        EXTCON_USB,
+       EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_DCP,
        EXTCON_CHG_USB_FAST,
        EXTCON_CHG_USB_SLOW,
@@ -454,6 +455,8 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
                        return ret;
 
                extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+               extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                       attached);
                break;
        case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
                extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
index fdf8f5d4d4e9ff72aaac992756d1b18792fe4d98..f17cb76b567cc949e4827932e1eeff9953ca7ea8 100644 (file)
@@ -204,6 +204,7 @@ enum max77693_muic_acc_type {
 static const unsigned int max77693_extcon_cable[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
+       EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_DCP,
        EXTCON_CHG_USB_FAST,
        EXTCON_CHG_USB_SLOW,
@@ -512,8 +513,11 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
                break;
        case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:         /* Dock-Audio */
                dock_id = EXTCON_DOCK;
-               if (!attached)
+               if (!attached) {
                        extcon_set_cable_state_(info->edev, EXTCON_USB, false);
+                       extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                               false);
+               }
                break;
        default:
                dev_err(info->dev, "failed to detect %s dock device\n",
@@ -601,6 +605,8 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
                if (ret < 0)
                        return ret;
                extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+               extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                       attached);
                break;
        case MAX77693_MUIC_GND_MHL:
        case MAX77693_MUIC_GND_MHL_VB:
@@ -830,6 +836,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
                         */
                        extcon_set_cable_state_(info->edev, EXTCON_USB,
                                                attached);
+                       extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                               attached);
 
                        if (!cable_attached)
                                extcon_set_cable_state_(info->edev, EXTCON_DOCK,
@@ -899,6 +907,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
 
                        extcon_set_cable_state_(info->edev, EXTCON_USB,
                                                attached);
+                       extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                               attached);
                        break;
                case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
                        /* Only TA cable */
index 74dfb7f4f2774c5c87e4e14a39db3e84ecbf8191..b188bd650efa5261c13d1fc9d4fb3fea894f225a 100644 (file)
@@ -122,6 +122,7 @@ enum max77843_muic_charger_type {
 static const unsigned int max77843_extcon_cable[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
+       EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_DCP,
        EXTCON_CHG_USB_CDP,
        EXTCON_CHG_USB_FAST,
@@ -486,6 +487,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
                        return ret;
 
                extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+               extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                       attached);
                break;
        case MAX77843_MUIC_CHG_DOWNSTREAM:
                ret = max77843_muic_set_path(info,
@@ -803,7 +806,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
        /* Clear IRQ bits before request IRQs */
        ret = regmap_bulk_read(max77843->regmap_muic,
                        MAX77843_MUIC_REG_INT1, info->status,
-                       MAX77843_MUIC_IRQ_NUM);
+                       MAX77843_MUIC_STATUS_NUM);
        if (ret) {
                dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
                goto err_muic_irq;
index b2b13b3dce14743d1a977c0fa760325d15b6969b..9a89320d09a8b6e08660cadca6abd6b79e23c24d 100644 (file)
@@ -148,6 +148,7 @@ struct max8997_muic_info {
 static const unsigned int max8997_extcon_cable[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
+       EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_DCP,
        EXTCON_CHG_USB_FAST,
        EXTCON_CHG_USB_SLOW,
@@ -334,6 +335,8 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
                break;
        case MAX8997_USB_DEVICE:
                extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+               extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                       attached);
                break;
        default:
                dev_err(info->dev, "failed to detect %s usb cable\n",
index 93c30a885740e9bebb08c8f80162118c7e3f2359..841a4b58639543a4d5107dfaab9544b0a5425d37 100644 (file)
@@ -216,11 +216,23 @@ static int palmas_usb_probe(struct platform_device *pdev)
                return PTR_ERR(palmas_usb->id_gpiod);
        }
 
+       palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+                                                       GPIOD_IN);
+       if (IS_ERR(palmas_usb->vbus_gpiod)) {
+               dev_err(&pdev->dev, "failed to get vbus gpio\n");
+               return PTR_ERR(palmas_usb->vbus_gpiod);
+       }
+
        if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
                palmas_usb->enable_id_detection = false;
                palmas_usb->enable_gpio_id_detection = true;
        }
 
+       if (palmas_usb->enable_vbus_detection && palmas_usb->vbus_gpiod) {
+               palmas_usb->enable_vbus_detection = false;
+               palmas_usb->enable_gpio_vbus_detection = true;
+       }
+
        if (palmas_usb->enable_gpio_id_detection) {
                u32 debounce;
 
@@ -266,7 +278,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
                                palmas_usb->id_irq,
                                NULL, palmas_id_irq_handler,
                                IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
-                               IRQF_ONESHOT | IRQF_EARLY_RESUME,
+                               IRQF_ONESHOT,
                                "palmas_usb_id", palmas_usb);
                if (status < 0) {
                        dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
@@ -304,13 +316,47 @@ static int palmas_usb_probe(struct platform_device *pdev)
                                palmas_usb->vbus_irq, NULL,
                                palmas_vbus_irq_handler,
                                IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
-                               IRQF_ONESHOT | IRQF_EARLY_RESUME,
+                               IRQF_ONESHOT,
                                "palmas_usb_vbus", palmas_usb);
                if (status < 0) {
                        dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                                        palmas_usb->vbus_irq, status);
                        return status;
                }
+       } else if (palmas_usb->enable_gpio_vbus_detection) {
+               /* remux GPIO_1 as VBUSDET */
+               status = palmas_update_bits(palmas,
+                       PALMAS_PU_PD_OD_BASE,
+                       PALMAS_PRIMARY_SECONDARY_PAD1,
+                       PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK,
+                       (1 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT));
+               if (status < 0) {
+                       dev_err(&pdev->dev, "can't remux GPIO1\n");
+                       return status;
+               }
+
+               palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+                                                      PALMAS_VBUS_OTG_IRQ);
+               palmas_usb->gpio_vbus_irq = gpiod_to_irq(palmas_usb->vbus_gpiod);
+               if (palmas_usb->gpio_vbus_irq < 0) {
+                       dev_err(&pdev->dev, "failed to get vbus irq\n");
+                       return palmas_usb->gpio_vbus_irq;
+               }
+               status = devm_request_threaded_irq(&pdev->dev,
+                                               palmas_usb->gpio_vbus_irq,
+                                               NULL,
+                                               palmas_vbus_irq_handler,
+                                               IRQF_TRIGGER_FALLING |
+                                               IRQF_TRIGGER_RISING |
+                                               IRQF_ONESHOT |
+                                               IRQF_EARLY_RESUME,
+                                               "palmas_usb_vbus",
+                                               palmas_usb);
+               if (status < 0) {
+                       dev_err(&pdev->dev,
+                               "failed to request handler for vbus irq\n");
+                       return status;
+               }
        }
 
        palmas_enable_irq(palmas_usb);
@@ -337,6 +383,8 @@ static int palmas_usb_suspend(struct device *dev)
        if (device_may_wakeup(dev)) {
                if (palmas_usb->enable_vbus_detection)
                        enable_irq_wake(palmas_usb->vbus_irq);
+               if (palmas_usb->enable_gpio_vbus_detection)
+                       enable_irq_wake(palmas_usb->gpio_vbus_irq);
                if (palmas_usb->enable_id_detection)
                        enable_irq_wake(palmas_usb->id_irq);
                if (palmas_usb->enable_gpio_id_detection)
@@ -352,6 +400,8 @@ static int palmas_usb_resume(struct device *dev)
        if (device_may_wakeup(dev)) {
                if (palmas_usb->enable_vbus_detection)
                        disable_irq_wake(palmas_usb->vbus_irq);
+               if (palmas_usb->enable_gpio_vbus_detection)
+                       disable_irq_wake(palmas_usb->gpio_vbus_irq);
                if (palmas_usb->enable_id_detection)
                        disable_irq_wake(palmas_usb->id_irq);
                if (palmas_usb->enable_gpio_id_detection)
index e1bb82809bef53ad2a853de2442a7ecc2e4a7597..97e074d70eca21264d0743ae7ebf3e90b4ce228c 100644 (file)
@@ -93,6 +93,7 @@ static struct reg_data rt8973a_reg_data[] = {
 static const unsigned int rt8973a_extcon_cable[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
+       EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_DCP,
        EXTCON_JIG,
        EXTCON_NONE,
@@ -398,6 +399,9 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
 
        /* Change the state of external accessory */
        extcon_set_cable_state_(info->edev, id, attached);
+       if (id == EXTCON_USB)
+               extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                       attached);
 
        return 0;
 }
@@ -663,7 +667,7 @@ MODULE_DEVICE_TABLE(of, rt8973a_dt_match);
 #ifdef CONFIG_PM_SLEEP
 static int rt8973a_muic_suspend(struct device *dev)
 {
-       struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *i2c = to_i2c_client(dev);
        struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
 
        enable_irq_wake(info->irq);
@@ -673,7 +677,7 @@ static int rt8973a_muic_suspend(struct device *dev)
 
 static int rt8973a_muic_resume(struct device *dev)
 {
-       struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *i2c = to_i2c_client(dev);
        struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
 
        disable_irq_wake(info->irq);
index 7aac3cc7efd79ff121850252c9f24ec434ee37d3..df769a17e736fcadae1fbf05543fd6837ebd0f13 100644 (file)
@@ -95,6 +95,7 @@ static struct reg_data sm5502_reg_data[] = {
 static const unsigned int sm5502_extcon_cable[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
+       EXTCON_CHG_USB_SDP,
        EXTCON_CHG_USB_DCP,
        EXTCON_NONE,
 };
@@ -411,6 +412,9 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
 
        /* Change the state of external accessory */
        extcon_set_cable_state_(info->edev, id, attached);
+       if (id == EXTCON_USB)
+               extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+                                       attached);
 
        return 0;
 }
@@ -655,7 +659,7 @@ MODULE_DEVICE_TABLE(of, sm5502_dt_match);
 #ifdef CONFIG_PM_SLEEP
 static int sm5502_muic_suspend(struct device *dev)
 {
-       struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *i2c = to_i2c_client(dev);
        struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
 
        enable_irq_wake(info->irq);
@@ -665,7 +669,7 @@ static int sm5502_muic_suspend(struct device *dev)
 
 static int sm5502_muic_resume(struct device *dev)
 {
-       struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *i2c = to_i2c_client(dev);
        struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
 
        disable_irq_wake(info->irq);
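
The rt8973a and sm5502 suspend/resume hunks swap an open-coded
container_of(dev, struct i2c_client, dev) for the equivalent to_i2c_client() helper. The toy
program below (an illustration with simplified stand-in types, not the kernel headers) shows the
pointer arithmetic both forms perform: recovering the enclosing i2c_client from a pointer to its
embedded struct device.

/* Stand-alone toy, not kernel code: the container_of() pattern behind to_i2c_client(). */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };
struct i2c_client { int addr; struct device dev; };

int main(void)
{
        struct i2c_client client = { .addr = 0x25, .dev = { .name = "muic" } };
        struct device *dev = &client.dev;       /* what the PM callback receives */
        struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);

        printf("client addr 0x%x via device %s\n", i2c->addr, dev->name);
        return 0;
}
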
index 1161d68a18635d0c43c6722c78f1594c9c992045..56dd261f7142f3957aef255ffaec43ce38a05ee7 100644 (file)
@@ -219,6 +219,21 @@ error0:
 }
 EXPORT_SYMBOL_GPL(vmbus_open);
 
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+                                 const uuid_le *shv_host_servie_id)
+{
+       struct vmbus_channel_tl_connect_request conn_msg;
+
+       memset(&conn_msg, 0, sizeof(conn_msg));
+       conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+       conn_msg.guest_endpoint_id = *shv_guest_servie_id;
+       conn_msg.host_service_id = *shv_host_servie_id;
+
+       return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
 /*
  * create_gpadl_header - Creates a gpadl for the specified buffer
  */
@@ -624,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
        u64 aligned_data = 0;
        int ret;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -643,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-                                 &signal);
+                                 &signal, lock);
 
        /*
         * Signalling the host is conditional on many factors:
@@ -659,6 +675,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
         * If we cannot write to the ring-buffer; signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
+        * NOTE: in this case, the hvsock channel is an exception, because
+        * it looks like the host side's hvsock implementation has a throttling
+        * mechanism which can otherwise hurt performance.
         */
 
        if (channel->signal_policy)
@@ -666,7 +685,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
        else
                kick_q = true;
 
-       if (((ret == 0) && kick_q && signal) || (ret))
+       if (((ret == 0) && kick_q && signal) ||
+           (ret && !is_hvsock_channel(channel)))
                vmbus_setevent(channel);
 
        return ret;
@@ -719,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
 
        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;
@@ -755,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+                                 &signal, lock);
 
        /*
         * Signalling the host is conditional on many factors:
@@ -818,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
 
        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -837,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+                                 &signal, lock);
 
        if (ret == 0 && signal)
                vmbus_setevent(channel);
@@ -862,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
+       bool lock = channel->acquire_ring_lock;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);
 
@@ -900,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+       ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+                                 &signal, lock);
 
        if (ret == 0 && signal)
                vmbus_setevent(channel);
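
The channel.c send paths above now pass an extra 'lock' flag into hv_ringbuffer_write(), but the
gather list they build is unchanged: a packet descriptor, the payload, and a zero pad that rounds
the packet up to a multiple of sizeof(u64). The fragment below is a user-space analogue of that
three-element bufferlist, assuming struct iovec and toy descriptor fields as stand-ins for the
kernel's struct kvec and VMBus descriptors.

/* User-space analogue, not the kernel API: build the padded three-element gather list. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        struct { uint16_t type, offset8, len8, flags; uint64_t trans_id; } desc;
        char payload[] = "hello vmbus";
        uint64_t pad = 0;
        size_t packetlen = sizeof(desc) + sizeof(payload);
        size_t packetlen_aligned = ALIGN_UP(packetlen, sizeof(uint64_t));
        struct iovec bufferlist[3] = {
                { .iov_base = &desc,   .iov_len = sizeof(desc) },
                { .iov_base = payload, .iov_len = sizeof(payload) },
                { .iov_base = &pad,    .iov_len = packetlen_aligned - packetlen },
        };
        size_t i, total = 0;

        memset(&desc, 0, sizeof(desc));
        for (i = 0; i < 3; i++)
                total += bufferlist[i].iov_len;
        printf("raw %zu bytes, padded to %zu\n", packetlen, total);
        return 0;
}
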
index 1c1ad47042c5d7e9a90f29d8a232bab0ad6f2a8a..38b682bab85a81a6482ec5dac0cdc29ad912488b 100644 (file)
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/hyperv.h>
 
 #include "hyperv_vmbus.h"
 
-static void init_vp_index(struct vmbus_channel *channel,
-                         const uuid_le *type_guid);
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+
+static const struct vmbus_device vmbus_devs[] = {
+       /* IDE */
+       { .dev_type = HV_IDE,
+         HV_IDE_GUID,
+         .perf_device = true,
+       },
+
+       /* SCSI */
+       { .dev_type = HV_SCSI,
+         HV_SCSI_GUID,
+         .perf_device = true,
+       },
+
+       /* Fibre Channel */
+       { .dev_type = HV_FC,
+         HV_SYNTHFC_GUID,
+         .perf_device = true,
+       },
+
+       /* Synthetic NIC */
+       { .dev_type = HV_NIC,
+         HV_NIC_GUID,
+         .perf_device = true,
+       },
+
+       /* Network Direct */
+       { .dev_type = HV_ND,
+         HV_ND_GUID,
+         .perf_device = true,
+       },
+
+       /* PCIE */
+       { .dev_type = HV_PCIE,
+         HV_PCIE_GUID,
+         .perf_device = true,
+       },
+
+       /* Synthetic Frame Buffer */
+       { .dev_type = HV_FB,
+         HV_SYNTHVID_GUID,
+         .perf_device = false,
+       },
+
+       /* Synthetic Keyboard */
+       { .dev_type = HV_KBD,
+         HV_KBD_GUID,
+         .perf_device = false,
+       },
+
+       /* Synthetic MOUSE */
+       { .dev_type = HV_MOUSE,
+         HV_MOUSE_GUID,
+         .perf_device = false,
+       },
+
+       /* KVP */
+       { .dev_type = HV_KVP,
+         HV_KVP_GUID,
+         .perf_device = false,
+       },
+
+       /* Time Synch */
+       { .dev_type = HV_TS,
+         HV_TS_GUID,
+         .perf_device = false,
+       },
+
+       /* Heartbeat */
+       { .dev_type = HV_HB,
+         HV_HEART_BEAT_GUID,
+         .perf_device = false,
+       },
+
+       /* Shutdown */
+       { .dev_type = HV_SHUTDOWN,
+         HV_SHUTDOWN_GUID,
+         .perf_device = false,
+       },
+
+       /* File copy */
+       { .dev_type = HV_FCOPY,
+         HV_FCOPY_GUID,
+         .perf_device = false,
+       },
+
+       /* Backup */
+       { .dev_type = HV_BACKUP,
+         HV_VSS_GUID,
+         .perf_device = false,
+       },
+
+       /* Dynamic Memory */
+       { .dev_type = HV_DM,
+         HV_DM_GUID,
+         .perf_device = false,
+       },
+
+       /* Unknown GUID */
+       { .dev_type = HV_UNKOWN,
+         .perf_device = false,
+       },
+};
+
+static u16 hv_get_dev_type(const uuid_le *guid)
+{
+       u16 i;
+
+       for (i = HV_IDE; i < HV_UNKOWN; i++) {
+               if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+                       return i;
+       }
+       pr_info("Unknown GUID: %pUl\n", guid);
+       return i;
+}
 
 /**
  * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
@@ -144,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
                return NULL;
 
        channel->id = atomic_inc_return(&chan_num);
+       channel->acquire_ring_lock = true;
        spin_lock_init(&channel->inbound_lock);
        spin_lock_init(&channel->lock);
 
@@ -195,6 +311,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
        vmbus_release_relid(relid);
 
        BUG_ON(!channel->rescind);
+       BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
@@ -206,9 +323,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
        }
 
        if (channel->primary_channel == NULL) {
-               mutex_lock(&vmbus_connection.channel_mutex);
                list_del(&channel->listentry);
-               mutex_unlock(&vmbus_connection.channel_mutex);
 
                primary_channel = channel;
        } else {
@@ -251,6 +366,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        struct vmbus_channel *channel;
        bool fnew = true;
        unsigned long flags;
+       u16 dev_type;
+       int ret;
 
        /* Make sure this is a new offer */
        mutex_lock(&vmbus_connection.channel_mutex);
@@ -288,7 +405,9 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                        goto err_free_chan;
        }
 
-       init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
+       dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
+
+       init_vp_index(newchannel, dev_type);
 
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
@@ -325,12 +444,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        if (!newchannel->device_obj)
                goto err_deq_chan;
 
+       newchannel->device_obj->device_id = dev_type;
        /*
         * Add the new device to the bus. This will kick off device-driver
         * binding which eventually invokes the device driver's AddDevice()
         * method.
         */
-       if (vmbus_device_register(newchannel->device_obj) != 0) {
+       mutex_lock(&vmbus_connection.channel_mutex);
+       ret = vmbus_device_register(newchannel->device_obj);
+       mutex_unlock(&vmbus_connection.channel_mutex);
+
+       if (ret != 0) {
                pr_err("unable to add child device object (relid %d)\n",
                        newchannel->offermsg.child_relid);
                kfree(newchannel->device_obj);
@@ -358,37 +482,6 @@ err_free_chan:
        free_channel(newchannel);
 }
 
-enum {
-       IDE = 0,
-       SCSI,
-       FC,
-       NIC,
-       ND_NIC,
-       PCIE,
-       MAX_PERF_CHN,
-};
-
-/*
- * This is an array of device_ids (device types) that are performance critical.
- * We attempt to distribute the interrupt load for these devices across
- * all available CPUs.
- */
-static const struct hv_vmbus_device_id hp_devs[] = {
-       /* IDE */
-       { HV_IDE_GUID, },
-       /* Storage - SCSI */
-       { HV_SCSI_GUID, },
-       /* Storage - FC */
-       { HV_SYNTHFC_GUID, },
-       /* Network */
-       { HV_NIC_GUID, },
-       /* NetworkDirect Guest RDMA */
-       { HV_ND_GUID, },
-       /* PCI Express Pass Through */
-       { HV_PCIE_GUID, },
-};
-
-
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
@@ -405,22 +498,15 @@ static int next_numa_node_id;
  * For pre-win8 hosts or non-performance critical channels we assign the
  * first CPU in the first NUMA node.
  */
-static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 {
        u32 cur_cpu;
-       int i;
-       bool perf_chn = false;
+       bool perf_chn = vmbus_devs[dev_type].perf_device;
        struct vmbus_channel *primary = channel->primary_channel;
        int next_node;
        struct cpumask available_mask;
        struct cpumask *alloced_mask;
 
-       for (i = IDE; i < MAX_PERF_CHN; i++) {
-               if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) {
-                       perf_chn = true;
-                       break;
-               }
-       }
        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
                /*
@@ -469,6 +555,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
                    cpumask_of_node(primary->numa_node));
 
        cur_cpu = -1;
+
+       /*
+        * Normally Hyper-V host doesn't create more subchannels than there
+        * are VCPUs on the node but it is possible when not all present VCPUs
+        * on the node are initialized by guest. Clear the alloced_cpus_in_node
+        * to start over.
+        */
+       if (cpumask_equal(&primary->alloced_cpus_in_node,
+                         cpumask_of_node(primary->numa_node)))
+               cpumask_clear(&primary->alloced_cpus_in_node);
+
        while (true) {
                cur_cpu = cpumask_next(cur_cpu, &available_mask);
                if (cur_cpu >= nr_cpu_ids) {
@@ -498,6 +595,32 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
        channel->target_vp = hv_context.vp_index[cur_cpu];
 }
 
+static void vmbus_wait_for_unload(void)
+{
+       int cpu = smp_processor_id();
+       void *page_addr = hv_context.synic_message_page[cpu];
+       struct hv_message *msg = (struct hv_message *)page_addr +
+                                 VMBUS_MESSAGE_SINT;
+       struct vmbus_channel_message_header *hdr;
+       bool unloaded = false;
+
+       while (1) {
+               if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
+                       mdelay(10);
+                       continue;
+               }
+
+               hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+               if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+                       unloaded = true;
+
+               vmbus_signal_eom(msg);
+
+               if (unloaded)
+                       break;
+       }
+}
+
 /*
  * vmbus_unload_response - Handler for the unload response.
  */
@@ -510,7 +633,7 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
        complete(&vmbus_connection.unload_event);
 }
 
-void vmbus_initiate_unload(void)
+void vmbus_initiate_unload(bool crash)
 {
        struct vmbus_channel_message_header hdr;
 
@@ -523,7 +646,14 @@ void vmbus_initiate_unload(void)
        hdr.msgtype = CHANNELMSG_UNLOAD;
        vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
 
-       wait_for_completion(&vmbus_connection.unload_event);
+       /*
+        * vmbus_initiate_unload() is also called on crash, and the crash can
+        * happen in an interrupt context, where scheduling is impossible.
+        */
+       if (!crash)
+               wait_for_completion(&vmbus_connection.unload_event);
+       else
+               vmbus_wait_for_unload();
 }
 
 /*
@@ -592,6 +722,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        struct device *dev;
 
        rescind = (struct vmbus_channel_rescind_offer *)hdr;
+
+       mutex_lock(&vmbus_connection.channel_mutex);
        channel = relid2channel(rescind->child_relid);
 
        if (channel == NULL) {
@@ -600,7 +732,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                 * vmbus_process_offer(), we have already invoked
                 * vmbus_release_relid() on error.
                 */
-               return;
+               goto out;
        }
 
        spin_lock_irqsave(&channel->lock, flags);
@@ -608,6 +740,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
        spin_unlock_irqrestore(&channel->lock, flags);
 
        if (channel->device_obj) {
+               if (channel->chn_rescind_callback) {
+                       channel->chn_rescind_callback(channel);
+                       goto out;
+               }
                /*
                 * We will have to unregister this device from the
                 * driver core.
@@ -621,7 +757,24 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                hv_process_channel_removal(channel,
                        channel->offermsg.child_relid);
        }
+
+out:
+       mutex_unlock(&vmbus_connection.channel_mutex);
+}
+
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+{
+       mutex_lock(&vmbus_connection.channel_mutex);
+
+       BUG_ON(!is_hvsock_channel(channel));
+
+       channel->rescind = true;
+       vmbus_device_unregister(channel->device_obj);
+
+       mutex_unlock(&vmbus_connection.channel_mutex);
 }
+EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+
 
 /*
  * vmbus_onoffers_delivered -
@@ -825,6 +978,10 @@ struct vmbus_channel_message_table_entry
        {CHANNELMSG_VERSION_RESPONSE,           1, vmbus_onversion_response},
        {CHANNELMSG_UNLOAD,                     0, NULL},
        {CHANNELMSG_UNLOAD_RESPONSE,            1, vmbus_unload_response},
+       {CHANNELMSG_18,                         0, NULL},
+       {CHANNELMSG_19,                         0, NULL},
+       {CHANNELMSG_20,                         0, NULL},
+       {CHANNELMSG_TL_CONNECT_REQUEST,         0, NULL},
 };
 
 /*
@@ -973,3 +1130,10 @@ bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
        return ret;
 }
 EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
+
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+               void (*chn_rescind_cb)(struct vmbus_channel *))
+{
+       channel->chn_rescind_callback = chn_rescind_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
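
Within channel_mgmt.c, init_vp_index() now takes the device type and reads perf_device from the
vmbus_devs table; for performance-critical channels it spreads interrupts across the CPUs of a
NUMA node and, once every CPU of the node has been handed out, clears alloced_cpus_in_node to
start over. The toy below mirrors that round-robin with plain 32-bit masks, an assumption standing
in for the kernel's cpumask API.

/* Toy sketch of the per-node round-robin in init_vp_index(); not kernel code. */
#include <stdio.h>
#include <stdint.h>

static unsigned int pick_cpu(uint32_t node_cpus, uint32_t *alloced)
{
        unsigned int cpu;

        if ((*alloced & node_cpus) == node_cpus)        /* node exhausted: start over */
                *alloced = 0;

        for (cpu = 0; cpu < 32; cpu++) {
                uint32_t bit = 1u << cpu;

                if ((node_cpus & bit) && !(*alloced & bit)) {
                        *alloced |= bit;
                        return cpu;
                }
        }
        return 0;       /* unreachable while node_cpus is non-empty */
}

int main(void)
{
        uint32_t node_cpus = 0x0f;      /* CPUs 0-3 on this node */
        uint32_t alloced = 0;
        int i;

        for (i = 0; i < 6; i++)         /* six channels on a four-CPU node */
                printf("channel %d -> cpu %u\n", i, pick_cpu(node_cpus, &alloced));
        return 0;
}
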
index 3dc5a9c7fad6edda9fcaac0ecda2a24ca96822c5..d02f1373dd98c6d82777e744ba9679eefc6a80e0 100644 (file)
@@ -88,8 +88,16 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
         * This has been the behavior pre-win8. This is not
         * perf issue and having all channel messages delivered on CPU 0
         * would be ok.
+        * For post win8 hosts, we support receiving channel messages on
+        * all the CPUs. This is needed for kexec to work correctly where
+        * the CPU attempting to connect may not be CPU 0.
         */
-       msg->target_vcpu = 0;
+       if (version >= VERSION_WIN8_1) {
+               msg->target_vcpu = hv_context.vp_index[get_cpu()];
+               put_cpu();
+       } else {
+               msg->target_vcpu = 0;
+       }
 
        /*
         * Add to list before we send the request since we may
@@ -236,7 +244,7 @@ void vmbus_disconnect(void)
        /*
         * First send the unload request to the host.
         */
-       vmbus_initiate_unload();
+       vmbus_initiate_unload(false);
 
        if (vmbus_connection.work_queue) {
                drain_workqueue(vmbus_connection.work_queue);
@@ -288,7 +296,8 @@ struct vmbus_channel *relid2channel(u32 relid)
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_sc;
 
-       mutex_lock(&vmbus_connection.channel_mutex);
+       BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (channel->offermsg.child_relid == relid) {
                        found_channel = channel;
@@ -307,7 +316,6 @@ struct vmbus_channel *relid2channel(u32 relid)
                        }
                }
        }
-       mutex_unlock(&vmbus_connection.channel_mutex);
 
        return found_channel;
 }
@@ -474,7 +482,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 /*
  * vmbus_set_event - Send an event notification to the parent
  */
-int vmbus_set_event(struct vmbus_channel *channel)
+void vmbus_set_event(struct vmbus_channel *channel)
 {
        u32 child_relid = channel->offermsg.child_relid;
 
@@ -485,5 +493,5 @@ int vmbus_set_event(struct vmbus_channel *channel)
                        (child_relid >> 5));
        }
 
-       return hv_signal_event(channel->sig_event);
+       hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
 }
index 11bca51ef5ff9ea9a522c2f1a9bf9c5a682bb2cd..a1c086ba3b9a0736c522d648092d04d4ac92a3c7 100644 (file)
@@ -204,6 +204,8 @@ int hv_init(void)
               sizeof(int) * NR_CPUS);
        memset(hv_context.event_dpc, 0,
               sizeof(void *) * NR_CPUS);
+       memset(hv_context.msg_dpc, 0,
+              sizeof(void *) * NR_CPUS);
        memset(hv_context.clk_evt, 0,
               sizeof(void *) * NR_CPUS);
 
@@ -295,8 +297,14 @@ void hv_cleanup(void)
         * Cleanup the TSC page based CS.
         */
        if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
-               clocksource_change_rating(&hyperv_cs_tsc, 10);
-               clocksource_unregister(&hyperv_cs_tsc);
+               /*
+                * Crash can happen in an interrupt context and unregistering
+                * a clocksource is impossible and redundant in this case.
+                */
+               if (!oops_in_progress) {
+                       clocksource_change_rating(&hyperv_cs_tsc, 10);
+                       clocksource_unregister(&hyperv_cs_tsc);
+               }
 
                hypercall_msr.as_uint64 = 0;
                wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
@@ -337,22 +345,6 @@ int hv_post_message(union hv_connection_id connection_id,
        return status & 0xFFFF;
 }
 
-
-/*
- * hv_signal_event -
- * Signal an event on the specified connection using the hypervisor event IPC.
- *
- * This involves a hypercall.
- */
-int hv_signal_event(void *con_id)
-{
-       u64 status;
-
-       status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL);
-
-       return status & 0xFFFF;
-}
-
 static int hv_ce_set_next_event(unsigned long delta,
                                struct clock_event_device *evt)
 {
@@ -425,6 +417,13 @@ int hv_synic_alloc(void)
                }
                tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
 
+               hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+               if (hv_context.msg_dpc[cpu] == NULL) {
+                       pr_err("Unable to allocate event dpc\n");
+                       goto err;
+               }
+               tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
+
                hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
                if (hv_context.clk_evt[cpu] == NULL) {
                        pr_err("Unable to allocate clock event device\n");
@@ -466,6 +465,7 @@ err:
 static void hv_synic_free_cpu(int cpu)
 {
        kfree(hv_context.event_dpc[cpu]);
+       kfree(hv_context.msg_dpc[cpu]);
        kfree(hv_context.clk_evt[cpu]);
        if (hv_context.synic_event_page[cpu])
                free_page((unsigned long)hv_context.synic_event_page[cpu]);
index c37a71e13de093ada851cf91c9e9851bb964f68a..23c70799ad8ace23b5c83ae4a47d1e781d623313 100644 (file)
@@ -251,7 +251,6 @@ void hv_fcopy_onchannelcallback(void *context)
                 */
 
                fcopy_transaction.recv_len = recvlen;
-               fcopy_transaction.recv_channel = channel;
                fcopy_transaction.recv_req_id = requestid;
                fcopy_transaction.fcopy_msg = fcopy_msg;
 
@@ -317,6 +316,7 @@ static void fcopy_on_reset(void)
 int hv_fcopy_init(struct hv_util_service *srv)
 {
        recv_buffer = srv->recv_buffer;
+       fcopy_transaction.recv_channel = srv->channel;
 
        /*
         * When this driver loads, the user level daemon that
index d4ab81bcd51509505beda71c0beac8aa78ddaf8a..9b9b370fe22a8b4295fd217e745bd9bde15c1f9e 100644 (file)
@@ -639,7 +639,6 @@ void hv_kvp_onchannelcallback(void *context)
                         */
 
                        kvp_transaction.recv_len = recvlen;
-                       kvp_transaction.recv_channel = channel;
                        kvp_transaction.recv_req_id = requestid;
                        kvp_transaction.kvp_msg = kvp_msg;
 
@@ -688,6 +687,7 @@ int
 hv_kvp_init(struct hv_util_service *srv)
 {
        recv_buffer = srv->recv_buffer;
+       kvp_transaction.recv_channel = srv->channel;
 
        /*
         * When this driver loads, the user level daemon that
index 67def4a831c80461bd50c9ae867b112f12af414a..3fba14e88f038ffeee4f256c87bdc66fafc01b07 100644 (file)
@@ -263,7 +263,6 @@ void hv_vss_onchannelcallback(void *context)
                         */
 
                        vss_transaction.recv_len = recvlen;
-                       vss_transaction.recv_channel = channel;
                        vss_transaction.recv_req_id = requestid;
                        vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
 
@@ -337,6 +336,7 @@ hv_vss_init(struct hv_util_service *srv)
                return -ENOTSUPP;
        }
        recv_buffer = srv->recv_buffer;
+       vss_transaction.recv_channel = srv->channel;
 
        /*
         * When this driver loads, the user level daemon that
index 7994ec2e4151b085bbcb596d8ddbd4d8765b939e..d5acaa2d8e61d0a63f8dfcf65023c3aee6aa40b3 100644 (file)
@@ -322,6 +322,7 @@ static int util_probe(struct hv_device *dev,
        srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
        if (!srv->recv_buffer)
                return -ENOMEM;
+       srv->channel = dev->channel;
        if (srv->util_init) {
                ret = srv->util_init(srv);
                if (ret) {
index 4f42c0e20c20fd12f6de24d4490fb23952b7ed79..9a9983fa4531ad52a0f2918ffb51b42f9e8f2b89 100644 (file)
@@ -310,6 +310,9 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
        return hvt;
 
 err_free_hvt:
+       spin_lock(&hvt_list_lock);
+       list_del(&hvt->list);
+       spin_unlock(&hvt_list_lock);
        kfree(hvt);
        return NULL;
 }
index 2f8c0f40930bd09a414ae52e544e4bf2ed5b879a..12321b93a756b947f09b95f79dade93849794040 100644 (file)
@@ -443,10 +443,11 @@ struct hv_context {
        u32 vp_index[NR_CPUS];
        /*
         * Starting with win8, we can take channel interrupts on any CPU;
-        * we will manage the tasklet that handles events on a per CPU
+        * we will manage the tasklets that handle events and messages on a per CPU
         * basis.
         */
        struct tasklet_struct *event_dpc[NR_CPUS];
+       struct tasklet_struct *msg_dpc[NR_CPUS];
        /*
         * To optimize the mapping of relid to channel, maintain
         * per-cpu list of the channels based on their CPU affinity.
@@ -495,8 +496,6 @@ extern int hv_post_message(union hv_connection_id connection_id,
                         enum hv_message_type message_type,
                         void *payload, size_t payload_size);
 
-extern int hv_signal_event(void *con_id);
-
 extern int hv_synic_alloc(void);
 
 extern void hv_synic_free(void);
@@ -525,7 +524,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
                    struct kvec *kv_list,
-                   u32 kv_count, bool *signal);
+                   u32 kv_count, bool *signal, bool lock);
 
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
@@ -620,6 +619,30 @@ struct vmbus_channel_message_table_entry {
 extern struct vmbus_channel_message_table_entry
        channel_message_table[CHANNELMSG_COUNT];
 
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg)
+{
+       msg->header.message_type = HVMSG_NONE;
+
+       /*
+        * Make sure the write to MessageType (ie set to
+        * HVMSG_NONE) happens before we read the
+        * MessagePending and EOMing. Otherwise, the EOMing
+        * will not deliver any more messages since there is
+        * no empty slot
+        */
+       mb();
+
+       if (msg->header.message_flags.msg_pending) {
+               /*
+                * This will cause message queue rescan to
+                * possibly deliver another msg from the
+                * hypervisor
+                */
+               wrmsrl(HV_X64_MSR_EOM, 0);
+       }
+}
+
 /* General vmbus interface */
 
 struct hv_device *vmbus_device_create(const uuid_le *type,
@@ -644,9 +667,10 @@ void vmbus_disconnect(void);
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-int vmbus_set_event(struct vmbus_channel *channel);
+void vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
+void vmbus_on_msg_dpc(unsigned long data);
 
 int hv_kvp_init(struct hv_util_service *);
 void hv_kvp_deinit(void);
@@ -659,7 +683,7 @@ void hv_vss_onchannelcallback(void *);
 int hv_fcopy_init(struct hv_util_service *);
 void hv_fcopy_deinit(void);
 void hv_fcopy_onchannelcallback(void *);
-void vmbus_initiate_unload(void);
+void vmbus_initiate_unload(bool crash);
 
 static inline void hv_poll_channel(struct vmbus_channel *channel,
                                   void (*cb)(void *))
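
vmbus_signal_eom(), added to hyperv_vmbus.h above, marks the per-CPU message slot free and only
then checks msg_pending, with mb() guaranteeing the HVMSG_NONE store is visible first; otherwise
the EOM write could not deliver another message because the slot would still look occupied. The
fragment below is a user-space analogue of that store-then-load ordering, assuming C11 atomics in
place of mb() and a printf in place of the HV_X64_MSR_EOM write.

/* User-space analogue of the ordering in vmbus_signal_eom(); not the kernel helper. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HVMSG_NONE 0

struct msg_slot {
        _Atomic int message_type;
        _Atomic bool msg_pending;
};

static void signal_eom(struct msg_slot *slot)
{
        atomic_store_explicit(&slot->message_type, HVMSG_NONE,
                              memory_order_relaxed);

        /* Full barrier: the store above must be visible before the load
         * below, mirroring the mb() in the kernel helper. */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load_explicit(&slot->msg_pending, memory_order_relaxed))
                printf("would write HV_X64_MSR_EOM to trigger a rescan\n");
}

int main(void)
{
        struct msg_slot slot = { .message_type = 1, .msg_pending = true };

        signal_eom(&slot);
        return 0;
}
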
index b53702ce692f35652c8efcb987cc0d79aa1b6fc0..5613e2b5cff7759861a1e2d7ad2748f7626d16da 100644 (file)
@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-                   struct kvec *kv_list, u32 kv_count, bool *signal)
+                   struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
 {
        int i = 0;
        u32 bytes_avail_towrite;
@@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
-       unsigned long flags;
+       unsigned long flags = 0;
 
        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;
 
        totalbytes_towrite += sizeof(u64);
 
-       spin_lock_irqsave(&outring_info->ring_lock, flags);
+       if (lock)
+               spin_lock_irqsave(&outring_info->ring_lock, flags);
 
        hv_get_ringbuffer_availbytes(outring_info,
                                &bytes_avail_toread,
@@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
-               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+               if (lock)
+                       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
 
@@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
        hv_set_next_write_location(outring_info, next_write_location);
 
 
-       spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+       if (lock)
+               spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
@@ -388,7 +391,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
-       unsigned long flags;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
@@ -397,7 +399,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
        if (buflen <= 0)
                return -EINVAL;
 
-       spin_lock_irqsave(&inring_info->ring_lock, flags);
 
        *buffer_actual_len = 0;
        *requestid = 0;
@@ -412,7 +413,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                 * No error is set when there is even no header, drivers are
                 * supposed to analyze buffer_actual_len.
                 */
-               goto out_unlock;
+               return ret;
        }
 
        next_read_location = hv_get_next_read_location(inring_info);
@@ -425,15 +426,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;
 
-       if (bytes_avail_toread < packetlen + offset) {
-               ret = -EAGAIN;
-               goto out_unlock;
-       }
+       if (bytes_avail_toread < packetlen + offset)
+               return -EAGAIN;
 
-       if (packetlen > buflen) {
-               ret = -ENOBUFS;
-               goto out_unlock;
-       }
+       if (packetlen > buflen)
+               return -ENOBUFS;
 
        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);
@@ -460,7 +457,5 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 
        *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
-out_unlock:
-       spin_unlock_irqrestore(&inring_info->ring_lock, flags);
        return ret;
 }
index 328e4c3808e06d146add9ade074efd2d1728e1dd..64713ff47e36c73e70d429dd6650d27d3e2e1d97 100644 (file)
@@ -45,7 +45,6 @@
 
 static struct acpi_device  *hv_acpi_dev;
 
-static struct tasklet_struct msg_dpc;
 static struct completion probe_event;
 
 
@@ -477,6 +476,24 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(channel_vp_mapping);
 
+static ssize_t vendor_show(struct device *dev,
+                          struct device_attribute *dev_attr,
+                          char *buf)
+{
+       struct hv_device *hv_dev = device_to_hv_device(dev);
+       return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t device_show(struct device *dev,
+                          struct device_attribute *dev_attr,
+                          char *buf)
+{
+       struct hv_device *hv_dev = device_to_hv_device(dev);
+       return sprintf(buf, "0x%x\n", hv_dev->device_id);
+}
+static DEVICE_ATTR_RO(device);
+
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 static struct attribute *vmbus_attrs[] = {
        &dev_attr_id.attr,
@@ -502,6 +519,8 @@ static struct attribute *vmbus_attrs[] = {
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        &dev_attr_channel_vp_mapping.attr,
+       &dev_attr_vendor.attr,
+       &dev_attr_device.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(vmbus);
@@ -562,6 +581,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);
 
+       /* The hv_sock driver handles all hv_sock offers. */
+       if (is_hvsock_channel(hv_dev->channel))
+               return drv->hvsock;
+
        if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
                return 1;
 
@@ -685,28 +708,10 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
        if (dev->event_handler)
                dev->event_handler(dev);
 
-       msg->header.message_type = HVMSG_NONE;
-
-       /*
-        * Make sure the write to MessageType (ie set to
-        * HVMSG_NONE) happens before we read the
-        * MessagePending and EOMing. Otherwise, the EOMing
-        * will not deliver any more messages since there is
-        * no empty slot
-        */
-       mb();
-
-       if (msg->header.message_flags.msg_pending) {
-               /*
-                * This will cause message queue rescan to
-                * possibly deliver another msg from the
-                * hypervisor
-                */
-               wrmsrl(HV_X64_MSR_EOM, 0);
-       }
+       vmbus_signal_eom(msg);
 }
 
-static void vmbus_on_msg_dpc(unsigned long data)
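+/*
+ * Handle a single host message per invocation; vmbus_signal_eom() will
+ * prompt the hypervisor to deliver the next pending message, if any.
+ */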
+void vmbus_on_msg_dpc(unsigned long data)
 {
        int cpu = smp_processor_id();
        void *page_addr = hv_context.synic_message_page[cpu];
@@ -716,52 +721,32 @@ static void vmbus_on_msg_dpc(unsigned long data)
        struct vmbus_channel_message_table_entry *entry;
        struct onmessage_work_context *ctx;
 
-       while (1) {
-               if (msg->header.message_type == HVMSG_NONE)
-                       /* no msg */
-                       break;
+       if (msg->header.message_type == HVMSG_NONE)
+               /* no msg */
+               return;
 
-               hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+       hdr = (struct vmbus_channel_message_header *)msg->u.payload;
 
-               if (hdr->msgtype >= CHANNELMSG_COUNT) {
-                       WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
-                       goto msg_handled;
-               }
+       if (hdr->msgtype >= CHANNELMSG_COUNT) {
+               WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+               goto msg_handled;
+       }
 
-               entry = &channel_message_table[hdr->msgtype];
-               if (entry->handler_type == VMHT_BLOCKING) {
-                       ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
-                       if (ctx == NULL)
-                               continue;
+       entry = &channel_message_table[hdr->msgtype];
+       if (entry->handler_type == VMHT_BLOCKING) {
+               ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+               if (ctx == NULL)
+                       return;
 
-                       INIT_WORK(&ctx->work, vmbus_onmessage_work);
-                       memcpy(&ctx->msg, msg, sizeof(*msg));
+               INIT_WORK(&ctx->work, vmbus_onmessage_work);
+               memcpy(&ctx->msg, msg, sizeof(*msg));
 
-                       queue_work(vmbus_connection.work_queue, &ctx->work);
-               } else
-                       entry->message_handler(hdr);
+               queue_work(vmbus_connection.work_queue, &ctx->work);
+       } else
+               entry->message_handler(hdr);
 
 msg_handled:
-               msg->header.message_type = HVMSG_NONE;
-
-               /*
-                * Make sure the write to MessageType (ie set to
-                * HVMSG_NONE) happens before we read the
-                * MessagePending and EOMing. Otherwise, the EOMing
-                * will not deliver any more messages since there is
-                * no empty slot
-                */
-               mb();
-
-               if (msg->header.message_flags.msg_pending) {
-                       /*
-                        * This will cause message queue rescan to
-                        * possibly deliver another msg from the
-                        * hypervisor
-                        */
-                       wrmsrl(HV_X64_MSR_EOM, 0);
-               }
-       }
+       vmbus_signal_eom(msg);
 }
 
 static void vmbus_isr(void)
@@ -814,7 +799,7 @@ static void vmbus_isr(void)
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
                        hv_process_timer_expiration(msg, cpu);
                else
-                       tasklet_schedule(&msg_dpc);
+                       tasklet_schedule(hv_context.msg_dpc[cpu]);
        }
 }
 
@@ -838,8 +823,6 @@ static int vmbus_bus_init(void)
                return ret;
        }
 
-       tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-
        ret = bus_register(&hv_bus);
        if (ret)
                goto err_cleanup;
@@ -957,6 +940,7 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
        memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
        memcpy(&child_device_obj->dev_instance, instance,
               sizeof(uuid_le));
+       child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
 
 
        return child_device_obj;
@@ -1268,7 +1252,7 @@ static void hv_kexec_handler(void)
        int cpu;
 
        hv_synic_clockevents_cleanup();
-       vmbus_initiate_unload();
+       vmbus_initiate_unload(false);
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
        hv_cleanup();
@@ -1276,7 +1260,7 @@ static void hv_kexec_handler(void)
 
 static void hv_crash_handler(struct pt_regs *regs)
 {
-       vmbus_initiate_unload();
+       vmbus_initiate_unload(true);
        /*
         * In crash handler we can't schedule synic cleanup for all CPUs,
         * doing the cleanup for current CPU only. This should be sufficient
@@ -1334,7 +1318,8 @@ static void __exit vmbus_exit(void)
        hv_synic_clockevents_cleanup();
        vmbus_disconnect();
        hv_remove_vmbus_irq();
-       tasklet_kill(&msg_dpc);
+       for_each_online_cpu(cpu)
+               tasklet_kill(hv_context.msg_dpc[cpu]);
        vmbus_free_channels();
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                unregister_die_notifier(&hyperv_die_block);
index c85935f3525a8543dd5e2f350f6c7785772219ad..db0541031c72c502a416d563e1318584365023aa 100644 (file)
@@ -4,6 +4,7 @@
 menuconfig CORESIGHT
        bool "CoreSight Tracing Support"
        select ARM_AMBA
+       select PERF_EVENTS
        help
          This framework provides a kernel interface for the CoreSight debug
          and trace drivers to register themselves with. It's intended to build
index 99f8e5f6256e25c438862c2f93b80f4e59eb0f64..cf8c6d68974710001c27cfcb71d855c371ce2786 100644 (file)
@@ -8,6 +8,8 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
 obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
                                           coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
+                                       coresight-etm3x-sysfs.o \
+                                       coresight-etm-perf.o
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
index 77d0f9c1118dfdfcc29a2d0435f3311a12793414..acbce79934d6373d3d58bc6e96df8b0581b044b4 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Embedded Trace Buffer driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,8 +12,8 @@
  * GNU General Public License for more details.
  */
 
+#include <asm/local.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/coresight.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
+#include <linux/circ_buf.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+
+#include <asm/local.h>
 
 #include "coresight-priv.h"
 
 #define ETB_FFSR_BIT           1
 #define ETB_FRAME_SIZE_WORDS   4
 
+/**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur:       index of the current buffer
+ * @nr_pages:  max number of pages granted to us
+ * @offset:    offset within the current buffer
+ * @data_size: how much we collected in this run
+ * @lost:      non-zero if the HW buffer wrapped around
+ * @snapshot:  is this run in snapshot mode
+ * @data_pages:        a handle on the ring buffer's pages
+ */
+struct cs_buffers {
+       unsigned int            cur;
+       unsigned int            nr_pages;
+       unsigned long           offset;
+       local_t                 data_size;
+       local_t                 lost;
+       bool                    snapshot;
+       void                    **data_pages;
+};
+
 /**
  * struct etb_drvdata - specifics associated to an ETB component
  * @base:      memory mapped base address for this component.
  * @csdev:     component vitals needed by the framework.
  * @miscdev:   specifics to handle "/dev/xyz.etb" entry.
  * @spinlock:  only one at a time pls.
- * @in_use:    synchronise user space access to etb buffer.
+ * @reading:   synchronise user space access to etb buffer.
+ * @mode:      how this ETB is being used, perf mode or sysFS mode.
  * @buf:       area of memory where ETB buffer content gets sent.
  * @buffer_depth: size of @buf.
- * @enable:    this ETB is being used.
  * @trigger_cntr: amount of words to store after a trigger.
  */
 struct etb_drvdata {
@@ -84,10 +111,10 @@ struct etb_drvdata {
        struct coresight_device *csdev;
        struct miscdevice       miscdev;
        spinlock_t              spinlock;
-       atomic_t                in_use;
+       local_t                 reading;
+       local_t                 mode;
        u8                      *buf;
        u32                     buffer_depth;
-       bool                    enable;
        u32                     trigger_cntr;
 };
 
@@ -132,18 +159,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
        CS_LOCK(drvdata->base);
 }
 
-static int etb_enable(struct coresight_device *csdev)
+static int etb_enable(struct coresight_device *csdev, u32 mode)
 {
-       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       u32 val;
        unsigned long flags;
+       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
+       val = local_cmpxchg(&drvdata->mode,
+                           CS_MODE_DISABLED, mode);
+       /*
+        * When accessing from Perf, a HW buffer can be handled
+        * by a single trace entity.  In sysFS mode many tracers
+        * can be logging to the same HW buffer.
+        */
+       if (val == CS_MODE_PERF)
+               return -EBUSY;
+
+       /* Nothing to do, the tracer is already enabled. */
+       if (val == CS_MODE_SYSFS)
+               goto out;
 
        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_enable_hw(drvdata);
-       drvdata->enable = true;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+out:
        dev_info(drvdata->dev, "ETB enabled\n");
        return 0;
 }
@@ -244,17 +284,225 @@ static void etb_disable(struct coresight_device *csdev)
        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_disable_hw(drvdata);
        etb_dump_hw(drvdata);
-       drvdata->enable = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-       pm_runtime_put(drvdata->dev);
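+       /* Let a new perf or sysFS session claim the ETB. */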
+       local_set(&drvdata->mode, CS_MODE_DISABLED);
 
        dev_info(drvdata->dev, "ETB disabled\n");
 }
 
+static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+                             void **pages, int nr_pages, bool overwrite)
+{
+       int node;
+       struct cs_buffers *buf;
+
+       if (cpu == -1)
+               cpu = smp_processor_id();
+       node = cpu_to_node(cpu);
+
+       buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+       if (!buf)
+               return NULL;
+
+       buf->snapshot = overwrite;
+       buf->nr_pages = nr_pages;
+       buf->data_pages = pages;
+
+       return buf;
+}
+
+static void etb_free_buffer(void *config)
+{
+       struct cs_buffers *buf = config;
+
+       kfree(buf);
+}
+
+static int etb_set_buffer(struct coresight_device *csdev,
+                         struct perf_output_handle *handle,
+                         void *sink_config)
+{
+       int ret = 0;
+       unsigned long head;
+       struct cs_buffers *buf = sink_config;
+
+       /* wrap head around to the amount of space we have */
+       head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+       /* find the page to write to */
+       buf->cur = head / PAGE_SIZE;
+
+       /* and offset within that page */
+       buf->offset = head % PAGE_SIZE;
+
+       local_set(&buf->data_size, 0);
+
+       return ret;
+}
+
+static unsigned long etb_reset_buffer(struct coresight_device *csdev,
+                                     struct perf_output_handle *handle,
+                                     void *sink_config, bool *lost)
+{
+       unsigned long size = 0;
+       struct cs_buffers *buf = sink_config;
+
+       if (buf) {
+               /*
+                * In snapshot mode ->data_size holds the new address of the
+                * ring buffer's head.  The size itself is the whole address
+                * range since we want the latest information.
+                */
+               if (buf->snapshot)
+                       handle->head = local_xchg(&buf->data_size,
+                                                 buf->nr_pages << PAGE_SHIFT);
+
+               /*
+                * Tell the tracer PMU how much we got in this run and if
+                * something went wrong along the way.  Nobody else can use
+                * this cs_buffers instance until we are done.  As such
+                * resetting parameters here and squaring off with the ring
+                * buffer API in the tracer PMU is fine.
+                */
+               *lost = !!local_xchg(&buf->lost, 0);
+               size = local_xchg(&buf->data_size, 0);
+       }
+
+       return size;
+}
+
+static void etb_update_buffer(struct coresight_device *csdev,
+                             struct perf_output_handle *handle,
+                             void *sink_config)
+{
+       int i, cur;
+       u8 *buf_ptr;
+       u32 read_ptr, write_ptr, capacity;
+       u32 status, read_data, to_read;
+       unsigned long offset;
+       struct cs_buffers *buf = sink_config;
+       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       if (!buf)
+               return;
+
+       capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+
+       CS_UNLOCK(drvdata->base);
+       etb_disable_hw(drvdata);
+
+       /* unit is in words, not bytes */
+       read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+       write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+       /*
+        * Entries should be aligned to the frame size.  If they are not,
+        * go back to the last alignment point to give decoding tools a
+        * chance to fix things.
+        */
+       if (write_ptr % ETB_FRAME_SIZE_WORDS) {
+               dev_err(drvdata->dev,
+                       "write_ptr: %lu not aligned to formatter frame size\n",
+                       (unsigned long)write_ptr);
+
+               write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
+               local_inc(&buf->lost);
+       }
+
+       /*
+        * Get a hold of the status register and see if a wrap around
+        * has occurred.  If so adjust things accordingly.  Otherwise
+        * start at the beginning and go until the write pointer has
+        * been reached.
+        */
+       status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+       if (status & ETB_STATUS_RAM_FULL) {
+               local_inc(&buf->lost);
+               to_read = capacity;
+               read_ptr = write_ptr;
+       } else {
+               to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
+               to_read *= ETB_FRAME_SIZE_WORDS;
+       }
+
+       /*
+        * Make sure we don't overwrite data that hasn't been consumed yet.
+        * It is entirely possible that the HW buffer has more data than the
+        * ring buffer can currently handle.  If so adjust the start address
+        * to take only the last traces.
+        *
+        * In snapshot mode we are looking to get the latest traces only and as
+        * such, we don't care about not overwriting data that hasn't been
+        * processed by user space.
+        */
+       if (!buf->snapshot && to_read > handle->size) {
+               u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
+
+               /* The new read pointer must be frame size aligned */
+               to_read -= handle->size & mask;
+               /*
+                * Move the RAM read pointer up, keeping in mind that
+                * everything is in frame size units.
+                */
+               read_ptr = (write_ptr + drvdata->buffer_depth) -
+                                       to_read / ETB_FRAME_SIZE_WORDS;
+               /* Wrap around if need be */
+               read_ptr &= ~(drvdata->buffer_depth - 1);
+               /* let the decoder know we've skipped ahead */
+               local_inc(&buf->lost);
+       }
+
+       /* finally tell HW where we want to start reading from */
+       writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+       cur = buf->cur;
+       offset = buf->offset;
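+       /* Drain the ETB RAM one 32-bit word at a time into the perf pages. */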
+       for (i = 0; i < to_read; i += 4) {
+               buf_ptr = buf->data_pages[cur] + offset;
+               read_data = readl_relaxed(drvdata->base +
+                                         ETB_RAM_READ_DATA_REG);
+               *buf_ptr++ = read_data >> 0;
+               *buf_ptr++ = read_data >> 8;
+               *buf_ptr++ = read_data >> 16;
+               *buf_ptr++ = read_data >> 24;
+
+               offset += 4;
+               if (offset >= PAGE_SIZE) {
+                       offset = 0;
+                       cur++;
+                       /* wrap around at the end of the buffer */
+                       cur &= buf->nr_pages - 1;
+               }
+       }
+
+       /* reset ETB buffer for next run */
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+
+       /*
+        * In snapshot mode all we have to do is communicate to
+        * perf_aux_output_end() the address of the current head.  In full
+        * trace mode the same function expects a size to move rb->aux_head
+        * forward.
+        */
+       if (buf->snapshot)
+               local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+       else
+               local_add(to_read, &buf->data_size);
+
+       etb_enable_hw(drvdata);
+       CS_LOCK(drvdata->base);
+}
+
 static const struct coresight_ops_sink etb_sink_ops = {
        .enable         = etb_enable,
        .disable        = etb_disable,
+       .alloc_buffer   = etb_alloc_buffer,
+       .free_buffer    = etb_free_buffer,
+       .set_buffer     = etb_set_buffer,
+       .reset_buffer   = etb_reset_buffer,
+       .update_buffer  = etb_update_buffer,
 };
 
 static const struct coresight_ops etb_cs_ops = {
@@ -266,7 +514,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
        unsigned long flags;
 
        spin_lock_irqsave(&drvdata->spinlock, flags);
-       if (drvdata->enable) {
+       if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                etb_disable_hw(drvdata);
                etb_dump_hw(drvdata);
                etb_enable_hw(drvdata);
@@ -281,7 +529,7 @@ static int etb_open(struct inode *inode, struct file *file)
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
 
-       if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+       if (local_cmpxchg(&drvdata->reading, 0, 1))
                return -EBUSY;
 
        dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -317,7 +565,7 @@ static int etb_release(struct inode *inode, struct file *file)
 {
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
-       atomic_set(&drvdata->in_use, 0);
+       local_set(&drvdata->reading, 0);
 
        dev_dbg(drvdata->dev, "%s: released\n", __func__);
        return 0;
@@ -489,15 +737,6 @@ err_misc_register:
        return ret;
 }
 
-static int etb_remove(struct amba_device *adev)
-{
-       struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
-       misc_deregister(&drvdata->miscdev);
-       coresight_unregister(drvdata->csdev);
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int etb_runtime_suspend(struct device *dev)
 {
@@ -537,14 +776,10 @@ static struct amba_driver etb_driver = {
                .name   = "coresight-etb10",
                .owner  = THIS_MODULE,
                .pm     = &etb_dev_pm_ops,
+               .suppress_bind_attrs = true,
 
        },
        .probe          = etb_probe,
-       .remove         = etb_remove,
        .id_table       = etb_ids,
 };
-
-module_amba_driver(etb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
+builtin_amba_driver(etb_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
new file mode 100644 (file)
index 0000000..755125f
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "coresight-priv.h"
+
+static struct pmu etm_pmu;
+static bool etm_perf_up;
+
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work:              Handle to free allocated memory outside IRQ context.
+ * @mask:              Holds the CPU(s) this event was set for.
+ * @snk_config:                The sink configuration.
+ * @path:              An array of paths, each slot for one CPU.
+ */
+struct etm_event_data {
+       struct work_struct work;
+       cpumask_t mask;
+       void *snk_config;
+       struct list_head **path;
+};
+
+static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+
+/* ETMv3.5/PTM's ETMCR is 'config' */
+PMU_FORMAT_ATTR(cycacc,                "config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(timestamp,     "config:" __stringify(ETM_OPT_TS));
+
+static struct attribute *etm_config_formats_attr[] = {
+       &format_attr_cycacc.attr,
+       &format_attr_timestamp.attr,
+       NULL,
+};
+
+static struct attribute_group etm_pmu_format_group = {
+       .name   = "format",
+       .attrs  = etm_config_formats_attr,
+};
+
+static const struct attribute_group *etm_pmu_attr_groups[] = {
+       &etm_pmu_format_group,
+       NULL,
+};
+
+static void etm_event_read(struct perf_event *event) {}
+
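+/* perf expects -ENOENT for events that belong to another PMU. */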
+static int etm_event_init(struct perf_event *event)
+{
+       if (event->attr.type != etm_pmu.type)
+               return -ENOENT;
+
+       return 0;
+}
+
+static void free_event_data(struct work_struct *work)
+{
+       int cpu;
+       cpumask_t *mask;
+       struct etm_event_data *event_data;
+       struct coresight_device *sink;
+
+       event_data = container_of(work, struct etm_event_data, work);
+       mask = &event_data->mask;
+       /*
+        * First deal with the sink configuration.  See comment in
+        * etm_setup_aux() about why we take the first available path.
+        */
+       if (event_data->snk_config) {
+               cpu = cpumask_first(mask);
+               sink = coresight_get_sink(event_data->path[cpu]);
+               if (sink_ops(sink)->free_buffer)
+                       sink_ops(sink)->free_buffer(event_data->snk_config);
+       }
+
+       for_each_cpu(cpu, mask) {
+               if (event_data->path[cpu])
+                       coresight_release_path(event_data->path[cpu]);
+       }
+
+       kfree(event_data->path);
+       kfree(event_data);
+}
+
+static void *alloc_event_data(int cpu)
+{
+       int size;
+       cpumask_t *mask;
+       struct etm_event_data *event_data;
+
+       /* First get memory for the session's data */
+       event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
+       if (!event_data)
+               return NULL;
+
+       /* Make sure nothing disappears under us */
+       get_online_cpus();
+       size = num_online_cpus();
+
+       mask = &event_data->mask;
+       if (cpu != -1)
+               cpumask_set_cpu(cpu, mask);
+       else
+               cpumask_copy(mask, cpu_online_mask);
+       put_online_cpus();
+
+       /*
+        * Each CPU has a single path between source and destination.  As such
+        * allocate an array using CPU numbers as indexes.  That way a path
+        * for any CPU can easily be accessed at any given time.  We proceed
+        * the same way for sessions involving a single CPU.  The cost of
+        * unused memory when dealing with single CPU trace scenarios is small
+        * compared to the cost of searching through an optimized array.
+        */
+       event_data->path = kcalloc(size,
+                                  sizeof(struct list_head *), GFP_KERNEL);
+       if (!event_data->path) {
+               kfree(event_data);
+               return NULL;
+       }
+
+       return event_data;
+}
+
+static void etm_free_aux(void *data)
+{
+       struct etm_event_data *event_data = data;
+
+       schedule_work(&event_data->work);
+}
+
+static void *etm_setup_aux(int event_cpu, void **pages,
+                          int nr_pages, bool overwrite)
+{
+       int cpu;
+       cpumask_t *mask;
+       struct coresight_device *sink;
+       struct etm_event_data *event_data = NULL;
+
+       event_data = alloc_event_data(event_cpu);
+       if (!event_data)
+               return NULL;
+
+       INIT_WORK(&event_data->work, free_event_data);
+
+       mask = &event_data->mask;
+
+       /* Setup the path for each CPU in a trace session */
+       for_each_cpu(cpu, mask) {
+               struct coresight_device *csdev;
+
+               csdev = per_cpu(csdev_src, cpu);
+               if (!csdev)
+                       goto err;
+
+               /*
+                * Building a path doesn't enable it, it simply builds a
+                * list of devices from source to sink that can be
+                * referenced later when the path is actually needed.
+                */
+               event_data->path[cpu] = coresight_build_path(csdev);
+               if (!event_data->path[cpu])
+                       goto err;
+       }
+
+       /*
+        * In theory nothing prevents tracers in a trace session from being
+        * associated with different sinks, nor having a sink per tracer.  But
+        * until we have HW with this kind of topology and a way to convey
+        * sink assignment from the perf cmd line we need to assume tracers
+        * in a trace session are using the same sink.  Therefore pick the sink
+        * found at the end of the first available path.
+        */
+       cpu = cpumask_first(mask);
+       /* Grab the sink at the end of the path */
+       sink = coresight_get_sink(event_data->path[cpu]);
+       if (!sink)
+               goto err;
+
+       if (!sink_ops(sink)->alloc_buffer)
+               goto err;
+
+       /* Get the AUX specific data from the sink buffer */
+       event_data->snk_config =
+                       sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+                                                    nr_pages, overwrite);
+       if (!event_data->snk_config)
+               goto err;
+
+out:
+       return event_data;
+
+err:
+       etm_free_aux(event_data);
+       event_data = NULL;
+       goto out;
+}
+
+static void etm_event_start(struct perf_event *event, int flags)
+{
+       int cpu = smp_processor_id();
+       struct etm_event_data *event_data;
+       struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+       struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+
+       if (!csdev)
+               goto fail;
+
+       /*
+        * Deal with the ring buffer API and get a handle on the
+        * session's information.
+        */
+       event_data = perf_aux_output_begin(handle, event);
+       if (!event_data)
+               goto fail;
+
+       /* We need a sink, no need to continue without one */
+       sink = coresight_get_sink(event_data->path[cpu]);
+       if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
+               goto fail_end_stop;
+
+       /* Configure the sink */
+       if (sink_ops(sink)->set_buffer(sink, handle,
+                                      event_data->snk_config))
+               goto fail_end_stop;
+
+       /* Nothing will happen without a path */
+       if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+               goto fail_end_stop;
+
+       /* Tell the perf core the event is alive */
+       event->hw.state = 0;
+
+       /* Finally enable the tracer */
+       if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+               goto fail_end_stop;
+
+out:
+       return;
+
+fail_end_stop:
+       perf_aux_output_end(handle, 0, true);
+fail:
+       event->hw.state = PERF_HES_STOPPED;
+       goto out;
+}
+
+static void etm_event_stop(struct perf_event *event, int mode)
+{
+       bool lost;
+       int cpu = smp_processor_id();
+       unsigned long size;
+       struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+       struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+       struct etm_event_data *event_data = perf_get_aux(handle);
+
+       if (event->hw.state == PERF_HES_STOPPED)
+               return;
+
+       if (!csdev)
+               return;
+
+       sink = coresight_get_sink(event_data->path[cpu]);
+       if (!sink)
+               return;
+
+       /* stop tracer */
+       source_ops(csdev)->disable(csdev);
+
+       /* tell the core */
+       event->hw.state = PERF_HES_STOPPED;
+
+       if (mode & PERF_EF_UPDATE) {
+               if (WARN_ON_ONCE(handle->event != event))
+                       return;
+
+               /* update trace information */
+               if (!sink_ops(sink)->update_buffer)
+                       return;
+
+               sink_ops(sink)->update_buffer(sink, handle,
+                                             event_data->snk_config);
+
+               if (!sink_ops(sink)->reset_buffer)
+                       return;
+
+               size = sink_ops(sink)->reset_buffer(sink, handle,
+                                                   event_data->snk_config,
+                                                   &lost);
+
+               perf_aux_output_end(handle, size, lost);
+       }
+
+       /* Disabling the path makes its elements available to other sessions */
+       coresight_disable_path(event_data->path[cpu]);
+}
+
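+/*
+ * add/del are called by the perf core when the event is scheduled in and
+ * out; they map onto start/stop above.
+ */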
+static int etm_event_add(struct perf_event *event, int mode)
+{
+       int ret = 0;
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (mode & PERF_EF_START) {
+               etm_event_start(event, 0);
+               if (hwc->state & PERF_HES_STOPPED)
+                       ret = -EINVAL;
+       } else {
+               hwc->state = PERF_HES_STOPPED;
+       }
+
+       return ret;
+}
+
+static void etm_event_del(struct perf_event *event, int mode)
+{
+       etm_event_stop(event, PERF_EF_UPDATE);
+}
+
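+/*
+ * Create (or remove) a "cpuN" sysfs link from the PMU device to the
+ * CoreSight source for that CPU and remember the source in csdev_src so
+ * that etm_setup_aux() and etm_event_start() can find it.
+ */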
+int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{
+       char entry[sizeof("cpu9999999")];
+       int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
+       struct device *pmu_dev = etm_pmu.dev;
+       struct device *cs_dev = &csdev->dev;
+
+       sprintf(entry, "cpu%d", cpu);
+
+       if (!etm_perf_up)
+               return -EPROBE_DEFER;
+
+       if (link) {
+               ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
+               if (ret)
+                       return ret;
+               per_cpu(csdev_src, cpu) = csdev;
+       } else {
+               sysfs_remove_link(&pmu_dev->kobj, entry);
+               per_cpu(csdev_src, cpu) = NULL;
+       }
+
+       return 0;
+}
+
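+/* Register the ETM PMU (CORESIGHT_ETM_PMU_NAME) with the perf core. */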
+static int __init etm_perf_init(void)
+{
+       int ret;
+
+       etm_pmu.capabilities    = PERF_PMU_CAP_EXCLUSIVE;
+
+       etm_pmu.attr_groups     = etm_pmu_attr_groups;
+       etm_pmu.task_ctx_nr     = perf_sw_context;
+       etm_pmu.read            = etm_event_read;
+       etm_pmu.event_init      = etm_event_init;
+       etm_pmu.setup_aux       = etm_setup_aux;
+       etm_pmu.free_aux        = etm_free_aux;
+       etm_pmu.start           = etm_event_start;
+       etm_pmu.stop            = etm_event_stop;
+       etm_pmu.add             = etm_event_add;
+       etm_pmu.del             = etm_event_del;
+
+       ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+       if (ret == 0)
+               etm_perf_up = true;
+
+       return ret;
+}
+device_initcall(etm_perf_init);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
new file mode 100644 (file)
index 0000000..87f5a13
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_ETM_PERF_H
+#define _CORESIGHT_ETM_PERF_H
+
+struct coresight_device;
+
+#ifdef CONFIG_CORESIGHT
+int etm_perf_symlink(struct coresight_device *csdev, bool link);
+
+#else
+static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{ return -EINVAL; }
+
+#endif /* CONFIG_CORESIGHT */
+
+#endif
index b4481eb29304a1ddf4a2b5a86d9244daa3c05f14..51597cb2c08af69293c0b17d8698019fc8af949e 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _CORESIGHT_CORESIGHT_ETM_H
 #define _CORESIGHT_CORESIGHT_ETM_H
 
+#include <asm/local.h>
 #include <linux/spinlock.h>
 #include "coresight-priv.h"
 
 #define ETM_MODE_STALL         BIT(2)
 #define ETM_MODE_TIMESTAMP     BIT(3)
 #define ETM_MODE_CTXID         BIT(4)
-#define ETM_MODE_ALL           0x1f
+#define ETM_MODE_ALL           (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
+                                ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+                                ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
+                                ETM_MODE_EXCL_USER)
 
 #define ETM_SQR_MASK           0x3
 #define ETM_TRACEID_MASK       0x3f
 #define ETM_DEFAULT_EVENT_VAL  (ETM_HARD_WIRE_RES_A    |       \
                                 ETM_ADD_COMP_0         |       \
                                 ETM_EVENT_NOT_A)
+
 /**
- * struct etm_drvdata - specifics associated to an ETM component
- * @base:      memory mapped base address for this component.
- * @dev:       the device entity associated to this component.
- * @atclk:     optional clock for the core parts of the ETM.
- * @csdev:     component vitals needed by the framework.
- * @spinlock:  only one at a time pls.
- * @cpu:       the cpu this component is affined to.
- * @port_size: port size as reported by ETMCR bit 4-6 and 21.
- * @arch:      ETM/PTM version number.
- * @use_cpu14: true if management registers need to be accessed via CP14.
- * @enable:    is this ETM/PTM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:true if we should start tracing at boot time.
- * @os_unlock: true if access to management registers is allowed.
- * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
- * @nr_cntr:   Number of counters as found in ETMCCR bit 13-15.
- * @nr_ext_inp:        Number of external input as found in ETMCCR bit 17-19.
- * @nr_ext_out:        Number of external output as found in ETMCCR bit 20-22.
- * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
- * @etmccr:    value of register ETMCCR.
- * @etmccer:   value of register ETMCCER.
- * @traceid:   value of the current ID for this component.
+ * struct etm_config - configuration information related to an ETM
  * @mode:      controls various modes supported by this ETM/PTM.
  * @ctrl:      used in conjunction with @mode.
  * @trigger_event: setting for register ETMTRIGGER.
  * @startstop_ctrl: setting for register ETMTSSCR.
  * @enable_event: setting for register ETMTEEVR.
  * @enable_ctrl1: setting for register ETMTECR1.
+ * @enable_ctrl2: setting for register ETMTECR2.
  * @fifofull_level: setting for register ETMFFLR.
  * @addr_idx:  index for the address comparator selection.
  * @addr_val:  value for address comparator register.
  * @ctxid_mask: mask applicable to all the context IDs.
  * @sync_freq: Synchronisation frequency.
  * @timestamp_event: Defines an event that requests the insertion
-                   of a timestamp into the trace stream.
+ *                  of a timestamp into the trace stream.
  */
-struct etm_drvdata {
-       void __iomem                    *base;
-       struct device                   *dev;
-       struct clk                      *atclk;
-       struct coresight_device         *csdev;
-       spinlock_t                      spinlock;
-       int                             cpu;
-       int                             port_size;
-       u8                              arch;
-       bool                            use_cp14;
-       bool                            enable;
-       bool                            sticky_enable;
-       bool                            boot_enable;
-       bool                            os_unlock;
-       u8                              nr_addr_cmp;
-       u8                              nr_cntr;
-       u8                              nr_ext_inp;
-       u8                              nr_ext_out;
-       u8                              nr_ctxid_cmp;
-       u32                             etmccr;
-       u32                             etmccer;
-       u32                             traceid;
+struct etm_config {
        u32                             mode;
        u32                             ctrl;
        u32                             trigger_event;
        u32                             startstop_ctrl;
        u32                             enable_event;
        u32                             enable_ctrl1;
+       u32                             enable_ctrl2;
        u32                             fifofull_level;
        u8                              addr_idx;
        u32                             addr_val[ETM_MAX_ADDR_CMP];
@@ -244,6 +209,56 @@ struct etm_drvdata {
        u32                             timestamp_event;
 };
 
+/**
+ * struct etm_drvdata - specifics associated to an ETM component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @atclk:     optional clock for the core parts of the ETM.
+ * @csdev:     component vitals needed by the framework.
+ * @spinlock:  only one at a time pls.
+ * @cpu:       the cpu this component is affined to.
+ * @port_size: port size as reported by ETMCR bit 4-6 and 21.
+ * @arch:      ETM/PTM version number.
+ * @use_cp14:  true if management registers need to be accessed via CP14.
+ * @mode:      this tracer's mode, i.e. sysFS, Perf or disabled.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:true if we should start tracing at boot time.
+ * @os_unlock: true if access to management registers is allowed.
+ * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr:   Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp:        Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out:        Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr:    value of register ETMCCR.
+ * @etmccer:   value of register ETMCCER.
+ * @traceid:   value of the current ID for this component.
+ * @config:    structure holding configuration parameters.
+ */
+struct etm_drvdata {
+       void __iomem                    *base;
+       struct device                   *dev;
+       struct clk                      *atclk;
+       struct coresight_device         *csdev;
+       spinlock_t                      spinlock;
+       int                             cpu;
+       int                             port_size;
+       u8                              arch;
+       bool                            use_cp14;
+       local_t                         mode;
+       bool                            sticky_enable;
+       bool                            boot_enable;
+       bool                            os_unlock;
+       u8                              nr_addr_cmp;
+       u8                              nr_cntr;
+       u8                              nr_ext_inp;
+       u8                              nr_ext_out;
+       u8                              nr_ctxid_cmp;
+       u32                             etmccr;
+       u32                             etmccer;
+       u32                             traceid;
+       struct etm_config               config;
+};
+
 enum etm_addr_type {
        ETM_ADDR_TYPE_NONE,
        ETM_ADDR_TYPE_SINGLE,
@@ -251,4 +266,39 @@ enum etm_addr_type {
        ETM_ADDR_TYPE_START,
        ETM_ADDR_TYPE_STOP,
 };
+
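+/*
+ * ETM management registers are accessed either memory-mapped or, when
+ * use_cp14 is set, through CP14; these helpers hide the difference.
+ */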
+static inline void etm_writel(struct etm_drvdata *drvdata,
+                             u32 val, u32 off)
+{
+       if (drvdata->use_cp14) {
+               if (etm_writel_cp14(off, val)) {
+                       dev_err(drvdata->dev,
+                               "invalid CP14 access to ETM reg: %#x", off);
+               }
+       } else {
+               writel_relaxed(val, drvdata->base + off);
+       }
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+       u32 val;
+
+       if (drvdata->use_cp14) {
+               if (etm_readl_cp14(off, &val)) {
+                       dev_err(drvdata->dev,
+                               "invalid CP14 access to ETM reg: %#x", off);
+               }
+       } else {
+               val = readl_relaxed(drvdata->base + off);
+       }
+
+       return val;
+}
+
+extern const struct attribute_group *coresight_etm_groups[];
+int etm_get_trace_id(struct etm_drvdata *drvdata);
+void etm_set_default(struct etm_config *config);
+void etm_config_trace_mode(struct etm_config *config);
+struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
 #endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
new file mode 100644 (file)
index 0000000..cbb4046
--- /dev/null
@@ -0,0 +1,1272 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm.h"
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_addr_cmp;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_cntr;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_ctxid_cmp;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       unsigned long flags, val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       pm_runtime_get_sync(drvdata->dev);
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       CS_UNLOCK(drvdata->base);
+
+       val = etm_readl(drvdata, ETMSR);
+
+       CS_LOCK(drvdata->base);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       pm_runtime_put(drvdata->dev);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       int i, ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val) {
+               spin_lock(&drvdata->spinlock);
+               memset(config, 0, sizeof(struct etm_config));
+               config->mode = ETM_MODE_EXCLUDE;
+               config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+               for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+                       config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+               }
+
+               etm_set_default(config);
+               spin_unlock(&drvdata->spinlock);
+       }
+
+       return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->mode;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->mode = val & ETM_MODE_ALL;
+
+       if (config->mode & ETM_MODE_EXCLUDE)
+               config->enable_ctrl1 |= ETMTECR1_INC_EXC;
+       else
+               config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+       if (config->mode & ETM_MODE_CYCACC)
+               config->ctrl |= ETMCR_CYC_ACC;
+       else
+               config->ctrl &= ~ETMCR_CYC_ACC;
+
+       if (config->mode & ETM_MODE_STALL) {
+               if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+                       dev_warn(drvdata->dev, "stall mode not supported\n");
+                       ret = -EINVAL;
+                       goto err_unlock;
+               }
+               config->ctrl |= ETMCR_STALL_MODE;
+       } else
+               config->ctrl &= ~ETMCR_STALL_MODE;
+
+       if (config->mode & ETM_MODE_TIMESTAMP) {
+               if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+                       dev_warn(drvdata->dev, "timestamp not supported\n");
+                       ret = -EINVAL;
+                       goto err_unlock;
+               }
+               config->ctrl |= ETMCR_TIMESTAMP_EN;
+       } else
+               config->ctrl &= ~ETMCR_TIMESTAMP_EN;
+
+       if (config->mode & ETM_MODE_CTXID)
+               config->ctrl |= ETMCR_CTXID_SIZE;
+       else
+               config->ctrl &= ~ETMCR_CTXID_SIZE;
+
+       if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+               etm_config_trace_mode(config);
+
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+
+err_unlock:
+       spin_unlock(&drvdata->spinlock);
+       return ret;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t trigger_event_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->trigger_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->trigger_event = val & ETM_EVENT_MASK;
+
+       return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->enable_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->enable_event = val & ETM_EVENT_MASK;
+
+       return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->fifofull_level;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->fifofull_level = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->addr_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val >= drvdata->nr_addr_cmp)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->addr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       val = config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       config->addr_val[idx] = val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val1 = config->addr_val[idx];
+       val2 = config->addr_val[idx + 1];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+               return -EINVAL;
+       /* Lower address comparator cannot have a higher address value */
+       if (val1 > val2)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = val1;
+       config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+       config->addr_val[idx + 1] = val2;
+       config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+       config->enable_ctrl1 |= (1 << (idx/2));
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+static ssize_t addr_start_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_START;
+       config->startstop_ctrl |= (1 << idx);
+       config->enable_ctrl1 |= BIT(25);
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = config->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = config->addr_idx;
+       if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       config->addr_val[idx] = val;
+       config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+       config->startstop_ctrl |= (1 << (idx + 16));
+       config->enable_ctrl1 |= ETMTECR1_START_STOP;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->addr_acctype[config->addr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->addr_acctype[config->addr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->cntr_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val >= drvdata->nr_cntr)
+               return -EINVAL;
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->cntr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->cntr_rld_val[config->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_rld_val[config->cntr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->cntr_event[config->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->cntr_rld_event[config->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       int i, ret = 0;
+       u32 val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       if (!local_read(&drvdata->mode)) {
+               spin_lock(&drvdata->spinlock);
+               for (i = 0; i < drvdata->nr_cntr; i++)
+                       ret += sprintf(buf + ret, "counter %d: %x\n",
+                                      i, config->cntr_val[i]);
+               spin_unlock(&drvdata->spinlock);
+               return ret;
+       }
+
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               val = etm_readl(drvdata, ETMCNTVRn(i));
+               ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+       }
+
+       return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       config->cntr_val[config->cntr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_12_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_12_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_21_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_21_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_23_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_23_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_31_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_31_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_32_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_32_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->seq_13_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->seq_13_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val, flags;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       if (!local_read(&drvdata->mode)) {
+               val = config->seq_curr_state;
+               goto out;
+       }
+
+       pm_runtime_get_sync(drvdata->dev);
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       CS_UNLOCK(drvdata->base);
+       val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+       CS_LOCK(drvdata->base);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       pm_runtime_put(drvdata->dev);
+out:
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val > ETM_SEQ_STATE_MAX_VAL)
+               return -EINVAL;
+
+       config->seq_curr_state = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->ctxid_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val >= drvdata->nr_ctxid_cmp)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       config->ctxid_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       spin_lock(&drvdata->spinlock);
+       val = config->ctxid_vpid[config->ctxid_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       int ret;
+       unsigned long vpid, pid;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &vpid);
+       if (ret)
+               return ret;
+
+       pid = coresight_vpid_to_pid(vpid);
+
+       spin_lock(&drvdata->spinlock);
+       config->ctxid_pid[config->ctxid_idx] = pid;
+       config->ctxid_vpid[config->ctxid_idx] = vpid;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->ctxid_mask;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->ctxid_mask = val;
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->sync_freq;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->sync_freq = val & ETM_SYNC_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       val = config->timestamp_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       config->timestamp_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t cpu_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       int val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->cpu;
+       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static ssize_t traceid_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = etm_get_trace_id(drvdata);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->traceid = val & ETM_TRACEID_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+       &dev_attr_nr_addr_cmp.attr,
+       &dev_attr_nr_cntr.attr,
+       &dev_attr_nr_ctxid_cmp.attr,
+       &dev_attr_etmsr.attr,
+       &dev_attr_reset.attr,
+       &dev_attr_mode.attr,
+       &dev_attr_trigger_event.attr,
+       &dev_attr_enable_event.attr,
+       &dev_attr_fifofull_level.attr,
+       &dev_attr_addr_idx.attr,
+       &dev_attr_addr_single.attr,
+       &dev_attr_addr_range.attr,
+       &dev_attr_addr_start.attr,
+       &dev_attr_addr_stop.attr,
+       &dev_attr_addr_acctype.attr,
+       &dev_attr_cntr_idx.attr,
+       &dev_attr_cntr_rld_val.attr,
+       &dev_attr_cntr_event.attr,
+       &dev_attr_cntr_rld_event.attr,
+       &dev_attr_cntr_val.attr,
+       &dev_attr_seq_12_event.attr,
+       &dev_attr_seq_21_event.attr,
+       &dev_attr_seq_23_event.attr,
+       &dev_attr_seq_31_event.attr,
+       &dev_attr_seq_32_event.attr,
+       &dev_attr_seq_13_event.attr,
+       &dev_attr_seq_curr_state.attr,
+       &dev_attr_ctxid_idx.attr,
+       &dev_attr_ctxid_pid.attr,
+       &dev_attr_ctxid_mask.attr,
+       &dev_attr_sync_freq.attr,
+       &dev_attr_timestamp_event.attr,
+       &dev_attr_traceid.attr,
+       &dev_attr_cpu.attr,
+       NULL,
+};
+
+#define coresight_simple_func(name, offset)                             \
+static ssize_t name##_show(struct device *_dev,                         \
+                          struct device_attribute *attr, char *buf)    \
+{                                                                       \
+       struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
+       return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
+                        readl_relaxed(drvdata->base + offset));        \
+}                                                                       \
+DEVICE_ATTR_RO(name)
+
+coresight_simple_func(etmccr, ETMCCR);
+coresight_simple_func(etmccer, ETMCCER);
+coresight_simple_func(etmscr, ETMSCR);
+coresight_simple_func(etmidr, ETMIDR);
+coresight_simple_func(etmcr, ETMCR);
+coresight_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_simple_func(etmteevr, ETMTEEVR);
+coresight_simple_func(etmtssvr, ETMTSSCR);
+coresight_simple_func(etmtecr1, ETMTECR1);
+coresight_simple_func(etmtecr2, ETMTECR2);
+
+static struct attribute *coresight_etm_mgmt_attrs[] = {
+       &dev_attr_etmccr.attr,
+       &dev_attr_etmccer.attr,
+       &dev_attr_etmscr.attr,
+       &dev_attr_etmidr.attr,
+       &dev_attr_etmcr.attr,
+       &dev_attr_etmtraceidr.attr,
+       &dev_attr_etmteevr.attr,
+       &dev_attr_etmtssvr.attr,
+       &dev_attr_etmtecr1.attr,
+       &dev_attr_etmtecr2.attr,
+       NULL,
+};
+
+static const struct attribute_group coresight_etm_group = {
+       .attrs = coresight_etm_attrs,
+};
+
+static const struct attribute_group coresight_etm_mgmt_group = {
+       .attrs = coresight_etm_mgmt_attrs,
+       .name = "mgmt",
+};
+
+const struct attribute_group *coresight_etm_groups[] = {
+       &coresight_etm_group,
+       &coresight_etm_mgmt_group,
+       NULL,
+};
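These groups are what the CoreSight core attaches to the ETM device's sysfs node, so each struct etm_config field above ends up as a file a user can read or write. As a rough illustration of driving the even/odd address-comparator pair from userspace, here is a minimal C sketch; the device name "etm0" and the exact sysfs location are assumptions for illustration only, not something this patch defines.

	/* Sketch only: the ETM node name under /sys/bus/coresight/devices/ is hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/coresight/devices/etm0/addr_range", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/*
		 * addr_range_store() parses two hex values, rejects a lower
		 * address greater than the upper one, and requires addr_idx
		 * to point at an even comparator before accepting the write.
		 */
		fprintf(f, "%lx %lx\n", 0x8000UL, 0x9000UL);
		fclose(f);
		return 0;
	}
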
index d630b7ece73521ccf8cd7b320ebd75ecc92eb1d3..d83ab82672e4e136ffb443d033c5c23ccba697f5 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Program Flow Trace driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
 #include <linux/amba/bus.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm.h"
+#include "coresight-etm-perf.h"
 
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
 static int boot_enable;
 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 
@@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
-static inline void etm_writel(struct etm_drvdata *drvdata,
-                             u32 val, u32 off)
-{
-       if (drvdata->use_cp14) {
-               if (etm_writel_cp14(off, val)) {
-                       dev_err(drvdata->dev,
-                               "invalid CP14 access to ETM reg: %#x", off);
-               }
-       } else {
-               writel_relaxed(val, drvdata->base + off);
-       }
-}
-
-static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
-{
-       u32 val;
-
-       if (drvdata->use_cp14) {
-               if (etm_readl_cp14(off, &val)) {
-                       dev_err(drvdata->dev,
-                               "invalid CP14 access to ETM reg: %#x", off);
-               }
-       } else {
-               val = readl_relaxed(drvdata->base + off);
-       }
-
-       return val;
-}
-
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
  * processors, otherwise memory mapped reads/writes will be invalid.
  */
-static void etm_os_unlock(void *info)
+static void etm_os_unlock(struct etm_drvdata *drvdata)
 {
-       struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
        /* Writing any value to ETMOSLAR unlocks the trace registers */
        etm_writel(drvdata, 0x0, ETMOSLAR);
+       drvdata->os_unlock = true;
        isb();
 }
 
@@ -215,1431 +195,450 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
        }
 }
 
-static void etm_set_default(struct etm_drvdata *drvdata)
-{
-       int i;
-
-       drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->enable_event = ETM_HARD_WIRE_RES_A;
-
-       drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
-       drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
-
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               drvdata->cntr_rld_val[i] = 0x0;
-               drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
-               drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
-               drvdata->cntr_val[i] = 0x0;
-       }
-
-       drvdata->seq_curr_state = 0x0;
-       drvdata->ctxid_idx = 0x0;
-       for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
-               drvdata->ctxid_pid[i] = 0x0;
-               drvdata->ctxid_vpid[i] = 0x0;
-       }
-
-       drvdata->ctxid_mask = 0x0;
-}
-
-static void etm_enable_hw(void *info)
+void etm_set_default(struct etm_config *config)
 {
        int i;
-       u32 etmcr;
-       struct etm_drvdata *drvdata = info;
 
-       CS_UNLOCK(drvdata->base);
-
-       /* Turn engine on */
-       etm_clr_pwrdwn(drvdata);
-       /* Apply power to trace registers */
-       etm_set_pwrup(drvdata);
-       /* Make sure all registers are accessible */
-       etm_os_unlock(drvdata);
-
-       etm_set_prog(drvdata);
-
-       etmcr = etm_readl(drvdata, ETMCR);
-       etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
-       etmcr |= drvdata->port_size;
-       etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
-       etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
-       etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
-       etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
-       etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
-       etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
-       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-               etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
-               etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
-       }
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
-               etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
-               etm_writel(drvdata, drvdata->cntr_rld_event[i],
-                          ETMCNTRLDEVRn(i));
-               etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
-       }
-       etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
-       etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
-       etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
-       etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
-       etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
-       etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
-       etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
-       for (i = 0; i < drvdata->nr_ext_out; i++)
-               etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
-       for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-               etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
-       etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
-       etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
-       /* No external input selected */
-       etm_writel(drvdata, 0x0, ETMEXTINSELR);
-       etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
-       /* No auxiliary control selected */
-       etm_writel(drvdata, 0x0, ETMAUXCR);
-       etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
-       /* No VMID comparator value selected */
-       etm_writel(drvdata, 0x0, ETMVMIDCVR);
-
-       /* Ensures trace output is enabled from this ETM */
-       etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
-
-       etm_clr_prog(drvdata);
-       CS_LOCK(drvdata->base);
-
-       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
-}
-
-static int etm_trace_id(struct coresight_device *csdev)
-{
-       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-       unsigned long flags;
-       int trace_id = -1;
-
-       if (!drvdata->enable)
-               return drvdata->traceid;
-       pm_runtime_get_sync(csdev->dev.parent);
-
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-
-       CS_UNLOCK(drvdata->base);
-       trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
-       CS_LOCK(drvdata->base);
-
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(csdev->dev.parent);
-
-       return trace_id;
-}
-
-static int etm_enable(struct coresight_device *csdev)
-{
-       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-       int ret;
-
-       pm_runtime_get_sync(csdev->dev.parent);
-       spin_lock(&drvdata->spinlock);
+       if (WARN_ON_ONCE(!config))
+               return;
 
        /*
-        * Configure the ETM only if the CPU is online.  If it isn't online
-        * hw configuration will take place when 'CPU_STARTING' is received
-        * in @etm_cpu_callback.
+        * Taken verbatim from the TRM:
+        *
+        * To trace all memory:
+        *  set bit [24] in register 0x009, the ETMTECR1, to 1
+        *  set all other bits in register 0x009, the ETMTECR1, to 0
+        *  set all bits in register 0x007, the ETMTECR2, to 0
+        *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
         */
-       if (cpu_online(drvdata->cpu)) {
-               ret = smp_call_function_single(drvdata->cpu,
-                                              etm_enable_hw, drvdata, 1);
-               if (ret)
-                       goto err;
-       }
-
-       drvdata->enable = true;
-       drvdata->sticky_enable = true;
+       config->enable_ctrl1 = BIT(24);
+       config->enable_ctrl2 = 0x0;
+       config->enable_event = ETM_HARD_WIRE_RES_A;
 
-       spin_unlock(&drvdata->spinlock);
-
-       dev_info(drvdata->dev, "ETM tracing enabled\n");
-       return 0;
-err:
-       spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(csdev->dev.parent);
-       return ret;
-}
+       config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+       config->enable_event = ETM_HARD_WIRE_RES_A;
 
-static void etm_disable_hw(void *info)
-{
-       int i;
-       struct etm_drvdata *drvdata = info;
-
-       CS_UNLOCK(drvdata->base);
-       etm_set_prog(drvdata);
-
-       /* Program trace enable to low by using always false event */
-       etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
-
-       /* Read back sequencer and counters for post trace analysis */
-       drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
-
-       for (i = 0; i < drvdata->nr_cntr; i++)
-               drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
-
-       etm_set_pwrdwn(drvdata);
-       CS_LOCK(drvdata->base);
-
-       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
-}
-
-static void etm_disable(struct coresight_device *csdev)
-{
-       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
-       /*
-        * Taking hotplug lock here protects from clocks getting disabled
-        * with tracing being left on (crash scenario) if user disable occurs
-        * after cpu online mask indicates the cpu is offline but before the
-        * DYING hotplug callback is serviced by the ETM driver.
-        */
-       get_online_cpus();
-       spin_lock(&drvdata->spinlock);
-
-       /*
-        * Executing etm_disable_hw on the cpu whose ETM is being disabled
-        * ensures that register writes occur when cpu is powered.
-        */
-       smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
-       drvdata->enable = false;
-
-       spin_unlock(&drvdata->spinlock);
-       put_online_cpus();
-       pm_runtime_put(csdev->dev.parent);
-
-       dev_info(drvdata->dev, "ETM tracing disabled\n");
-}
-
-static const struct coresight_ops_source etm_source_ops = {
-       .trace_id       = etm_trace_id,
-       .enable         = etm_enable,
-       .disable        = etm_disable,
-};
-
-static const struct coresight_ops etm_cs_ops = {
-       .source_ops     = &etm_source_ops,
-};
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_addr_cmp;
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{      unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_cntr;
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ctxid_cmp_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->nr_ctxid_cmp;
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ctxid_cmp);
-
-static ssize_t etmsr_show(struct device *dev,
-                         struct device_attribute *attr, char *buf)
-{
-       unsigned long flags, val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       CS_UNLOCK(drvdata->base);
-
-       val = etm_readl(drvdata, ETMSR);
-
-       CS_LOCK(drvdata->base);
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(etmsr);
-
-static ssize_t reset_store(struct device *dev,
-                          struct device_attribute *attr,
-                          const char *buf, size_t size)
-{
-       int i, ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val) {
-               spin_lock(&drvdata->spinlock);
-               drvdata->mode = ETM_MODE_EXCLUDE;
-               drvdata->ctrl = 0x0;
-               drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
-               drvdata->startstop_ctrl = 0x0;
-               drvdata->addr_idx = 0x0;
-               for (i = 0; i < drvdata->nr_addr_cmp; i++) {
-                       drvdata->addr_val[i] = 0x0;
-                       drvdata->addr_acctype[i] = 0x0;
-                       drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
-               }
-               drvdata->cntr_idx = 0x0;
-
-               etm_set_default(drvdata);
-               spin_unlock(&drvdata->spinlock);
-       }
-
-       return size;
-}
-static DEVICE_ATTR_WO(reset);
-
-static ssize_t mode_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->mode;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->mode = val & ETM_MODE_ALL;
-
-       if (drvdata->mode & ETM_MODE_EXCLUDE)
-               drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
-       else
-               drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
-
-       if (drvdata->mode & ETM_MODE_CYCACC)
-               drvdata->ctrl |= ETMCR_CYC_ACC;
-       else
-               drvdata->ctrl &= ~ETMCR_CYC_ACC;
-
-       if (drvdata->mode & ETM_MODE_STALL) {
-               if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
-                       dev_warn(drvdata->dev, "stall mode not supported\n");
-                       ret = -EINVAL;
-                       goto err_unlock;
-               }
-               drvdata->ctrl |= ETMCR_STALL_MODE;
-        } else
-               drvdata->ctrl &= ~ETMCR_STALL_MODE;
-
-       if (drvdata->mode & ETM_MODE_TIMESTAMP) {
-               if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
-                       dev_warn(drvdata->dev, "timestamp not supported\n");
-                       ret = -EINVAL;
-                       goto err_unlock;
-               }
-               drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
-       } else
-               drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
-
-       if (drvdata->mode & ETM_MODE_CTXID)
-               drvdata->ctrl |= ETMCR_CTXID_SIZE;
-       else
-               drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-
-err_unlock:
-       spin_unlock(&drvdata->spinlock);
-       return ret;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t trigger_event_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->trigger_event;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t trigger_event_store(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->trigger_event = val & ETM_EVENT_MASK;
-
-       return size;
-}
-static DEVICE_ATTR_RW(trigger_event);
-
-static ssize_t enable_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->enable_event;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t enable_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->enable_event = val & ETM_EVENT_MASK;
-
-       return size;
-}
-static DEVICE_ATTR_RW(enable_event);
-
-static ssize_t fifofull_level_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->fifofull_level;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t fifofull_level_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->fifofull_level = val;
-
-       return size;
-}
-static DEVICE_ATTR_RW(fifofull_level);
-
-static ssize_t addr_idx_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->addr_idx;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val >= drvdata->nr_addr_cmp)
-               return -EINVAL;
-
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->addr_idx = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_single_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-
-       val = drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t size)
-{
-       u8 idx;
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EINVAL;
-       }
-
-       drvdata->addr_val[idx] = val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val1, val2;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (idx % 2 != 0) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val1 = drvdata->addr_val[idx];
-       val2 = drvdata->addr_val[idx + 1];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       u8 idx;
-       unsigned long val1, val2;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
-               return -EINVAL;
-       /* Lower address comparator cannot have a higher address value */
-       if (val1 > val2)
-               return -EINVAL;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (idx % 2 != 0) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
-             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
-              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = val1;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
-       drvdata->addr_val[idx + 1] = val2;
-       drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
-       drvdata->enable_ctrl1 |= (1 << (idx/2));
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val = drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       u8 idx;
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
-       drvdata->startstop_ctrl |= (1 << idx);
-       drvdata->enable_ctrl1 |= BIT(25);
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       u8 idx;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       val = drvdata->addr_val[idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       u8 idx;
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       idx = drvdata->addr_idx;
-       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
-             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-               spin_unlock(&drvdata->spinlock);
-               return -EPERM;
-       }
-
-       drvdata->addr_val[idx] = val;
-       drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
-       drvdata->startstop_ctrl |= (1 << (idx + 16));
-       drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_acctype_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->addr_acctype[drvdata->addr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_acctype_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->addr_acctype[drvdata->addr_idx] = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(addr_acctype);
-
-static ssize_t cntr_idx_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->cntr_idx;
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val >= drvdata->nr_cntr)
-               return -EINVAL;
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_idx = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntr_rld_val_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->cntr_rld_val[drvdata->cntr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_val_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_val);
-
-static ssize_t cntr_event_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->cntr_event[drvdata->cntr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_event_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_event);
-
-static ssize_t cntr_rld_event_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       spin_lock(&drvdata->spinlock);
-       val = drvdata->cntr_rld_event[drvdata->cntr_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_event_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
-       spin_unlock(&drvdata->spinlock);
-
-       return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_event);
-
-static ssize_t cntr_val_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       int i, ret = 0;
-       u32 val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       if (!drvdata->enable) {
-               spin_lock(&drvdata->spinlock);
-               for (i = 0; i < drvdata->nr_cntr; i++)
-                       ret += sprintf(buf, "counter %d: %x\n",
-                                      i, drvdata->cntr_val[i]);
-               spin_unlock(&drvdata->spinlock);
-               return ret;
-       }
-
-       for (i = 0; i < drvdata->nr_cntr; i++) {
-               val = etm_readl(drvdata, ETMCNTVRn(i));
-               ret += sprintf(buf, "counter %d: %x\n", i, val);
-       }
-
-       return ret;
-}
-
-static ssize_t cntr_val_store(struct device *dev,
-                             struct device_attribute *attr,
-                             const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+       config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+       config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       for (i = 0; i < ETM_MAX_CNTR; i++) {
+               config->cntr_rld_val[i] = 0x0;
+               config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+               config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+               config->cntr_val[i] = 0x0;
+       }
 
-       spin_lock(&drvdata->spinlock);
-       drvdata->cntr_val[drvdata->cntr_idx] = val;
-       spin_unlock(&drvdata->spinlock);
+       config->seq_curr_state = 0x0;
+       config->ctxid_idx = 0x0;
+       for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+               config->ctxid_pid[i] = 0x0;
+               config->ctxid_vpid[i] = 0x0;
+       }
 
-       return size;
+       config->ctxid_mask = 0x0;
 }
-static DEVICE_ATTR_RW(cntr_val);
 
-static ssize_t seq_12_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+void etm_config_trace_mode(struct etm_config *config)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       u32 flags, mode;
 
-       val = drvdata->seq_12_event;
-       return sprintf(buf, "%#lx\n", val);
-}
+       mode = config->mode;
 
-static ssize_t seq_12_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /* excluding kernel AND user space doesn't make sense */
+       if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+               return;
 
-       drvdata->seq_12_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_12_event);
+       /* nothing to do if neither flag is set */
+       if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+               return;
 
-static ssize_t seq_21_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       flags = (1 << 0 |       /* instruction execute */
+                3 << 3 |       /* ARM instruction */
+                0 << 5 |       /* No data value comparison */
+                0 << 7 |       /* No exact match */
+                0 << 8);       /* Ignore context ID */
 
-       val = drvdata->seq_21_event;
-       return sprintf(buf, "%#lx\n", val);
-}
+       /* No need to worry about single address comparators. */
+       config->enable_ctrl2 = 0x0;
 
-static ssize_t seq_21_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Bit 0 is address range comparator 1 */
+       config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /*
+        * On ETMv3.5:
+        * ETMACTRn[13,11] == Non-secure state comparison control
+        * ETMACTRn[12,10] == Secure state comparison control
+        *
+        * b00 == Match in all modes in this state
+        * b01 == Do not match in any mode in this state
+        * b10 == Match in all modes except user mode in this state
+        * b11 == Match only in user mode in this state
+        */
 
-       drvdata->seq_21_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_21_event);
+       /* Tracing in secure mode is not supported at this time */
+       flags |= (0 << 12 | 1 << 10);
 
-static ssize_t seq_23_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       if (mode & ETM_MODE_EXCL_USER) {
+               /* exclude user, match all modes except user mode */
+               flags |= (1 << 13 | 0 << 11);
+       } else {
+               /* exclude kernel, match only in user mode */
+               flags |= (1 << 13 | 1 << 11);
+       }
 
-       val = drvdata->seq_23_event;
-       return sprintf(buf, "%#lx\n", val);
+       /*
+        * The ETMEEVR register is already set to "hard wire A".  As such
+        * all there is to do is set up an address comparator that spans
+        * the entire address range and configure the state and mode bits.
+        */
+       config->addr_val[0] = (u32) 0x0;
+       config->addr_val[1] = (u32) ~0x0;
+       config->addr_acctype[0] = flags;
+       config->addr_acctype[1] = flags;
+       config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+       config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 }
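/*
 * Editor's illustration -- not part of the patch.  A standalone sketch of
 * how the ETMACTRn access-type word built in etm_config_trace_mode() is
 * assembled; the bit positions follow the comment block above, while the
 * function names below are hypothetical.
 */
#include <stdio.h>

static unsigned int example_acctype(int exclude_user)
{
        unsigned int flags = (1u << 0) |        /* instruction execute */
                             (3u << 3);         /* ARM instruction */

        /* Secure state, ETMACTRn[12,10]: b01 == never match */
        flags |= (0u << 12) | (1u << 10);

        /* Non-secure state, ETMACTRn[13,11] */
        if (exclude_user)
                flags |= (1u << 13) | (0u << 11); /* b10: all modes but user */
        else
                flags |= (1u << 13) | (1u << 11); /* b11: user mode only */

        return flags;
}

int main(void)
{
        printf("exclude user:   %#x\n", example_acctype(1));
        printf("exclude kernel: %#x\n", example_acctype(0));
        return 0;
}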
 
-static ssize_t seq_23_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+
+static int etm_parse_event_config(struct etm_drvdata *drvdata,
+                                 struct perf_event_attr *attr)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_config *config = &drvdata->config;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       if (!attr)
+               return -EINVAL;
 
-       drvdata->seq_23_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_23_event);
+       /* Clear configuration from previous run */
+       memset(config, 0, sizeof(struct etm_config));
 
-static ssize_t seq_31_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       if (attr->exclude_kernel)
+               config->mode = ETM_MODE_EXCL_KERN;
 
-       val = drvdata->seq_31_event;
-       return sprintf(buf, "%#lx\n", val);
-}
+       if (attr->exclude_user)
+               config->mode = ETM_MODE_EXCL_USER;
 
-static ssize_t seq_31_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Always start from the default config */
+       etm_set_default(config);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /*
+        * By default the tracers are configured to trace the whole address
+        * range.  Narrow the field only if requested by user space.
+        */
+       if (config->mode)
+               etm_config_trace_mode(config);
 
-       drvdata->seq_31_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_31_event);
+       /*
+        * At this time only cycle accurate and timestamp options are
+        * available.
+        */
+       if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
+               return -EINVAL;
 
-static ssize_t seq_32_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       config->ctrl = attr->config;
 
-       val = drvdata->seq_32_event;
-       return sprintf(buf, "%#lx\n", val);
+       return 0;
 }
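/*
 * Editor's illustration -- not part of the patch.  The option filtering in
 * etm_parse_event_config() reduced to a standalone check: any bit in the
 * perf attr->config word outside the supported mask is rejected.  The bit
 * values below are placeholders, not the real ETMCR definitions.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CYC_ACC       (1u << 12)  /* stand-in for ETMCR_CYC_ACC */
#define EX_TIMESTAMP_EN  (1u << 28)  /* stand-in for ETMCR_TIMESTAMP_EN */
#define EX_SUPPORTED     (EX_CYC_ACC | EX_TIMESTAMP_EN)

static int example_parse_config(uint64_t attr_config)
{
        if (attr_config & ~(uint64_t)EX_SUPPORTED)
                return -EINVAL;         /* unknown option bits */
        return 0;
}

int main(void)
{
        printf("%d\n", example_parse_config(EX_CYC_ACC));   /* accepted: 0 */
        printf("%d\n", example_parse_config(1u << 3));      /* rejected: -EINVAL */
        return 0;
}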
 
-static ssize_t seq_32_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
+static void etm_enable_hw(void *info)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       int i;
+       u32 etmcr;
+       struct etm_drvdata *drvdata = info;
+       struct etm_config *config = &drvdata->config;
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       CS_UNLOCK(drvdata->base);
 
-       drvdata->seq_32_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(seq_32_event);
+       /* Turn engine on */
+       etm_clr_pwrdwn(drvdata);
+       /* Apply power to trace registers */
+       etm_set_pwrup(drvdata);
+       /* Make sure all registers are accessible */
+       etm_os_unlock(drvdata);
 
-static ssize_t seq_13_event_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       etm_set_prog(drvdata);
+
+       etmcr = etm_readl(drvdata, ETMCR);
+       /* Clear setting from a previous run if need be */
+       etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
+       etmcr |= drvdata->port_size;
+       etmcr |= ETMCR_ETM_EN;
+       etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
+       etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
+       etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
+       etm_writel(drvdata, config->enable_event, ETMTEEVR);
+       etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
+       etm_writel(drvdata, config->fifofull_level, ETMFFLR);
+       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+               etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
+               etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
+       }
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
+               etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
+               etm_writel(drvdata, config->cntr_rld_event[i],
+                          ETMCNTRLDEVRn(i));
+               etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
+       }
+       etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
+       etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
+       etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
+       etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
+       etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
+       etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
+       etm_writel(drvdata, config->seq_curr_state, ETMSQR);
+       for (i = 0; i < drvdata->nr_ext_out; i++)
+               etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
+       for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+               etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
+       etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
+       etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
+       /* No external input selected */
+       etm_writel(drvdata, 0x0, ETMEXTINSELR);
+       etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
+       /* No auxiliary control selected */
+       etm_writel(drvdata, 0x0, ETMAUXCR);
+       etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
+       /* No VMID comparator value selected */
+       etm_writel(drvdata, 0x0, ETMVMIDCVR);
+
+       etm_clr_prog(drvdata);
+       CS_LOCK(drvdata->base);
 
-       val = drvdata->seq_13_event;
-       return sprintf(buf, "%#lx\n", val);
+       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t seq_13_event_store(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t size)
+static int etm_cpu_id(struct coresight_device *csdev)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       drvdata->seq_13_event = val & ETM_EVENT_MASK;
-       return size;
+       return drvdata->cpu;
 }
-static DEVICE_ATTR_RW(seq_13_event);
 
-static ssize_t seq_curr_state_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
+int etm_get_trace_id(struct etm_drvdata *drvdata)
 {
-       unsigned long val, flags;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long flags;
+       int trace_id = -1;
 
-       if (!drvdata->enable) {
-               val = drvdata->seq_curr_state;
+       if (!drvdata)
                goto out;
-       }
+
+       if (!local_read(&drvdata->mode))
+               return drvdata->traceid;
 
        pm_runtime_get_sync(drvdata->dev);
+
        spin_lock_irqsave(&drvdata->spinlock, flags);
 
        CS_UNLOCK(drvdata->base);
-       val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+       trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
        CS_LOCK(drvdata->base);
 
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        pm_runtime_put(drvdata->dev);
-out:
-       return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_curr_state_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       if (val > ETM_SEQ_STATE_MAX_VAL)
-               return -EINVAL;
 
-       drvdata->seq_curr_state = val;
+out:
+       return trace_id;
 
-       return size;
 }
-static DEVICE_ATTR_RW(seq_curr_state);
 
-static ssize_t ctxid_idx_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static int etm_trace_id(struct coresight_device *csdev)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       val = drvdata->ctxid_idx;
-       return sprintf(buf, "%#lx\n", val);
+       return etm_get_trace_id(drvdata);
 }
 
-static ssize_t ctxid_idx_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
+static int etm_enable_perf(struct coresight_device *csdev,
+                          struct perf_event_attr *attr)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       if (val >= drvdata->nr_ctxid_cmp)
+       if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
                return -EINVAL;
 
-       /*
-        * Use spinlock to ensure index doesn't change while it gets
-        * dereferenced multiple times within a spinlock block elsewhere.
-        */
-       spin_lock(&drvdata->spinlock);
-       drvdata->ctxid_idx = val;
-       spin_unlock(&drvdata->spinlock);
+       /* Configure the tracer based on the session's specifics */
+       etm_parse_event_config(drvdata, attr);
+       /* And enable it */
+       etm_enable_hw(drvdata);
 
-       return size;
+       return 0;
 }
-static DEVICE_ATTR_RW(ctxid_idx);
 
-static ssize_t ctxid_pid_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static int etm_enable_sysfs(struct coresight_device *csdev)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
 
        spin_lock(&drvdata->spinlock);
-       val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
-       spin_unlock(&drvdata->spinlock);
-
-       return sprintf(buf, "%#lx\n", val);
-}
 
-static ssize_t ctxid_pid_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       int ret;
-       unsigned long vpid, pid;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /*
+        * Configure the ETM only if the CPU is online.  If it isn't online,
+        * hw configuration will take place when 'CPU_STARTING' is received
+        * in @etm_cpu_callback.
+        */
+       if (cpu_online(drvdata->cpu)) {
+               ret = smp_call_function_single(drvdata->cpu,
+                                              etm_enable_hw, drvdata, 1);
+               if (ret)
+                       goto err;
+       }
 
-       ret = kstrtoul(buf, 16, &vpid);
-       if (ret)
-               return ret;
+       drvdata->sticky_enable = true;
+       spin_unlock(&drvdata->spinlock);
 
-       pid = coresight_vpid_to_pid(vpid);
+       dev_info(drvdata->dev, "ETM tracing enabled\n");
+       return 0;
 
-       spin_lock(&drvdata->spinlock);
-       drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
-       drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
+err:
        spin_unlock(&drvdata->spinlock);
-
-       return size;
+       return ret;
 }
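/*
 * Editor's illustration -- not part of the patch.  The enable-time decision
 * made in etm_enable_sysfs(), modelled as plain C: when the target CPU is
 * online the hardware is programmed right away (via a cross-CPU call in the
 * real driver); when it is offline, programming is deferred to the
 * CPU_STARTING hotplug callback.  All names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct example_tracer {
        bool cpu_online;
        bool enable_requested;
        bool hw_programmed;
};

static void example_enable(struct example_tracer *t)
{
        t->enable_requested = true;
        if (t->cpu_online)
                t->hw_programmed = true;  /* smp_call_function_single() path */
        /* else: the hotplug callback programs the ETM when the CPU comes up */
}

static void example_cpu_starting(struct example_tracer *t)
{
        t->cpu_online = true;
        if (t->enable_requested)
                t->hw_programmed = true;  /* deferred configuration */
}

int main(void)
{
        struct example_tracer t = { .cpu_online = false };

        example_enable(&t);         /* CPU offline: nothing programmed yet */
        example_cpu_starting(&t);   /* CPU comes up: ETM gets programmed */
        printf("programmed: %d\n", t.hw_programmed);
        return 0;
}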
-static DEVICE_ATTR_RW(ctxid_pid);
 
-static ssize_t ctxid_mask_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
+static int etm_enable(struct coresight_device *csdev,
+                     struct perf_event_attr *attr, u32 mode)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       int ret;
+       u32 val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       val = drvdata->ctxid_mask;
-       return sprintf(buf, "%#lx\n", val);
-}
+       val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
 
-static ssize_t ctxid_mask_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Someone is already using the tracer */
+       if (val)
+               return -EBUSY;
+
+       switch (mode) {
+       case CS_MODE_SYSFS:
+               ret = etm_enable_sysfs(csdev);
+               break;
+       case CS_MODE_PERF:
+               ret = etm_enable_perf(csdev, attr);
+               break;
+       default:
+               ret = -EINVAL;
+       }
 
-       ret = kstrtoul(buf, 16, &val);
+       /* The tracer didn't start */
        if (ret)
-               return ret;
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
 
-       drvdata->ctxid_mask = val;
-       return size;
+       return ret;
 }
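/*
 * Editor's illustration -- not part of the patch.  The local_cmpxchg() guard
 * in etm_enable() modelled with C11 atomics: only the transition
 * DISABLED -> <new mode> succeeds, so the sysfs and perf interfaces cannot
 * both own the tracer at the same time.  Names are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { EX_DISABLED, EX_SYSFS, EX_PERF };

static _Atomic int example_mode = EX_DISABLED;

static int example_claim(int mode)
{
        int expected = EX_DISABLED;

        /* Fails if someone already owns the tracer */
        if (!atomic_compare_exchange_strong(&example_mode, &expected, mode))
                return -1;
        return 0;
}

int main(void)
{
        printf("perf claim:  %d\n", example_claim(EX_PERF));   /* 0: owned */
        printf("sysfs claim: %d\n", example_claim(EX_SYSFS));  /* -1: busy */
        return 0;
}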
-static DEVICE_ATTR_RW(ctxid_mask);
 
-static ssize_t sync_freq_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static void etm_disable_hw(void *info)
 {
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       val = drvdata->sync_freq;
-       return sprintf(buf, "%#lx\n", val);
-}
+       int i;
+       struct etm_drvdata *drvdata = info;
+       struct etm_config *config = &drvdata->config;
 
-static ssize_t sync_freq_store(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t size)
-{
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       CS_UNLOCK(drvdata->base);
+       etm_set_prog(drvdata);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       /* Read back sequencer and counters for post trace analysis */
+       config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 
-       drvdata->sync_freq = val & ETM_SYNC_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(sync_freq);
+       for (i = 0; i < drvdata->nr_cntr; i++)
+               config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
 
-static ssize_t timestamp_event_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       etm_set_pwrdwn(drvdata);
+       CS_LOCK(drvdata->base);
 
-       val = drvdata->timestamp_event;
-       return sprintf(buf, "%#lx\n", val);
+       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
 }
 
-static ssize_t timestamp_event_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t size)
+static void etm_disable_perf(struct coresight_device *csdev)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
+       if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+               return;
 
-       drvdata->timestamp_event = val & ETM_EVENT_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(timestamp_event);
+       CS_UNLOCK(drvdata->base);
 
-static ssize_t cpu_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       int val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       /* Setting the prog bit disables tracing immediately */
+       etm_set_prog(drvdata);
 
-       val = drvdata->cpu;
-       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+       /*
+        * There is no way to know when the tracer will be used again, so
+        * power down the tracer.
+        */
+       etm_set_pwrdwn(drvdata);
 
+       CS_LOCK(drvdata->base);
 }
-static DEVICE_ATTR_RO(cpu);
 
-static ssize_t traceid_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+static void etm_disable_sysfs(struct coresight_device *csdev)
 {
-       unsigned long val, flags;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       if (!drvdata->enable) {
-               val = drvdata->traceid;
-               goto out;
-       }
+       /*
+        * Taking the hotplug lock here protects against the clocks being
+        * disabled while tracing is still on (a crash scenario) when a
+        * user-initiated disable arrives after the cpu online mask says the
+        * cpu is offline but before the DYING hotplug callback has been
+        * serviced by the ETM driver.
+        */
+       get_online_cpus();
+       spin_lock(&drvdata->spinlock);
 
-       pm_runtime_get_sync(drvdata->dev);
-       spin_lock_irqsave(&drvdata->spinlock, flags);
-       CS_UNLOCK(drvdata->base);
+       /*
+        * Executing etm_disable_hw on the cpu whose ETM is being disabled
+        * ensures that register writes occur when cpu is powered.
+        */
+       smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 
-       val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+       spin_unlock(&drvdata->spinlock);
+       put_online_cpus();
 
-       CS_LOCK(drvdata->base);
-       spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
-out:
-       return sprintf(buf, "%#lx\n", val);
+       dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
-static ssize_t traceid_store(struct device *dev,
-                            struct device_attribute *attr,
-                            const char *buf, size_t size)
+static void etm_disable(struct coresight_device *csdev)
 {
-       int ret;
-       unsigned long val;
-       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-       ret = kstrtoul(buf, 16, &val);
-       if (ret)
-               return ret;
-
-       drvdata->traceid = val & ETM_TRACEID_MASK;
-       return size;
-}
-static DEVICE_ATTR_RW(traceid);
-
-static struct attribute *coresight_etm_attrs[] = {
-       &dev_attr_nr_addr_cmp.attr,
-       &dev_attr_nr_cntr.attr,
-       &dev_attr_nr_ctxid_cmp.attr,
-       &dev_attr_etmsr.attr,
-       &dev_attr_reset.attr,
-       &dev_attr_mode.attr,
-       &dev_attr_trigger_event.attr,
-       &dev_attr_enable_event.attr,
-       &dev_attr_fifofull_level.attr,
-       &dev_attr_addr_idx.attr,
-       &dev_attr_addr_single.attr,
-       &dev_attr_addr_range.attr,
-       &dev_attr_addr_start.attr,
-       &dev_attr_addr_stop.attr,
-       &dev_attr_addr_acctype.attr,
-       &dev_attr_cntr_idx.attr,
-       &dev_attr_cntr_rld_val.attr,
-       &dev_attr_cntr_event.attr,
-       &dev_attr_cntr_rld_event.attr,
-       &dev_attr_cntr_val.attr,
-       &dev_attr_seq_12_event.attr,
-       &dev_attr_seq_21_event.attr,
-       &dev_attr_seq_23_event.attr,
-       &dev_attr_seq_31_event.attr,
-       &dev_attr_seq_32_event.attr,
-       &dev_attr_seq_13_event.attr,
-       &dev_attr_seq_curr_state.attr,
-       &dev_attr_ctxid_idx.attr,
-       &dev_attr_ctxid_pid.attr,
-       &dev_attr_ctxid_mask.attr,
-       &dev_attr_sync_freq.attr,
-       &dev_attr_timestamp_event.attr,
-       &dev_attr_traceid.attr,
-       &dev_attr_cpu.attr,
-       NULL,
-};
+       u32 mode;
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-#define coresight_simple_func(name, offset)                             \
-static ssize_t name##_show(struct device *_dev,                         \
-                          struct device_attribute *attr, char *buf)    \
-{                                                                       \
-       struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
-                        readl_relaxed(drvdata->base + offset));        \
-}                                                                       \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
-
-static struct attribute *coresight_etm_mgmt_attrs[] = {
-       &dev_attr_etmccr.attr,
-       &dev_attr_etmccer.attr,
-       &dev_attr_etmscr.attr,
-       &dev_attr_etmidr.attr,
-       &dev_attr_etmcr.attr,
-       &dev_attr_etmtraceidr.attr,
-       &dev_attr_etmteevr.attr,
-       &dev_attr_etmtssvr.attr,
-       &dev_attr_etmtecr1.attr,
-       &dev_attr_etmtecr2.attr,
-       NULL,
-};
+       /*
+        * For as long as the tracer isn't disabled, another entity can't
+        * change its status.  As such we can read the status here without
+        * fearing it will change under us.
+        */
+       mode = local_read(&drvdata->mode);
 
-static const struct attribute_group coresight_etm_group = {
-       .attrs = coresight_etm_attrs,
-};
+       switch (mode) {
+       case CS_MODE_DISABLED:
+               break;
+       case CS_MODE_SYSFS:
+               etm_disable_sysfs(csdev);
+               break;
+       case CS_MODE_PERF:
+               etm_disable_perf(csdev);
+               break;
+       default:
+               WARN_ON_ONCE(mode);
+               return;
+       }
 
+       if (mode)
+               local_set(&drvdata->mode, CS_MODE_DISABLED);
+}
 
-static const struct attribute_group coresight_etm_mgmt_group = {
-       .attrs = coresight_etm_mgmt_attrs,
-       .name = "mgmt",
+static const struct coresight_ops_source etm_source_ops = {
+       .cpu_id         = etm_cpu_id,
+       .trace_id       = etm_trace_id,
+       .enable         = etm_enable,
+       .disable        = etm_disable,
 };
 
-static const struct attribute_group *coresight_etm_groups[] = {
-       &coresight_etm_group,
-       &coresight_etm_mgmt_group,
-       NULL,
+static const struct coresight_ops etm_cs_ops = {
+       .source_ops     = &etm_source_ops,
 };
 
 static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
                        etmdrvdata[cpu]->os_unlock = true;
                }
 
-               if (etmdrvdata[cpu]->enable)
+               if (local_read(&etmdrvdata[cpu]->mode))
                        etm_enable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
@@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
 
        case CPU_DYING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
-               if (etmdrvdata[cpu]->enable)
+               if (local_read(&etmdrvdata[cpu]->mode))
                        etm_disable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
@@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info)
        u32 etmccr;
        struct etm_drvdata *drvdata = info;
 
+       /* Make sure all registers are accessible */
+       etm_os_unlock(drvdata);
+
        CS_UNLOCK(drvdata->base);
 
        /* First dummy read */
@@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info)
        CS_LOCK(drvdata->base);
 }
 
-static void etm_init_default_data(struct etm_drvdata *drvdata)
+static void etm_init_trace_id(struct etm_drvdata *drvdata)
 {
-       /*
-        * A trace ID of value 0 is invalid, so let's start at some
-        * random value that fits in 7 bits and will be just as good.
-        */
-       static int etm3x_traceid = 0x10;
-
-       u32 flags = (1 << 0 | /* instruction execute*/
-                    3 << 3 | /* ARM instruction */
-                    0 << 5 | /* No data value comparison */
-                    0 << 7 | /* No exact mach */
-                    0 << 8 | /* Ignore context ID */
-                    0 << 10); /* Security ignored */
-
-       /*
-        * Initial configuration only - guarantees sources handled by
-        * this driver have a unique ID at startup time but not between
-        * all other types of sources.  For that we lean on the core
-        * framework.
-        */
-       drvdata->traceid = etm3x_traceid++;
-       drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
-       drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
-       if (drvdata->nr_addr_cmp >= 2) {
-               drvdata->addr_val[0] = (u32) _stext;
-               drvdata->addr_val[1] = (u32) _etext;
-               drvdata->addr_acctype[0] = flags;
-               drvdata->addr_acctype[1] = flags;
-               drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
-               drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
-       }
-
-       etm_set_default(drvdata);
+       drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
 }
 
 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
        get_online_cpus();
        etmdrvdata[drvdata->cpu] = drvdata;
 
-       if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
-               drvdata->os_unlock = true;
-
        if (smp_call_function_single(drvdata->cpu,
                                     etm_init_arch_data,  drvdata, 1))
                dev_err(dev, "ETM arch init failed\n");
@@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
                ret = -EINVAL;
                goto err_arch_supported;
        }
-       etm_init_default_data(drvdata);
+
+       etm_init_trace_id(drvdata);
+       etm_set_default(&drvdata->config);
 
        desc->type = CORESIGHT_DEV_TYPE_SOURCE;
        desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
                goto err_arch_supported;
        }
 
+       ret = etm_perf_symlink(drvdata->csdev, true);
+       if (ret) {
+               coresight_unregister(drvdata->csdev);
+               goto err_arch_supported;
+       }
+
        pm_runtime_put(&adev->dev);
        dev_info(dev, "%s initialized\n", (char *)id->data);
 
@@ -1877,17 +853,6 @@ err_arch_supported:
        return ret;
 }
 
-static int etm_remove(struct amba_device *adev)
-{
-       struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
-       if (--etm_count == 0)
-               unregister_hotcpu_notifier(&etm_cpu_notifier);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int etm_runtime_suspend(struct device *dev)
 {
@@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = {
                .name   = "coresight-etm3x",
                .owner  = THIS_MODULE,
                .pm     = &etm_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = etm_probe,
-       .remove         = etm_remove,
        .id_table       = etm_ids,
 };
-
-module_amba_driver(etm_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
+builtin_amba_driver(etm_driver);
index a6707642bb238a68db73aec536ec4e8de6d92e39..1c59bd36834c37f8a5ac2d086c7e2896c741dd02 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -32,6 +31,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/perf_event.h>
 #include <asm/sections.h>
 
 #include "coresight-etm4x.h"
@@ -63,6 +63,13 @@ static bool etm4_arch_supported(u8 arch)
        return true;
 }
 
+static int etm4_cpu_id(struct coresight_device *csdev)
+{
+       struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       return drvdata->cpu;
+}
+
 static int etm4_trace_id(struct coresight_device *csdev)
 {
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -72,7 +79,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
        if (!drvdata->enable)
                return drvdata->trcid;
 
-       pm_runtime_get_sync(drvdata->dev);
        spin_lock_irqsave(&drvdata->spinlock, flags);
 
        CS_UNLOCK(drvdata->base);
@@ -81,7 +87,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
        CS_LOCK(drvdata->base);
 
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
-       pm_runtime_put(drvdata->dev);
 
        return trace_id;
 }
@@ -182,12 +187,12 @@ static void etm4_enable_hw(void *info)
        dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 }
 
-static int etm4_enable(struct coresight_device *csdev)
+static int etm4_enable(struct coresight_device *csdev,
+                      struct perf_event_attr *attr, u32 mode)
 {
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret;
 
-       pm_runtime_get_sync(drvdata->dev);
        spin_lock(&drvdata->spinlock);
 
        /*
@@ -207,7 +212,6 @@ static int etm4_enable(struct coresight_device *csdev)
        return 0;
 err:
        spin_unlock(&drvdata->spinlock);
-       pm_runtime_put(drvdata->dev);
        return ret;
 }
 
@@ -256,12 +260,11 @@ static void etm4_disable(struct coresight_device *csdev)
        spin_unlock(&drvdata->spinlock);
        put_online_cpus();
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
 
 static const struct coresight_ops_source etm4_source_ops = {
+       .cpu_id         = etm4_cpu_id,
        .trace_id       = etm4_trace_id,
        .enable         = etm4_enable,
        .disable        = etm4_disable,
@@ -2219,7 +2222,7 @@ static ssize_t name##_show(struct device *_dev,                           \
        return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
                         readl_relaxed(drvdata->base + offset));        \
 }                                                                      \
-DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
 
 coresight_simple_func(trcoslsr, TRCOSLSR);
 coresight_simple_func(trcpdcr, TRCPDCR);
@@ -2684,17 +2687,6 @@ err_coresight_register:
        return ret;
 }
 
-static int etm4_remove(struct amba_device *adev)
-{
-       struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
-       if (--etm4_count == 0)
-               unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
-       return 0;
-}
-
 static struct amba_id etm4_ids[] = {
        {       /* ETM 4.0 - Qualcomm */
                .id     = 0x0003b95d,
@@ -2712,10 +2704,9 @@ static struct amba_id etm4_ids[] = {
 static struct amba_driver etm4x_driver = {
        .drv = {
                .name   = "coresight-etm4x",
+               .suppress_bind_attrs = true,
        },
        .probe          = etm4_probe,
-       .remove         = etm4_remove,
        .id_table       = etm4_ids,
 };
-
-module_amba_driver(etm4x_driver);
+builtin_amba_driver(etm4x_driver);
index 2e36bde7fcb41bbfe3972502be5d1d11aa4c49a5..0600ca30649d204c7e96ee1f9ecde5b52fcaf003 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Funnel driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
 {
        struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
        funnel_enable_hw(drvdata, inport);
 
        dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
        struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
        funnel_disable_hw(drvdata, inport);
-       pm_runtime_put(drvdata->dev);
 
        dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
 }
@@ -226,14 +225,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
        return 0;
 }
 
-static int funnel_remove(struct amba_device *adev)
-{
-       struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int funnel_runtime_suspend(struct device *dev)
 {
@@ -273,13 +264,9 @@ static struct amba_driver funnel_driver = {
                .name   = "coresight-funnel",
                .owner  = THIS_MODULE,
                .pm     = &funnel_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = funnel_probe,
-       .remove         = funnel_remove,
        .id_table       = funnel_ids,
 };
-
-module_amba_driver(funnel_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
+builtin_amba_driver(funnel_driver);
index 62fcd98cc7cfc76798316c3a37a547063ed57955..333eddaed33930cb5bda16089c8732b2f4e1fd97 100644 (file)
 #define TIMEOUT_US             100
 #define BMVAL(val, lsb, msb)   ((val & GENMASK(msb, lsb)) >> lsb)
 
+#define ETM_MODE_EXCL_KERN     BIT(30)
+#define ETM_MODE_EXCL_USER     BIT(31)
+
+enum cs_mode {
+       CS_MODE_DISABLED,
+       CS_MODE_SYSFS,
+       CS_MODE_PERF,
+};
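/*
 * Editor's illustration -- not part of the patch.  The ETM_MODE_EXCL_* flags
 * above sit in the top two bits of a 32-bit mode word, so they can be tested
 * independently of the other mode bits.  The macro names below are local
 * stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MODE_EXCL_KERN (1u << 30)
#define EX_MODE_EXCL_USER (1u << 31)

int main(void)
{
        uint32_t mode = EX_MODE_EXCL_KERN;   /* e.g. trace user space only */

        if ((mode & (EX_MODE_EXCL_KERN | EX_MODE_EXCL_USER)) ==
            (EX_MODE_EXCL_KERN | EX_MODE_EXCL_USER))
                puts("invalid: nothing left to trace");
        else if (mode & EX_MODE_EXCL_KERN)
                puts("kernel excluded");
        else if (mode & EX_MODE_EXCL_USER)
                puts("user space excluded");
        return 0;
}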
+
 static inline void CS_LOCK(void __iomem *addr)
 {
        do {
@@ -52,6 +61,12 @@ static inline void CS_UNLOCK(void __iomem *addr)
        } while (0);
 }
 
+void coresight_disable_path(struct list_head *path);
+int coresight_enable_path(struct list_head *path, u32 mode);
+struct coresight_device *coresight_get_sink(struct list_head *path);
+struct list_head *coresight_build_path(struct coresight_device *csdev);
+void coresight_release_path(struct list_head *path);
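/*
 * Editor's sketch -- not part of the patch.  Assumed lifecycle of the path
 * helpers declared above, as a trace session might use them.  The error
 * conventions (ERR_PTR on failure, 0/-errno returns) are assumptions, since
 * they are not shown in this hunk.
 *
 *      path = coresight_build_path(source);      // source -> ... -> sink
 *      if (IS_ERR(path))
 *              return PTR_ERR(path);
 *      ret = coresight_enable_path(path, mode);  // enable every element
 *      if (ret) {
 *              coresight_release_path(path);
 *              return ret;
 *      }
 *      // ... trace ...
 *      coresight_disable_path(path);
 *      coresight_release_path(path);
 */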
+
 #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
 extern int etm_readl_cp14(u32 off, unsigned int *val);
 extern int etm_writel_cp14(u32 off, u32 val);
index 584059e9e8660f228f785cb87b9200e3b315d675..700f710e4bfa6cc4449d04b2842eaf377f3b9368 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/clk.h>
 #include <linux/coresight.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
 {
        struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
-
        CS_UNLOCK(drvdata->base);
 
        /*
@@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
 
        CS_LOCK(drvdata->base);
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
        return 0;
 }
 
-static int replicator_remove(struct amba_device *adev)
-{
-       struct replicator_state *drvdata = amba_get_drvdata(adev);
-
-       pm_runtime_disable(&adev->dev);
-       coresight_unregister(drvdata->csdev);
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = {
        .drv = {
                .name   = "coresight-replicator-qcom",
                .pm     = &replicator_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = replicator_probe,
-       .remove         = replicator_remove,
        .id_table       = replicator_ids,
 };
-
-module_amba_driver(replicator_driver);
+builtin_amba_driver(replicator_driver);
index 963ac197c2535caf202960af34490e6abd02d4cb..4299c056934048653ac2793eb396c6db18a6fb13 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Replicator driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
 {
        struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(drvdata->dev);
        dev_info(drvdata->dev, "REPLICATOR enabled\n");
        return 0;
 }
@@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
 {
        struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_put(drvdata->dev);
        dev_info(drvdata->dev, "REPLICATOR disabled\n");
 }
 
@@ -127,20 +126,6 @@ out_disable_pm:
        return ret;
 }
 
-static int replicator_remove(struct platform_device *pdev)
-{
-       struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
-       coresight_unregister(drvdata->csdev);
-       pm_runtime_get_sync(&pdev->dev);
-       if (!IS_ERR(drvdata->atclk))
-               clk_disable_unprepare(drvdata->atclk);
-       pm_runtime_put_noidle(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int replicator_runtime_suspend(struct device *dev)
 {
@@ -175,15 +160,11 @@ static const struct of_device_id replicator_match[] = {
 
 static struct platform_driver replicator_driver = {
        .probe          = replicator_probe,
-       .remove         = replicator_remove,
        .driver         = {
                .name   = "coresight-replicator",
                .of_match_table = replicator_match,
                .pm     = &replicator_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
 };
-
 builtin_platform_driver(replicator_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Replicator driver");
index a57c7ec1661f915f9d7bc680cefc3558129d299a..1be191f5d39ca5007a2fe61d33527da35495ac94 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Trace Memory Controller driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
@@ -124,7 +125,7 @@ struct tmc_drvdata {
        bool                    reading;
        char                    *buf;
        dma_addr_t              paddr;
-       void __iomem            *vaddr;
+       void                    *vaddr;
        u32                     size;
        bool                    enable;
        enum tmc_config_type    config_type;
@@ -242,12 +243,9 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
 {
        unsigned long flags;
 
-       pm_runtime_get_sync(drvdata->dev);
-
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
-               pm_runtime_put(drvdata->dev);
                return -EBUSY;
        }
 
@@ -268,7 +266,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
        return 0;
 }
 
-static int tmc_enable_sink(struct coresight_device *csdev)
+static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
 {
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
@@ -381,8 +379,6 @@ out:
        drvdata->enable = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-       pm_runtime_put(drvdata->dev);
-
        dev_info(drvdata->dev, "TMC disabled\n");
 }
 
@@ -766,23 +762,10 @@ err_misc_register:
 err_devm_kzalloc:
        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
                dma_free_coherent(dev, drvdata->size,
-                               &drvdata->paddr, GFP_KERNEL);
+                               drvdata->vaddr, drvdata->paddr);
        return ret;
 }
 
-static int tmc_remove(struct amba_device *adev)
-{
-       struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
-       misc_deregister(&drvdata->miscdev);
-       coresight_unregister(drvdata->csdev);
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
-               dma_free_coherent(drvdata->dev, drvdata->size,
-                                 &drvdata->paddr, GFP_KERNEL);
-
-       return 0;
-}
-
 static struct amba_id tmc_ids[] = {
        {
                .id     = 0x0003b961,
@@ -795,13 +778,9 @@ static struct amba_driver tmc_driver = {
        .drv = {
                .name   = "coresight-tmc",
                .owner  = THIS_MODULE,
+               .suppress_bind_attrs = true,
        },
        .probe          = tmc_probe,
-       .remove         = tmc_remove,
        .id_table       = tmc_ids,
 };
-
-module_amba_driver(tmc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
+builtin_amba_driver(tmc_driver);
index 7214efd10db52f9c2273ea5e0f86193034c8bb18..8fb09d9237abcb47af9642210dbd28e0778ee11d 100644 (file)
@@ -1,4 +1,6 @@
 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight Trace Port Interface Unit driver
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +13,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
        CS_LOCK(drvdata->base);
 }
 
-static int tpiu_enable(struct coresight_device *csdev)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode)
 {
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-       pm_runtime_get_sync(csdev->dev.parent);
        tpiu_enable_hw(drvdata);
 
        dev_info(drvdata->dev, "TPIU enabled\n");
@@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev)
        struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
        tpiu_disable_hw(drvdata);
-       pm_runtime_put(csdev->dev.parent);
 
        dev_info(drvdata->dev, "TPIU disabled\n");
 }
@@ -172,14 +171,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
        return 0;
 }
 
-static int tpiu_remove(struct amba_device *adev)
-{
-       struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
-       coresight_unregister(drvdata->csdev);
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static int tpiu_runtime_suspend(struct device *dev)
 {
@@ -223,13 +214,9 @@ static struct amba_driver tpiu_driver = {
                .name   = "coresight-tpiu",
                .owner  = THIS_MODULE,
                .pm     = &tpiu_dev_pm_ops,
+               .suppress_bind_attrs = true,
        },
        .probe          = tpiu_probe,
-       .remove         = tpiu_remove,
        .id_table       = tpiu_ids,
 };
-
-module_amba_driver(tpiu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
+builtin_amba_driver(tpiu_driver);
index 93738dfbf6313ea09f9f970ce463ec8374b4f661..2ea5961092c1888b08168eeb1eb2fe3bfcb2fcab 100644 (file)
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/device.h>
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "coresight-priv.h"
 
 static DEFINE_MUTEX(coresight_mutex);
 
+/**
+ * struct coresight_node - elements of a path, from source to sink
+ * @csdev:     Address of an element.
+ * @link:      hook to the list.
+ */
+struct coresight_node {
+       struct coresight_device *csdev;
+       struct list_head link;
+};
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated to a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, sysfs_path);
+
 static int coresight_id_match(struct device *dev, void *data)
 {
        int trace_id, i_trace_id;
@@ -68,15 +84,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev)
                                 csdev, coresight_id_match);
 }
 
-static int coresight_find_link_inport(struct coresight_device *csdev)
+static int coresight_find_link_inport(struct coresight_device *csdev,
+                                     struct coresight_device *parent)
 {
        int i;
-       struct coresight_device *parent;
        struct coresight_connection *conn;
 
-       parent = container_of(csdev->path_link.next,
-                             struct coresight_device, path_link);
-
        for (i = 0; i < parent->nr_outport; i++) {
                conn = &parent->conns[i];
                if (conn->child_dev == csdev)
@@ -89,15 +102,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev)
        return 0;
 }
 
-static int coresight_find_link_outport(struct coresight_device *csdev)
+static int coresight_find_link_outport(struct coresight_device *csdev,
+                                      struct coresight_device *child)
 {
        int i;
-       struct coresight_device *child;
        struct coresight_connection *conn;
 
-       child = container_of(csdev->path_link.prev,
-                            struct coresight_device, path_link);
-
        for (i = 0; i < csdev->nr_outport; i++) {
                conn = &csdev->conns[i];
                if (conn->child_dev == child)
@@ -110,13 +120,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev)
        return 0;
 }
 
-static int coresight_enable_sink(struct coresight_device *csdev)
+static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
 {
        int ret;
 
        if (!csdev->enable) {
                if (sink_ops(csdev)->enable) {
-                       ret = sink_ops(csdev)->enable(csdev);
+                       ret = sink_ops(csdev)->enable(csdev, mode);
                        if (ret)
                                return ret;
                }
@@ -138,14 +148,19 @@ static void coresight_disable_sink(struct coresight_device *csdev)
        }
 }
 
-static int coresight_enable_link(struct coresight_device *csdev)
+static int coresight_enable_link(struct coresight_device *csdev,
+                                struct coresight_device *parent,
+                                struct coresight_device *child)
 {
        int ret;
        int link_subtype;
        int refport, inport, outport;
 
-       inport = coresight_find_link_inport(csdev);
-       outport = coresight_find_link_outport(csdev);
+       if (!parent || !child)
+               return -EINVAL;
+
+       inport = coresight_find_link_inport(csdev, parent);
+       outport = coresight_find_link_outport(csdev, child);
        link_subtype = csdev->subtype.link_subtype;
 
        if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
@@ -168,14 +183,19 @@ static int coresight_enable_link(struct coresight_device *csdev)
        return 0;
 }
 
-static void coresight_disable_link(struct coresight_device *csdev)
+static void coresight_disable_link(struct coresight_device *csdev,
+                                  struct coresight_device *parent,
+                                  struct coresight_device *child)
 {
        int i, nr_conns;
        int link_subtype;
        int refport, inport, outport;
 
-       inport = coresight_find_link_inport(csdev);
-       outport = coresight_find_link_outport(csdev);
+       if (!parent || !child)
+               return;
+
+       inport = coresight_find_link_inport(csdev, parent);
+       outport = coresight_find_link_outport(csdev, child);
        link_subtype = csdev->subtype.link_subtype;
 
        if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
@@ -201,7 +221,7 @@ static void coresight_disable_link(struct coresight_device *csdev)
        csdev->enable = false;
 }
 
-static int coresight_enable_source(struct coresight_device *csdev)
+static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 {
        int ret;
 
@@ -213,7 +233,7 @@ static int coresight_enable_source(struct coresight_device *csdev)
 
        if (!csdev->enable) {
                if (source_ops(csdev)->enable) {
-                       ret = source_ops(csdev)->enable(csdev);
+                       ret = source_ops(csdev)->enable(csdev, NULL, mode);
                        if (ret)
                                return ret;
                }
@@ -235,109 +255,188 @@ static void coresight_disable_source(struct coresight_device *csdev)
        }
 }
 
-static int coresight_enable_path(struct list_head *path)
+void coresight_disable_path(struct list_head *path)
 {
-       int ret = 0;
-       struct coresight_device *cd;
-
-       /*
-        * At this point we have a full @path, from source to sink.  The
-        * sink is the first entry and the source the last one.  Go through
-        * all the components and enable them one by one.
-        */
-       list_for_each_entry(cd, path, path_link) {
-               if (cd == list_first_entry(path, struct coresight_device,
-                                          path_link)) {
-                       ret = coresight_enable_sink(cd);
-               } else if (list_is_last(&cd->path_link, path)) {
-                       /*
-                        * Don't enable the source just yet - this needs to
-                        * happen at the very end when all links and sink
-                        * along the path have been configured properly.
-                        */
-                       ;
-               } else {
-                       ret = coresight_enable_link(cd);
+       struct coresight_node *nd;
+       struct coresight_device *csdev, *parent, *child;
+
+       list_for_each_entry(nd, path, link) {
+               csdev = nd->csdev;
+
+               switch (csdev->type) {
+               case CORESIGHT_DEV_TYPE_SINK:
+               case CORESIGHT_DEV_TYPE_LINKSINK:
+                       coresight_disable_sink(csdev);
+                       break;
+               case CORESIGHT_DEV_TYPE_SOURCE:
+                       /* sources are disabled from either sysFS or Perf */
+                       break;
+               case CORESIGHT_DEV_TYPE_LINK:
+                       parent = list_prev_entry(nd, link)->csdev;
+                       child = list_next_entry(nd, link)->csdev;
+                       coresight_disable_link(csdev, parent, child);
+                       break;
+               default:
+                       break;
                }
-               if (ret)
-                       goto err;
        }
+}
 
-       return 0;
-err:
-       list_for_each_entry_continue_reverse(cd, path, path_link) {
-               if (cd == list_first_entry(path, struct coresight_device,
-                                          path_link)) {
-                       coresight_disable_sink(cd);
-               } else if (list_is_last(&cd->path_link, path)) {
-                       ;
-               } else {
-                       coresight_disable_link(cd);
+int coresight_enable_path(struct list_head *path, u32 mode)
+{
+
+       int ret = 0;
+       struct coresight_node *nd;
+       struct coresight_device *csdev, *parent, *child;
+
+       list_for_each_entry_reverse(nd, path, link) {
+               csdev = nd->csdev;
+
+               switch (csdev->type) {
+               case CORESIGHT_DEV_TYPE_SINK:
+               case CORESIGHT_DEV_TYPE_LINKSINK:
+                       ret = coresight_enable_sink(csdev, mode);
+                       if (ret)
+                               goto err;
+                       break;
+               case CORESIGHT_DEV_TYPE_SOURCE:
+                       /* sources are enabled from either sysFS or Perf */
+                       break;
+               case CORESIGHT_DEV_TYPE_LINK:
+                       parent = list_prev_entry(nd, link)->csdev;
+                       child = list_next_entry(nd, link)->csdev;
+                       ret = coresight_enable_link(csdev, parent, child);
+                       if (ret)
+                               goto err;
+                       break;
+               default:
+                       goto err;
                }
        }
 
+out:
        return ret;
+err:
+       coresight_disable_path(path);
+       goto out;
 }
 
-static int coresight_disable_path(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct list_head *path)
 {
-       struct coresight_device *cd;
+       struct coresight_device *csdev;
 
-       list_for_each_entry_reverse(cd, path, path_link) {
-               if (cd == list_first_entry(path, struct coresight_device,
-                                          path_link)) {
-                       coresight_disable_sink(cd);
-               } else if (list_is_last(&cd->path_link, path)) {
-                       /*
-                        * The source has already been stopped, no need
-                        * to do it again here.
-                        */
-                       ;
-               } else {
-                       coresight_disable_link(cd);
+       if (!path)
+               return NULL;
+
+       csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+       if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+           csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+               return NULL;
+
+       return csdev;
+}
+
+/**
+ * _coresight_build_path - recursively build a path from a @csdev to a sink.
+ * @csdev:     The device to start from.
+ * @path:      The list to add devices to.
+ *
+ * The tree of Coresight devices is traversed until an activated sink is
+ * found.  From there the sink is added to the list along with all the
+ * devices that led to that point - the end result is a list from source
+ * to sink. In that list the source is the first device and the sink the
+ * last one.
+ */
+static int _coresight_build_path(struct coresight_device *csdev,
+                                struct list_head *path)
+{
+       int i;
+       bool found = false;
+       struct coresight_node *node;
+       struct coresight_connection *conn;
+
+       /* An activated sink has been found.  Enqueue the element */
+       if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+            csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+               goto out;
+
+       /* Not a sink - recursively explore each port found on this element */
+       for (i = 0; i < csdev->nr_outport; i++) {
+               conn = &csdev->conns[i];
+               if (_coresight_build_path(conn->child_dev, path) == 0) {
+                       found = true;
+                       break;
                }
        }
 
+       if (!found)
+               return -ENODEV;
+
+out:
+       /*
+        * A path from this element to a sink has been found.  The elements
+        * leading to the sink are already enqueued, all that is left to do
+        * is tell the PM runtime core we need this element and add a node
+        * for it.
+        */
+       node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       node->csdev = csdev;
+       list_add(&node->link, path);
+       pm_runtime_get_sync(csdev->dev.parent);
+
        return 0;
 }
 
-static int coresight_build_paths(struct coresight_device *csdev,
-                                struct list_head *path,
-                                bool enable)
+struct list_head *coresight_build_path(struct coresight_device *csdev)
 {
-       int i, ret = -EINVAL;
-       struct coresight_connection *conn;
+       struct list_head *path;
 
-       list_add(&csdev->path_link, path);
+       path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+       if (!path)
+               return NULL;
 
-       if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
-           csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
-           csdev->activated) {
-               if (enable)
-                       ret = coresight_enable_path(path);
-               else
-                       ret = coresight_disable_path(path);
-       } else {
-               for (i = 0; i < csdev->nr_outport; i++) {
-                       conn = &csdev->conns[i];
-                       if (coresight_build_paths(conn->child_dev,
-                                                   path, enable) == 0)
-                               ret = 0;
-               }
+       INIT_LIST_HEAD(path);
+
+       if (_coresight_build_path(csdev, path)) {
+               kfree(path);
+               path = NULL;
        }
 
-       if (list_first_entry(path, struct coresight_device, path_link) != csdev)
-               dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+       return path;
+}
 
-       list_del(&csdev->path_link);
+/**
+ * coresight_release_path - release a previously built path.
+ * @path:      the path to release.
+ *
+ * Go through all the elements of a path and 1) remove them from the list and
+ * 2) free the memory allocated for each node.
+ */
+void coresight_release_path(struct list_head *path)
+{
+       struct coresight_device *csdev;
+       struct coresight_node *nd, *next;
 
-       return ret;
+       list_for_each_entry_safe(nd, next, path, link) {
+               csdev = nd->csdev;
+
+               pm_runtime_put_sync(csdev->dev.parent);
+               list_del(&nd->link);
+               kfree(nd);
+       }
+
+       kfree(path);
+       path = NULL;
 }
 
 int coresight_enable(struct coresight_device *csdev)
 {
        int ret = 0;
-       LIST_HEAD(path);
+       int cpu;
+       struct list_head *path;
 
        mutex_lock(&coresight_mutex);
        if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -348,22 +447,47 @@ int coresight_enable(struct coresight_device *csdev)
        if (csdev->enable)
                goto out;
 
-       if (coresight_build_paths(csdev, &path, true)) {
-               dev_err(&csdev->dev, "building path(s) failed\n");
+       path = coresight_build_path(csdev);
+       if (!path) {
+               pr_err("building path(s) failed\n");
                goto out;
        }
 
-       if (coresight_enable_source(csdev))
-               dev_err(&csdev->dev, "source enable failed\n");
+       ret = coresight_enable_path(path, CS_MODE_SYSFS);
+       if (ret)
+               goto err_path;
+
+       ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+       if (ret)
+               goto err_source;
+
+       /*
+        * When working from sysFS it is important to keep track
+        * of the paths that were created so that they can be
+        * undone in 'coresight_disable()'.  Since there can only
+        * be a single session per tracer (when working from sysFS)
+        * a per-cpu variable will do just fine.
+        */
+       cpu = source_ops(csdev)->cpu_id(csdev);
+       per_cpu(sysfs_path, cpu) = path;
+
 out:
        mutex_unlock(&coresight_mutex);
        return ret;
+
+err_source:
+       coresight_disable_path(path);
+
+err_path:
+       coresight_release_path(path);
+       goto out;
 }
 EXPORT_SYMBOL_GPL(coresight_enable);
 
 void coresight_disable(struct coresight_device *csdev)
 {
-       LIST_HEAD(path);
+       int cpu;
+       struct list_head *path;
 
        mutex_lock(&coresight_mutex);
        if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -373,9 +497,12 @@ void coresight_disable(struct coresight_device *csdev)
        if (!csdev->enable)
                goto out;
 
+       cpu = source_ops(csdev)->cpu_id(csdev);
+       path = per_cpu(sysfs_path, cpu);
        coresight_disable_source(csdev);
-       if (coresight_build_paths(csdev, &path, false))
-               dev_err(&csdev->dev, "releasing path(s) failed\n");
+       coresight_disable_path(path);
+       coresight_release_path(path);
+       per_cpu(sysfs_path, cpu) = NULL;
 
 out:
        mutex_unlock(&coresight_mutex);
@@ -481,6 +608,8 @@ static void coresight_device_release(struct device *dev)
 {
        struct coresight_device *csdev = to_coresight_device(dev);
 
+       kfree(csdev->conns);
+       kfree(csdev->refcnt);
        kfree(csdev);
 }
 
@@ -536,7 +665,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
         * are hooked-up with each newly added component.
         */
        bus_for_each_dev(&coresight_bustype, NULL,
-                                csdev, coresight_orphan_match);
+                        csdev, coresight_orphan_match);
 }
 
 
@@ -568,6 +697,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
 
                if (dev) {
                        conn->child_dev = to_coresight_device(dev);
+                       /* and put reference from 'bus_find_device()' */
+                       put_device(dev);
                } else {
                        csdev->orphan = true;
                        conn->child_dev = NULL;
@@ -575,6 +706,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
        }
 }
 
+static int coresight_remove_match(struct device *dev, void *data)
+{
+       int i;
+       struct coresight_device *csdev, *iterator;
+       struct coresight_connection *conn;
+
+       csdev = data;
+       iterator = to_coresight_device(dev);
+
+       /* No need to check oneself */
+       if (csdev == iterator)
+               return 0;
+
+       /*
+        * Cycle through all the connections of that component.  If we find
+        * a connection whose name matches @csdev, remove it.
+        */
+       for (i = 0; i < iterator->nr_outport; i++) {
+               conn = &iterator->conns[i];
+
+               if (conn->child_dev == NULL)
+                       continue;
+
+               if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+                       iterator->orphan = true;
+                       conn->child_dev = NULL;
+                       /* No need to continue */
+                       break;
+               }
+       }
+
+       /*
+        * Returning '0' ensures that all known components on the
+        * bus will be checked.
+        */
+       return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+       bus_for_each_dev(&coresight_bustype, NULL,
+                        csdev, coresight_remove_match);
+}
+
 /**
  * coresight_timeout - loop until a bit has changed to a specific state.
  * @addr: base address of the area of interest.
@@ -713,13 +888,8 @@ EXPORT_SYMBOL_GPL(coresight_register);
 
 void coresight_unregister(struct coresight_device *csdev)
 {
-       mutex_lock(&coresight_mutex);
-
-       kfree(csdev->conns);
+       /* Remove references to that device in the topology */
+       coresight_remove_conns(csdev);
        device_unregister(&csdev->dev);
-
-       mutex_unlock(&coresight_mutex);
 }
 EXPORT_SYMBOL_GPL(coresight_unregister);
-
-MODULE_LICENSE("GPL v2");
index b0973617826f62b41b31072a7ca3a7499319d4ad..b68da1888fd515879a43df8f6173dd77cdb8754e 100644 (file)
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/slab.h>
@@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev,
                return -ENOMEM;
 
        /* Children connected to this component via @outports */
-        pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+       pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
                                          sizeof(*pdata->child_names),
                                          GFP_KERNEL);
        if (!pdata->child_names)
index b7a9073d968b57bd8f2b7f5c3d0fd92db23a1301..1b412f8a56b5ef85546b6e9700388f652c3c0c53 100644 (file)
@@ -1,5 +1,6 @@
 config INTEL_TH
        tristate "Intel(R) Trace Hub controller"
+       depends on HAS_DMA && HAS_IOMEM
        help
          Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that
          produce, switch and output trace data from multiple hardware and
index 165d3001c3015466e605b092358820bbe4327e64..4272f2ce5f6eef8e2c0b4309137f9c828bb9db14 100644 (file)
@@ -124,17 +124,34 @@ static struct device_type intel_th_source_device_type = {
        .release        = intel_th_device_release,
 };
 
+static struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+       /*
+        * subdevice tree is flat: if this one is not a switch, its
+        * parent must be
+        */
+       if (thdev->type != INTEL_TH_SWITCH)
+               thdev = to_intel_th_hub(thdev);
+
+       if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
+               return NULL;
+
+       return dev_get_drvdata(thdev->dev.parent);
+}
+
 static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
                                     kuid_t *uid, kgid_t *gid)
 {
        struct intel_th_device *thdev = to_intel_th_device(dev);
+       struct intel_th *th = to_intel_th(thdev);
        char *node;
 
        if (thdev->id >= 0)
-               node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name,
-                                thdev->id);
+               node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id,
+                                thdev->name, thdev->id);
        else
-               node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name);
+               node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id,
+                                thdev->name);
 
        return node;
 }
@@ -319,6 +336,7 @@ static struct intel_th_subdevice {
        unsigned                nres;
        unsigned                type;
        unsigned                otype;
+       unsigned                scrpd;
        int                     id;
 } intel_th_subdevices[TH_SUBDEVICE_MAX] = {
        {
@@ -352,6 +370,7 @@ static struct intel_th_subdevice {
                .id     = 0,
                .type   = INTEL_TH_OUTPUT,
                .otype  = GTH_MSU,
+               .scrpd  = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
        },
        {
                .nres   = 2,
@@ -371,6 +390,7 @@ static struct intel_th_subdevice {
                .id     = 1,
                .type   = INTEL_TH_OUTPUT,
                .otype  = GTH_MSU,
+               .scrpd  = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
        },
        {
                .nres   = 2,
@@ -403,6 +423,7 @@ static struct intel_th_subdevice {
                .name   = "pti",
                .type   = INTEL_TH_OUTPUT,
                .otype  = GTH_PTI,
+               .scrpd  = SCRPD_PTI_IS_PRIM_DEST,
        },
        {
                .nres   = 1,
@@ -477,6 +498,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
                        thdev->dev.devt = MKDEV(th->major, i);
                        thdev->output.type = subdev->otype;
                        thdev->output.port = -1;
+                       thdev->output.scratchpad = subdev->scrpd;
                }
 
                err = device_add(&thdev->dev);
@@ -579,6 +601,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
        }
        th->dev = dev;
 
+       dev_set_drvdata(dev, th);
+
        err = intel_th_populate(th, devres, ndevres, irq);
        if (err)
                goto err_chrdev;
index 2dc5378ccd3aa3bb238416396d9fa1d7cbf910a4..9beea0b54231f720bea4570ad56a6a54665b55f6 100644 (file)
@@ -146,24 +146,6 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port)
        iowrite32(val, gth->base + reg);
 }
 
-/*static int gth_master_get(struct gth_device *gth, unsigned int master)
-{
-       unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
-       unsigned int shift = (master & 0x7) * 4;
-       u32 val;
-
-       if (master >= 256) {
-               reg = REG_GTH_GSWTDEST;
-               shift = 0;
-       }
-
-       val = ioread32(gth->base + reg);
-       val &= (0xf << shift);
-       val >>= shift;
-
-       return val ? val & 0x7 : -1;
-       }*/
-
 static ssize_t master_attr_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
@@ -304,6 +286,10 @@ static int intel_th_gth_reset(struct gth_device *gth)
        if (scratchpad & SCRPD_DEBUGGER_IN_USE)
                return -EBUSY;
 
+       /* Always save/restore STH and TU registers in S0ix entry/exit */
+       scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+       iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+
        /* output ports */
        for (port = 0; port < 8; port++) {
                if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
@@ -506,6 +492,10 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
        if (!count)
                dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
                        output->port);
+
+       reg = ioread32(gth->base + REG_GTH_SCRPD0);
+       reg &= ~output->scratchpad;
+       iowrite32(reg, gth->base + REG_GTH_SCRPD0);
 }
 
 /**
@@ -520,7 +510,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
                                struct intel_th_output *output)
 {
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
-       u32 scr = 0xfc0000;
+       u32 scr = 0xfc0000, scrpd;
        int master;
 
        spin_lock(&gth->gth_lock);
@@ -535,6 +525,10 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
        output->active = true;
        spin_unlock(&gth->gth_lock);
 
+       scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
+       scrpd |= output->scratchpad;
+       iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
+
        iowrite32(scr, gth->base + REG_GTH_SCR);
        iowrite32(0, gth->base + REG_GTH_SCR2);
 }
index 3b714b7a61dbc1d614c7ad66917d6fd8af5e3319..56f0d2620577ca48f6b8b66e5cbf96a28e8b9318 100644 (file)
@@ -57,9 +57,6 @@ enum {
        REG_GTH_SCRPD3          = 0xec, /* ScratchPad[3] */
 };
 
-/* Externall debugger is using Intel TH */
-#define SCRPD_DEBUGGER_IN_USE  BIT(24)
-
 /* waiting for Pipeline Empty bit(s) to assert for GTH */
 #define GTH_PLE_WAITLOOP_DEPTH 10000
 
index 57fd72b20fae3135aa599475ce91e113abc4147d..eedd09332db6caf2eec08cfe69f2e1ac1160319a 100644 (file)
@@ -30,6 +30,7 @@ enum {
  * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
  * @port:      output port number, assigned by the switch
  * @type:      GTH_{MSU,CTP,PTI}
+ * @scratchpad:        scratchpad bits to flag when this output is enabled
  * @multiblock:        true for multiblock output configuration
  * @active:    true when this output is enabled
  *
@@ -41,6 +42,7 @@ enum {
 struct intel_th_output {
        int             port;
        unsigned int    type;
+       unsigned int    scratchpad;
        bool            multiblock;
        bool            active;
 };
@@ -241,4 +243,43 @@ enum {
        GTH_PTI = 4,    /* MIPI-PTI */
 };
 
+/*
+ * Scratchpad bits: tell firmware and external debuggers
+ * what we are up to.
+ */
+enum {
+       /* Memory is the primary destination */
+       SCRPD_MEM_IS_PRIM_DEST          = BIT(0),
+       /* XHCI DbC is the primary destination */
+       SCRPD_DBC_IS_PRIM_DEST          = BIT(1),
+       /* PTI is the primary destination */
+       SCRPD_PTI_IS_PRIM_DEST          = BIT(2),
+       /* BSSB is the primary destination */
+       SCRPD_BSSB_IS_PRIM_DEST         = BIT(3),
+       /* PTI is the alternate destination */
+       SCRPD_PTI_IS_ALT_DEST           = BIT(4),
+       /* BSSB is the alternate destination */
+       SCRPD_BSSB_IS_ALT_DEST          = BIT(5),
+       /* DeepSx exit occurred */
+       SCRPD_DEEPSX_EXIT               = BIT(6),
+       /* S4 exit occurred */
+       SCRPD_S4_EXIT                   = BIT(7),
+       /* S5 exit occurred */
+       SCRPD_S5_EXIT                   = BIT(8),
+       /* MSU controller 0/1 is enabled */
+       SCRPD_MSC0_IS_ENABLED           = BIT(9),
+       SCRPD_MSC1_IS_ENABLED           = BIT(10),
+       /* Sx exit occurred */
+       SCRPD_SX_EXIT                   = BIT(11),
+       /* Trigger Unit is enabled */
+       SCRPD_TRIGGER_IS_ENABLED        = BIT(12),
+       SCRPD_ODLA_IS_ENABLED           = BIT(13),
+       SCRPD_SOCHAP_IS_ENABLED         = BIT(14),
+       SCRPD_STH_IS_ENABLED            = BIT(15),
+       SCRPD_DCIH_IS_ENABLED           = BIT(16),
+       SCRPD_VER_IS_ENABLED            = BIT(17),
+       /* External debugger is using Intel TH */
+       SCRPD_DEBUGGER_IN_USE           = BIT(24),
+};
+
 #endif
index 70ca27e4560214f2f58dc93cf2fad57fbe2e8990..d9d6022c5aca42bbf959d214abb76b5ee3fe8704 100644 (file)
@@ -408,7 +408,7 @@ msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
                 * Second time (wrap_count==1), it's just like any other block,
                 * containing data in the range of [MSC_BDESC..data_bytes].
                 */
-               if (iter->block == iter->start_block && iter->wrap_count) {
+               if (iter->block == iter->start_block && iter->wrap_count == 2) {
                        tocopy = DATA_IN_PAGE - data_bytes;
                        src += data_bytes;
                }
@@ -1112,12 +1112,11 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
                size = msc->nr_pages << PAGE_SHIFT;
 
        if (!size)
-               return 0;
+               goto put_count;
 
-       if (off >= size) {
-               len = 0;
+       if (off >= size)
                goto put_count;
-       }
+
        if (off + len >= size)
                len = size - off;
 
index 641e87936064b72c4a2fca2f93eadd83c9d6cdd6..bca7a2ac00d63ab50ba04e12299d9178608f4218 100644 (file)
@@ -46,8 +46,6 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
        if (IS_ERR(th))
                return PTR_ERR(th);
 
-       pci_set_drvdata(pdev, th);
-
        return 0;
 }
 
@@ -67,6 +65,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
                .driver_data = (kernel_ulong_t)0,
        },
+       {
+               /* Apollo Lake */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e),
+               .driver_data = (kernel_ulong_t)0,
+       },
+       {
+               /* Broxton */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
+               .driver_data = (kernel_ulong_t)0,
+       },
        { 0 },
 };
 
index 56101c33e10f8d8bc668ef8a8d3a89f6fcd24343..e1aee61dd7b394425c825b649971825bbec33381 100644 (file)
@@ -94,10 +94,13 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
        case STP_PACKET_TRIG:
                if (flags & STP_PACKET_TIMESTAMPED)
                        reg += 4;
-               iowrite8(*payload, sth->base + reg);
+               writeb_relaxed(*payload, sth->base + reg);
                break;
 
        case STP_PACKET_MERR:
+               if (size > 4)
+                       size = 4;
+
                sth_iowrite(&out->MERR, payload, size);
                break;
 
@@ -107,8 +110,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
                else
                        outp = (u64 __iomem *)&out->FLAG;
 
-               size = 1;
-               sth_iowrite(outp, payload, size);
+               size = 0;
+               writeb_relaxed(0, outp);
                break;
 
        case STP_PACKET_USER:
@@ -129,6 +132,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
 
                sth_iowrite(outp, payload, size);
                break;
+       default:
+               return -ENOTSUPP;
        }
 
        return size;
index 83e9f591a54b356150a7e2090e9a14473b6f96a0..847a39b353078618fa9009691598b4ed1d5ef583 100644 (file)
@@ -1,6 +1,7 @@
 config STM
        tristate "System Trace Module devices"
        select CONFIGFS_FS
+       select SRCU
        help
          A System Trace Module (STM) is a device exporting data in System
          Trace Protocol (STP) format as defined by MIPI STP standards.
@@ -8,6 +9,8 @@ config STM
 
          Say Y here to enable System Trace Module device support.
 
+if STM
+
 config STM_DUMMY
        tristate "Dummy STM driver"
        help
@@ -24,3 +27,16 @@ config STM_SOURCE_CONSOLE
 
          If you want to send kernel console messages over STM devices,
          say Y.
+
+config STM_SOURCE_HEARTBEAT
+       tristate "Heartbeat over STM devices"
+       help
+         This is a kernel space trace source that sends periodic
+         heartbeat messages to trace hosts over STM devices. It is
+         also useful for testing stm class drivers and the stm class
+         framework itself.
+
+         If you want to send heartbeat messages over STM devices,
+         say Y.
+
+endif
index f9312c38dd7a8bcbfd1fce502c600229f4ff2930..a9ce3d487e5787d18eafddd06fa1be1b611ce457 100644 (file)
@@ -5,5 +5,7 @@ stm_core-y              := core.o policy.o
 obj-$(CONFIG_STM_DUMMY)        += dummy_stm.o
 
 obj-$(CONFIG_STM_SOURCE_CONSOLE)       += stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT)     += stm_heartbeat.o
 
 stm_console-y          := console.o
+stm_heartbeat-y                := heartbeat.o
index b6445d9e54533d224a89fbe98ad7fcfad52ac19d..de80d45d8df9667085806d51dc0aeda82f01f2a0 100644 (file)
@@ -113,6 +113,7 @@ struct stm_device *stm_find_device(const char *buf)
 
        stm = to_stm_device(dev);
        if (!try_module_get(stm->owner)) {
+               /* matches class_find_device() above */
                put_device(dev);
                return NULL;
        }
@@ -125,7 +126,7 @@ struct stm_device *stm_find_device(const char *buf)
  * @stm:       stm device, previously acquired by stm_find_device()
  *
  * This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
  */
 void stm_put_device(struct stm_device *stm)
 {
@@ -185,6 +186,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
 {
        struct stp_master *master = stm_master(stm, output->master);
 
+       lockdep_assert_held(&stm->mc_lock);
+       lockdep_assert_held(&output->lock);
+
        if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
                return;
 
@@ -199,6 +203,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
 {
        struct stp_master *master = stm_master(stm, output->master);
 
+       lockdep_assert_held(&stm->mc_lock);
+       lockdep_assert_held(&output->lock);
+
        bitmap_release_region(&master->chan_map[0], output->channel,
                              ilog2(output->nr_chans));
 
@@ -233,7 +240,7 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
        return -1;
 }
 
-static unsigned int
+static int
 stm_find_master_chan(struct stm_device *stm, unsigned int width,
                     unsigned int *mstart, unsigned int mend,
                     unsigned int *cstart, unsigned int cend)
@@ -288,12 +295,13 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
        }
 
        spin_lock(&stm->mc_lock);
+       spin_lock(&output->lock);
        /* output is already assigned -- shouldn't happen */
        if (WARN_ON_ONCE(output->nr_chans))
                goto unlock;
 
        ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
-       if (ret)
+       if (ret < 0)
                goto unlock;
 
        output->master = midx;
@@ -304,6 +312,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
 
        ret = 0;
 unlock:
+       spin_unlock(&output->lock);
        spin_unlock(&stm->mc_lock);
 
        return ret;
@@ -312,11 +321,18 @@ unlock:
 static void stm_output_free(struct stm_device *stm, struct stm_output *output)
 {
        spin_lock(&stm->mc_lock);
+       spin_lock(&output->lock);
        if (output->nr_chans)
                stm_output_disclaim(stm, output);
+       spin_unlock(&output->lock);
        spin_unlock(&stm->mc_lock);
 }
 
+static void stm_output_init(struct stm_output *output)
+{
+       spin_lock_init(&output->lock);
+}
+
 static int major_match(struct device *dev, const void *data)
 {
        unsigned int major = *(unsigned int *)data;
@@ -339,6 +355,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
        if (!stmf)
                return -ENOMEM;
 
+       stm_output_init(&stmf->output);
        stmf->stm = to_stm_device(dev);
 
        if (!try_module_get(stmf->stm->owner))
@@ -349,6 +366,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
        return nonseekable_open(inode, file);
 
 err_free:
+       /* matches class_find_device() above */
+       put_device(dev);
        kfree(stmf);
 
        return err;
@@ -357,9 +376,19 @@ err_free:
 static int stm_char_release(struct inode *inode, struct file *file)
 {
        struct stm_file *stmf = file->private_data;
+       struct stm_device *stm = stmf->stm;
+
+       if (stm->data->unlink)
+               stm->data->unlink(stm->data, stmf->output.master,
+                                 stmf->output.channel);
 
-       stm_output_free(stmf->stm, &stmf->output);
-       stm_put_device(stmf->stm);
+       stm_output_free(stm, &stmf->output);
+
+       /*
+        * matches the stm_char_open()'s
+        * class_find_device() + try_module_get()
+        */
+       stm_put_device(stm);
        kfree(stmf);
 
        return 0;
@@ -380,8 +409,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
        return ret;
 }
 
-static void stm_write(struct stm_data *data, unsigned int master,
-                     unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+                         unsigned int channel, const char *buf, size_t count)
 {
        unsigned int flags = STP_PACKET_TIMESTAMPED;
        const unsigned char *p = buf, nil = 0;
@@ -393,9 +422,14 @@ static void stm_write(struct stm_data *data, unsigned int master,
                sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
                                  sz, p);
                flags = 0;
+
+               if (sz < 0)
+                       break;
        }
 
        data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+       return pos;
 }
 
 static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +440,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
        char *kbuf;
        int err;
 
+       if (count + 1 > PAGE_SIZE)
+               count = PAGE_SIZE - 1;
+
        /*
         * if no m/c have been assigned to this writer up to this
         * point, use "default" policy entry
@@ -430,8 +467,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
                return -EFAULT;
        }
 
-       stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
-                 count);
+       count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+                         kbuf, count);
 
        kfree(kbuf);
 
@@ -515,10 +552,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
                ret = stm->data->link(stm->data, stmf->output.master,
                                      stmf->output.channel);
 
-       if (ret) {
+       if (ret)
                stm_output_free(stmf->stm, &stmf->output);
-               stm_put_device(stmf->stm);
-       }
 
 err_free:
        kfree(id);
@@ -618,7 +653,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
        if (!stm_data->packet || !stm_data->sw_nchannels)
                return -EINVAL;
 
-       nmasters = stm_data->sw_end - stm_data->sw_start;
+       nmasters = stm_data->sw_end - stm_data->sw_start + 1;
        stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
        if (!stm)
                return -ENOMEM;
@@ -641,6 +676,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
        if (err)
                goto err_device;
 
+       mutex_init(&stm->link_mutex);
        spin_lock_init(&stm->link_lock);
        INIT_LIST_HEAD(&stm->link_list);
 
@@ -654,6 +690,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
        return 0;
 
 err_device:
+       /* matches device_initialize() above */
        put_device(&stm->dev);
 err_free:
        kfree(stm);
@@ -662,20 +699,28 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(stm_register_device);
 
-static void __stm_source_link_drop(struct stm_source_device *src,
-                                  struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+                                 struct stm_device *stm);
 
 void stm_unregister_device(struct stm_data *stm_data)
 {
        struct stm_device *stm = stm_data->stm;
        struct stm_source_device *src, *iter;
-       int i;
+       int i, ret;
 
-       spin_lock(&stm->link_lock);
+       mutex_lock(&stm->link_mutex);
        list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
-               __stm_source_link_drop(src, stm);
+               ret = __stm_source_link_drop(src, stm);
+               /*
+                * src <-> stm link must not change under the same
+                * stm::link_mutex, so complain loudly if it has;
+                * also in this situation ret!=0 means this src is
+                * not connected to this stm and it should be otherwise
+                * safe to proceed with the tear-down of stm.
+                */
+               WARN_ON_ONCE(ret);
        }
-       spin_unlock(&stm->link_lock);
+       mutex_unlock(&stm->link_mutex);
 
        synchronize_srcu(&stm_source_srcu);
 
@@ -686,7 +731,7 @@ void stm_unregister_device(struct stm_data *stm_data)
                stp_policy_unbind(stm->policy);
        mutex_unlock(&stm->policy_mutex);
 
-       for (i = 0; i < stm->sw_nmasters; i++)
+       for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
                stp_master_free(stm, i);
 
        device_unregister(&stm->dev);
@@ -694,6 +739,17 @@ void stm_unregister_device(struct stm_data *stm_data)
 }
 EXPORT_SYMBOL_GPL(stm_unregister_device);
 
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ *   stm::link_mutex
+ *     stm::link_lock
+ *       src::link_lock
+ */
+
 /**
  * stm_source_link_add() - connect an stm_source device to an stm device
  * @src:       stm_source device
@@ -710,6 +766,7 @@ static int stm_source_link_add(struct stm_source_device *src,
        char *id;
        int err;
 
+       mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
 
@@ -719,6 +776,7 @@ static int stm_source_link_add(struct stm_source_device *src,
 
        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
+       mutex_unlock(&stm->link_mutex);
 
        id = kstrdup(src->data->name, GFP_KERNEL);
        if (id) {
@@ -753,9 +811,9 @@ static int stm_source_link_add(struct stm_source_device *src,
 
 fail_free_output:
        stm_output_free(stm, &src->output);
-       stm_put_device(stm);
 
 fail_detach:
+       mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
 
@@ -764,6 +822,7 @@ fail_detach:
 
        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
+       mutex_unlock(&stm->link_mutex);
 
        return err;
 }
@@ -776,28 +835,55 @@ fail_detach:
  * If @stm is @src::link, disconnect them from one another and put the
  * reference on the @stm device.
  *
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
  */
-static void __stm_source_link_drop(struct stm_source_device *src,
-                                  struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+                                 struct stm_device *stm)
 {
        struct stm_device *link;
+       int ret = 0;
+
+       lockdep_assert_held(&stm->link_mutex);
 
+       /* for stm::link_list modification, we hold both mutex and spinlock */
+       spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
        link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
-       if (WARN_ON_ONCE(link != stm)) {
-               spin_unlock(&src->link_lock);
-               return;
+
+       /*
+        * The linked device may have changed since we last looked, because
+        * we weren't holding the src::link_lock back then; if this is the
+        * case, tell the caller to retry.
+        */
+       if (link != stm) {
+               ret = -EAGAIN;
+               goto unlock;
        }
 
        stm_output_free(link, &src->output);
-       /* caller must hold stm::link_lock */
        list_del_init(&src->link_entry);
        /* matches stm_find_device() from stm_source_link_store() */
        stm_put_device(link);
        rcu_assign_pointer(src->link, NULL);
 
+unlock:
        spin_unlock(&src->link_lock);
+       spin_unlock(&stm->link_lock);
+
+       /*
+        * Call the unlink callbacks for both source and stm, when we know
+        * that we have actually performed the unlinking.
+        */
+       if (!ret) {
+               if (src->data->unlink)
+                       src->data->unlink(src->data);
+
+               if (stm->data->unlink)
+                       stm->data->unlink(stm->data, src->output.master,
+                                         src->output.channel);
+       }
+
+       return ret;
 }
 
 /**
@@ -813,21 +899,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
 static void stm_source_link_drop(struct stm_source_device *src)
 {
        struct stm_device *stm;
-       int idx;
+       int idx, ret;
 
+retry:
        idx = srcu_read_lock(&stm_source_srcu);
+       /*
+        * The stm device will be valid for the duration of this
+        * read section, but the link may change before we grab
+        * the src::link_lock in __stm_source_link_drop().
+        */
        stm = srcu_dereference(src->link, &stm_source_srcu);
 
+       ret = 0;
        if (stm) {
-               if (src->data->unlink)
-                       src->data->unlink(src->data);
-
-               spin_lock(&stm->link_lock);
-               __stm_source_link_drop(src, stm);
-               spin_unlock(&stm->link_lock);
+               mutex_lock(&stm->link_mutex);
+               ret = __stm_source_link_drop(src, stm);
+               mutex_unlock(&stm->link_mutex);
        }
 
        srcu_read_unlock(&stm_source_srcu, idx);
+
+       /* if it did change, retry */
+       if (ret == -EAGAIN)
+               goto retry;
 }
 
 static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +956,10 @@ static ssize_t stm_source_link_store(struct device *dev,
                return -EINVAL;
 
        err = stm_source_link_add(src, link);
-       if (err)
+       if (err) {
+               /* matches the stm_find_device() above */
                stm_put_device(link);
+       }
 
        return err ? : count;
 }
@@ -925,6 +1021,7 @@ int stm_source_register_device(struct device *parent,
        if (err)
                goto err;
 
+       stm_output_init(&src->output);
        spin_lock_init(&src->link_lock);
        INIT_LIST_HEAD(&src->link_entry);
        src->data = data;
@@ -973,9 +1070,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan,
 
        stm = srcu_dereference(src->link, &stm_source_srcu);
        if (stm)
-               stm_write(stm->data, src->output.master,
-                         src->output.channel + chan,
-                         buf, count);
+               count = stm_write(stm->data, src->output.master,
+                                 src->output.channel + chan,
+                                 buf, count);
        else
                count = -ENODEV;
 
index 3709bef0b21ff2d4ffafe29347e1f9872fbe17de..310adf57e7a178034c831c7baed855117b257890 100644 (file)
@@ -40,22 +40,75 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
        return size;
 }
 
-static struct stm_data dummy_stm = {
-       .name           = "dummy_stm",
-       .sw_start       = 0x0000,
-       .sw_end         = 0xffff,
-       .sw_nchannels   = 0xffff,
-       .packet         = dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0600);
+
+static unsigned int dummy_stm_nr;
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+                         unsigned int channel)
+{
+       if (fail_mode && (channel & fail_mode))
+               return -EINVAL;
+
+       return 0;
+}
 
 static int dummy_stm_init(void)
 {
-       return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+       int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies);
+
+       if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX)
+               return -EINVAL;
+
+       for (i = 0; i < __nr_dummies; i++) {
+               dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+               if (!dummy_stm[i].name)
+                       goto fail_unregister;
+
+               dummy_stm[i].sw_start           = 0x0000;
+               dummy_stm[i].sw_end             = 0xffff;
+               dummy_stm[i].sw_nchannels       = 0xffff;
+               dummy_stm[i].packet             = dummy_stm_packet;
+               dummy_stm[i].link               = dummy_stm_link;
+
+               ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+               if (ret)
+                       goto fail_free;
+       }
+
+       dummy_stm_nr = __nr_dummies;
+
+       return 0;
+
+fail_unregister:
+       for (i--; i >= 0; i--) {
+               stm_unregister_device(&dummy_stm[i]);
+fail_free:
+               kfree(dummy_stm[i].name);
+       }
+
+       return ret;
+
 }
 
 static void dummy_stm_exit(void)
 {
-       stm_unregister_device(&dummy_stm);
+       int i;
+
+       for (i = 0; i < dummy_stm_nr; i++) {
+               stm_unregister_device(&dummy_stm[i]);
+               kfree(dummy_stm[i].name);
+       }
 }
 
 module_init(dummy_stm_init);
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
new file mode 100644 (file)
index 0000000..0133571
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * Heartbeat STM source will send repetitive messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX      32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0600);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+       struct stm_source_data  data;
+       struct hrtimer          hrtimer;
+       unsigned int            active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static unsigned int nr_instances;
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+       struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+                                                      hrtimer);
+
+       stm_source_write(&heartbeat->data, 0, str, sizeof str);
+       if (heartbeat->active)
+               hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+       return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+       struct stm_heartbeat *heartbeat =
+               container_of(data, struct stm_heartbeat, data);
+
+       heartbeat->active = 1;
+       hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+                     HRTIMER_MODE_ABS);
+
+       return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+       struct stm_heartbeat *heartbeat =
+               container_of(data, struct stm_heartbeat, data);
+
+       heartbeat->active = 0;
+       hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+       int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs);
+
+       if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX)
+               return -EINVAL;
+
+       for (i = 0; i < __nr_instances; i++) {
+               stm_heartbeat[i].data.name =
+                       kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+               if (!stm_heartbeat[i].data.name)
+                       goto fail_unregister;
+
+               stm_heartbeat[i].data.nr_chans  = 1;
+               stm_heartbeat[i].data.link              = stm_heartbeat_link;
+               stm_heartbeat[i].data.unlink    = stm_heartbeat_unlink;
+               hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_ABS);
+               stm_heartbeat[i].hrtimer.function =
+                       stm_heartbeat_hrtimer_handler;
+
+               ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+               if (ret)
+                       goto fail_free;
+       }
+
+       nr_instances = __nr_instances;
+
+       return 0;
+
+fail_unregister:
+       for (i--; i >= 0; i--) {
+               stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+               kfree(stm_heartbeat[i].data.name);
+       }
+
+       return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+       int i;
+
+       for (i = 0; i < nr_instances; i++) {
+               stm_source_unregister_device(&stm_heartbeat[i].data);
+               kfree(stm_heartbeat[i].data.name);
+       }
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
index 11ab6d01adf63d1490c8474801d358ada4f657a8..1db189657b2b01b194a2e06aafbd79be872fb027 100644 (file)
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
 {
        struct stm_device *stm = policy->stm;
 
+       /*
+        * stp_policy_release() will not call here if the policy is already
+        * unbound; other users should not either, as no link exists between
+        * this policy and anything else in that case
+        */
        if (WARN_ON_ONCE(!policy->stm))
                return;
 
-       mutex_lock(&stm->policy_mutex);
-       stm->policy = NULL;
-       mutex_unlock(&stm->policy_mutex);
+       lockdep_assert_held(&stm->policy_mutex);
 
+       stm->policy = NULL;
        policy->stm = NULL;
 
        stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
 static void stp_policy_release(struct config_item *item)
 {
        struct stp_policy *policy = to_stp_policy(item);
+       struct stm_device *stm = policy->stm;
 
+       /* a policy *can* be unbound and still exist in the configfs tree */
+       if (!stm)
+               return;
+
+       mutex_lock(&stm->policy_mutex);
        stp_policy_unbind(policy);
+       mutex_unlock(&stm->policy_mutex);
+
        kfree(policy);
 }
 
@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name)
 
        /*
         * node must look like <device_name>.<policy_name>, where
-        * <device_name> is the name of an existing stm device and
-        * <policy_name> is an arbitrary string
+        * <device_name> is the name of an existing stm device; may
+        *               contain dots;
+        * <policy_name> is an arbitrary string; may not contain dots
         */
-       p = strchr(devname, '.');
+       p = strrchr(devname, '.');
        if (!p) {
                kfree(devname);
                return ERR_PTR(-EINVAL);
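
The switch from strchr() to strrchr() makes the last dot the separator, so stm device names that themselves contain dots still parse correctly. A small standalone illustration of the difference (the device and policy names here are made up for the example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *node = "dummy_stm.0.my-policy";

	/* first dot: would split as "dummy_stm" + "0.my-policy" (wrong) */
	printf("strchr : %s\n", strchr(node, '.'));
	/* last dot: splits as "dummy_stm.0" + "my-policy" (intended) */
	printf("strrchr: %s\n", strrchr(node, '.'));
	return 0;
}
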
index 95ece0292c991c8ad60f81c4a15fe4f3687d4504..4e8c6926260f3e8eec0ec8da70de8f13f86f0cc6 100644 (file)
@@ -45,6 +45,7 @@ struct stm_device {
        int                     major;
        unsigned int            sw_nmasters;
        struct stm_data         *data;
+       struct mutex            link_mutex;
        spinlock_t              link_lock;
        struct list_head        link_list;
        /* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
        container_of((_d), struct stm_device, dev)
 
 struct stm_output {
+       spinlock_t              lock;
        unsigned int            master;
        unsigned int            channel;
        unsigned int            nr_chans;
index 054fc10cb3b6ab5a6b5f9d51b2ded3bb656cbf4e..15579514d120ca60c7c54d109f5b92553d387c45 100644 (file)
@@ -440,7 +440,7 @@ config ARM_CHARLCD
          still useful.
 
 config BMP085
-       bool
+       tristate
        depends on SYSFS
 
 config BMP085_I2C
@@ -470,7 +470,7 @@ config BMP085_SPI
 config PCH_PHUB
        tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
        select GENERIC_NET_UTILS
-       depends on PCI && (X86_32 || COMPILE_TEST)
+       depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
        help
          This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
          Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded
index 15e88078ba1e6a70ec82b3c713fd8c19ebb425d7..fe1672747bc1fd2c3ea1d26ae6e7154dadd8d11f 100644 (file)
@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
                         */
                        value = swab16(value);
 
-                       if (dpot->uid == DPOT_UID(AD5271_ID))
+                       if (dpot->uid == DPOT_UID(AD5274_ID))
                                value = value >> 2;
                return value;
        default:
@@ -452,7 +452,7 @@ static ssize_t sysfs_set_reg(struct device *dev,
        int err;
 
        if (reg & DPOT_ADDR_OTP_EN) {
-               if (!strncmp(buf, "enabled", sizeof("enabled")))
+               if (sysfs_streq(buf, "enabled"))
                        set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
                else
                        clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
index a3e789b85cc82e441530981736ba227b0226bcf7..dfb72ecfa6046117a243703374d78fe49c3e1466 100644 (file)
@@ -1215,7 +1215,7 @@ static int apds990x_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int apds990x_suspend(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct apds990x_chip *chip = i2c_get_clientdata(client);
 
        apds990x_chip_off(chip);
@@ -1224,7 +1224,7 @@ static int apds990x_suspend(struct device *dev)
 
 static int apds990x_resume(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct apds990x_chip *chip = i2c_get_clientdata(client);
 
        /*
@@ -1240,7 +1240,7 @@ static int apds990x_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int apds990x_runtime_suspend(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct apds990x_chip *chip = i2c_get_clientdata(client);
 
        apds990x_chip_off(chip);
@@ -1249,7 +1249,7 @@ static int apds990x_runtime_suspend(struct device *dev)
 
 static int apds990x_runtime_resume(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct apds990x_chip *chip = i2c_get_clientdata(client);
 
        apds990x_chip_on(chip);
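
The to_i2c_client() conversions in this and the following drivers are behavior-neutral: the helper is just the container_of() pattern being replaced. A small userspace sketch of the same idea, using local stand-in types and a local container_of definition rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct i2c_client { char name[16]; struct device dev; };

/* same shape as the kernel's to_i2c_client() helper */
static struct i2c_client *to_i2c_client(struct device *dev)
{
	return container_of(dev, struct i2c_client, dev);
}

int main(void)
{
	struct i2c_client client = { .name = "apds990x", .dev = { .id = 1 } };
	struct device *dev = &client.dev;

	printf("%s\n", to_i2c_client(dev)->name);	/* prints "apds990x" */
	return 0;
}
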
index c65b5ea5d5ef44c90713bae61e9a0c461e3be93e..b3176ee92b90d831dd57f1aa101c008dff2b02bb 100644 (file)
@@ -8,7 +8,6 @@
  * Author: Linus Walleij <triad@df.lth.se>
  */
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
@@ -328,20 +327,6 @@ out_no_resource:
        return ret;
 }
 
-static int __exit charlcd_remove(struct platform_device *pdev)
-{
-       struct charlcd *lcd = platform_get_drvdata(pdev);
-
-       if (lcd) {
-               free_irq(lcd->irq, lcd);
-               iounmap(lcd->virtbase);
-               release_mem_region(lcd->phybase, lcd->physize);
-               kfree(lcd);
-       }
-
-       return 0;
-}
-
 static int charlcd_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -376,13 +361,8 @@ static struct platform_driver charlcd_driver = {
        .driver = {
                .name = DRIVERNAME,
                .pm = &charlcd_pm_ops,
+               .suppress_bind_attrs = true,
                .of_match_table = of_match_ptr(charlcd_match),
        },
-       .remove = __exit_p(charlcd_remove),
 };
-
-module_platform_driver_probe(charlcd_driver, charlcd_probe);
-
-MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
-MODULE_DESCRIPTION("ARM Character LCD Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(charlcd_driver, charlcd_probe);
index 753d7ecdadaa78a27df6c47c54a0bd0265192ac4..845466e45b9593ece7702c574fb5a42fc6032bd4 100644 (file)
@@ -1323,7 +1323,7 @@ static int bh1770_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int bh1770_suspend(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct bh1770_chip *chip = i2c_get_clientdata(client);
 
        bh1770_chip_off(chip);
@@ -1333,7 +1333,7 @@ static int bh1770_suspend(struct device *dev)
 
 static int bh1770_resume(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct bh1770_chip *chip = i2c_get_clientdata(client);
        int ret = 0;
 
@@ -1361,7 +1361,7 @@ static int bh1770_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int bh1770_runtime_suspend(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct bh1770_chip *chip = i2c_get_clientdata(client);
 
        bh1770_chip_off(chip);
@@ -1371,7 +1371,7 @@ static int bh1770_runtime_suspend(struct device *dev)
 
 static int bh1770_runtime_resume(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct bh1770_chip *chip = i2c_get_clientdata(client);
 
        bh1770_chip_on(chip);
index cc8645b5369d315afad0296ba4f5a5bd5a99ada8..1922cb8f6b88f3d408439cc6dd0fda8b5af54092 100644 (file)
@@ -721,9 +721,7 @@ static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr,
                                char *buffer, loff_t offset, size_t count)
 {
-       struct c2port_device *c2dev =
-                       dev_get_drvdata(container_of(kobj,
-                                               struct device, kobj));
+       struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
        ssize_t ret;
 
        /* Check the device and flash access status */
@@ -838,9 +836,7 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr,
                                char *buffer, loff_t offset, size_t count)
 {
-       struct c2port_device *c2dev =
-                       dev_get_drvdata(container_of(kobj,
-                                               struct device, kobj));
+       struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
        int ret;
 
        /* Check the device access status */
index 02006f7109a802821c3134ec03ed7d8658f23da7..038af5d45145b741fbe1f421f3a0ec4e11afabaa 100644 (file)
@@ -386,8 +386,7 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t off, size_t count)
 {
-       struct cxl_afu *afu = to_cxl_afu(container_of(kobj,
-                                                     struct device, kobj));
+       struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
 
        return cxl_afu_read_err_buffer(afu, buf, off, count);
 }
@@ -467,7 +466,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
                               loff_t off, size_t count)
 {
        struct afu_config_record *cr = to_cr(kobj);
-       struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+       struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
 
        u64 i, j, val;
 
index 04f2e1fa9dd15b10871919a788c81aff782178f8..cfc493c2e30a725695c50326dbfd9db170e0cfd1 100644 (file)
@@ -3,6 +3,8 @@ menu "EEPROM support"
 config EEPROM_AT24
        tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
        depends on I2C && SYSFS
+       select REGMAP
+       select NVMEM
        help
          Enable this driver to get read/write support to most I2C EEPROMs
          and compatible devices like FRAMs, SRAMs, ROMs etc. After you
@@ -30,6 +32,8 @@ config EEPROM_AT24
 config EEPROM_AT25
        tristate "SPI EEPROMs from most vendors"
        depends on SPI && SYSFS
+       select REGMAP
+       select NVMEM
        help
          Enable this driver to get read/write support to most SPI EEPROMs,
          after you configure the board init code to know about each eeprom
@@ -74,6 +78,8 @@ config EEPROM_93CX6
 config EEPROM_93XX46
        tristate "Microwire EEPROM 93XX46 support"
        depends on SPI && SYSFS
+       select REGMAP
+       select NVMEM
        help
          Driver for the microwire EEPROM chipsets 93xx46x. The driver
          supports both read and write commands and also the command to
index 5d7c0900fa1b129c8bf103d3eddd03cbd92fe80d..089d6943f68ab1db6cfc93fe67fe0d0dd9d0bf90 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
-#include <linux/sysfs.h>
 #include <linux/mod_devicetable.h>
 #include <linux/log2.h>
 #include <linux/bitops.h>
@@ -23,6 +22,8 @@
 #include <linux/of.h>
 #include <linux/acpi.h>
 #include <linux/i2c.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
 #include <linux/platform_data/at24.h>
 
 /*
@@ -55,7 +56,6 @@
 
 struct at24_data {
        struct at24_platform_data chip;
-       struct memory_accessor macc;
        int use_smbus;
        int use_smbus_write;
 
@@ -64,12 +64,15 @@ struct at24_data {
         * but not from changes by other I2C masters.
         */
        struct mutex lock;
-       struct bin_attribute bin;
 
        u8 *writebuf;
        unsigned write_max;
        unsigned num_addresses;
 
+       struct regmap_config regmap_config;
+       struct nvmem_config nvmem_config;
+       struct nvmem_device *nvmem;
+
        /*
         * Some chips tie up multiple I2C addresses; dummy devices reserve
         * them for us, and we'll use them with SMBus calls.
@@ -283,17 +286,6 @@ static ssize_t at24_read(struct at24_data *at24,
        return retval;
 }
 
-static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t off, size_t count)
-{
-       struct at24_data *at24;
-
-       at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
-       return at24_read(at24, buf, off, count);
-}
-
-
 /*
  * Note that if the hardware write-protect pin is pulled high, the whole
  * chip is normally write protected. But there are plenty of product
@@ -414,40 +406,49 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
        return retval;
 }
 
-static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t off, size_t count)
-{
-       struct at24_data *at24;
-
-       at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
-       return at24_write(at24, buf, off, count);
-}
-
 /*-------------------------------------------------------------------------*/
 
 /*
- * This lets other kernel code access the eeprom data. For example, it
- * might hold a board's Ethernet address, or board-specific calibration
- * data generated on the manufacturing floor.
- */
-
-static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
-                        off_t offset, size_t count)
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework.
+ */
+static int at24_regmap_read(void *context, const void *reg, size_t reg_size,
+                           void *val, size_t val_size)
 {
-       struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+       struct at24_data *at24 = context;
+       off_t offset = *(u32 *)reg;
+       int err;
 
-       return at24_read(at24, buf, offset, count);
+       err = at24_read(at24, val, offset, val_size);
+       if (err)
+               return err;
+       return 0;
 }
 
-static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
-                         off_t offset, size_t count)
+static int at24_regmap_write(void *context, const void *data, size_t count)
 {
-       struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+       struct at24_data *at24 = context;
+       const char *buf;
+       u32 offset;
+       size_t len;
+       int err;
 
-       return at24_write(at24, buf, offset, count);
+       memcpy(&offset, data, sizeof(offset));
+       buf = (const char *)data + sizeof(offset);
+       len = count - sizeof(offset);
+
+       err = at24_write(at24, buf, offset, len);
+       if (err)
+               return err;
+       return 0;
 }
 
+static const struct regmap_bus at24_regmap_bus = {
+       .read = at24_regmap_read,
+       .write = at24_regmap_write,
+       .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
 /*-------------------------------------------------------------------------*/
 
 #ifdef CONFIG_OF
@@ -481,6 +482,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
        struct at24_data *at24;
        int err;
        unsigned i, num_addresses;
+       struct regmap *regmap;
 
        if (client->dev.platform_data) {
                chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -573,29 +575,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
        at24->chip = chip;
        at24->num_addresses = num_addresses;
 
-       /*
-        * Export the EEPROM bytes through sysfs, since that's convenient.
-        * By default, only root should see the data (maybe passwords etc)
-        */
-       sysfs_bin_attr_init(&at24->bin);
-       at24->bin.attr.name = "eeprom";
-       at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
-       at24->bin.read = at24_bin_read;
-       at24->bin.size = chip.byte_len;
-
-       at24->macc.read = at24_macc_read;
-
        writable = !(chip.flags & AT24_FLAG_READONLY);
        if (writable) {
                if (!use_smbus || use_smbus_write) {
 
                        unsigned write_max = chip.page_size;
 
-                       at24->macc.write = at24_macc_write;
-
-                       at24->bin.write = at24_bin_write;
-                       at24->bin.attr.mode |= S_IWUSR;
-
                        if (write_max > io_limit)
                                write_max = io_limit;
                        if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
@@ -627,14 +612,38 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
                }
        }
 
-       err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
-       if (err)
+       at24->regmap_config.reg_bits = 32;
+       at24->regmap_config.val_bits = 8;
+       at24->regmap_config.reg_stride = 1;
+       at24->regmap_config.max_register = chip.byte_len - 1;
+
+       regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24,
+                                 &at24->regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&client->dev, "regmap init failed\n");
+               err = PTR_ERR(regmap);
+               goto err_clients;
+       }
+
+       at24->nvmem_config.name = dev_name(&client->dev);
+       at24->nvmem_config.dev = &client->dev;
+       at24->nvmem_config.read_only = !writable;
+       at24->nvmem_config.root_only = true;
+       at24->nvmem_config.owner = THIS_MODULE;
+       at24->nvmem_config.compat = true;
+       at24->nvmem_config.base_dev = &client->dev;
+
+       at24->nvmem = nvmem_register(&at24->nvmem_config);
+
+       if (IS_ERR(at24->nvmem)) {
+               err = PTR_ERR(at24->nvmem);
                goto err_clients;
+       }
 
        i2c_set_clientdata(client, at24);
 
-       dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
-               at24->bin.size, client->name,
+       dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
+               chip.byte_len, client->name,
                writable ? "writable" : "read-only", at24->write_max);
        if (use_smbus == I2C_SMBUS_WORD_DATA ||
            use_smbus == I2C_SMBUS_BYTE_DATA) {
@@ -645,7 +654,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        /* export data to kernel code */
        if (chip.setup)
-               chip.setup(&at24->macc, chip.context);
+               chip.setup(at24->nvmem, chip.context);
 
        return 0;
 
@@ -663,7 +672,8 @@ static int at24_remove(struct i2c_client *client)
        int i;
 
        at24 = i2c_get_clientdata(client);
-       sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);
+
+       nvmem_unregister(at24->nvmem);
 
        for (i = 1; i < at24->num_addresses; i++)
                i2c_unregister_device(at24->client[i]);
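
With reg_bits = 32, val_bits = 8 and native endianness as configured above, the regmap core hands the bus .write callback a single buffer that begins with the 32-bit register offset followed by the value bytes, which is the layout at24_regmap_write() unpacks. A standalone sketch of that layout (pack_write() is an illustrative helper, not a regmap API):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* mimic the buffer layout the driver's .write callback assumes */
static size_t pack_write(uint8_t *buf, uint32_t offset,
			 const uint8_t *val, size_t len)
{
	memcpy(buf, &offset, sizeof(offset));		/* native-endian offset */
	memcpy(buf + sizeof(offset), val, len);		/* value bytes follow   */
	return sizeof(offset) + len;
}

int main(void)
{
	uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t buf[32];
	size_t count = pack_write(buf, 0x10, payload, sizeof(payload));
	uint32_t offset;

	/* unpack the same way at24_regmap_write() does */
	memcpy(&offset, buf, sizeof(offset));
	printf("offset=0x%x len=%zu first=0x%02x\n",
	       offset, count - sizeof(offset), buf[sizeof(offset)]);
	return 0;
}
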
index f850ef556bcc48d598f11537fffa6fe351b7eef9..fa36a6e37084df7487831c2786936c3965a1a775 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/device.h>
 #include <linux/sched.h>
 
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/eeprom.h>
 #include <linux/property.h>
 
 struct at25_data {
        struct spi_device       *spi;
-       struct memory_accessor  mem;
        struct mutex            lock;
        struct spi_eeprom       chip;
-       struct bin_attribute    bin;
        unsigned                addrlen;
+       struct regmap_config    regmap_config;
+       struct nvmem_config     nvmem_config;
+       struct nvmem_device     *nvmem;
 };
 
 #define        AT25_WREN       0x06            /* latch the write enable */
@@ -77,10 +80,10 @@ at25_ee_read(
        struct spi_message      m;
        u8                      instr;
 
-       if (unlikely(offset >= at25->bin.size))
+       if (unlikely(offset >= at25->chip.byte_len))
                return 0;
-       if ((offset + count) > at25->bin.size)
-               count = at25->bin.size - offset;
+       if ((offset + count) > at25->chip.byte_len)
+               count = at25->chip.byte_len - offset;
        if (unlikely(!count))
                return count;
 
@@ -131,21 +134,19 @@ at25_ee_read(
        return status ? status : count;
 }
 
-static ssize_t
-at25_bin_read(struct file *filp, struct kobject *kobj,
-             struct bin_attribute *bin_attr,
-             char *buf, loff_t off, size_t count)
+static int at25_regmap_read(void *context, const void *reg, size_t reg_size,
+                           void *val, size_t val_size)
 {
-       struct device           *dev;
-       struct at25_data        *at25;
+       struct at25_data *at25 = context;
+       off_t offset = *(u32 *)reg;
+       int err;
 
-       dev = container_of(kobj, struct device, kobj);
-       at25 = dev_get_drvdata(dev);
-
-       return at25_ee_read(at25, buf, off, count);
+       err = at25_ee_read(at25, val, offset, val_size);
+       if (err)
+               return err;
+       return 0;
 }
 
-
 static ssize_t
 at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
              size_t count)
@@ -155,10 +156,10 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
        unsigned                buf_size;
        u8                      *bounce;
 
-       if (unlikely(off >= at25->bin.size))
+       if (unlikely(off >= at25->chip.byte_len))
                return -EFBIG;
-       if ((off + count) > at25->bin.size)
-               count = at25->bin.size - off;
+       if ((off + count) > at25->chip.byte_len)
+               count = at25->chip.byte_len - off;
        if (unlikely(!count))
                return count;
 
@@ -265,39 +266,29 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
        return written ? written : status;
 }
 
-static ssize_t
-at25_bin_write(struct file *filp, struct kobject *kobj,
-              struct bin_attribute *bin_attr,
-              char *buf, loff_t off, size_t count)
+static int at25_regmap_write(void *context, const void *data, size_t count)
 {
-       struct device           *dev;
-       struct at25_data        *at25;
-
-       dev = container_of(kobj, struct device, kobj);
-       at25 = dev_get_drvdata(dev);
-
-       return at25_ee_write(at25, buf, off, count);
-}
+       struct at25_data *at25 = context;
+       const char *buf;
+       u32 offset;
+       size_t len;
+       int err;
 
-/*-------------------------------------------------------------------------*/
-
-/* Let in-kernel code access the eeprom data. */
-
-static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
-                        off_t offset, size_t count)
-{
-       struct at25_data *at25 = container_of(mem, struct at25_data, mem);
+       memcpy(&offset, data, sizeof(offset));
+       buf = (const char *)data + sizeof(offset);
+       len = count - sizeof(offset);
 
-       return at25_ee_read(at25, buf, offset, count);
+       err = at25_ee_write(at25, buf, offset, len);
+       if (err)
+               return err;
+       return 0;
 }
 
-static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
-                         off_t offset, size_t count)
-{
-       struct at25_data *at25 = container_of(mem, struct at25_data, mem);
-
-       return at25_ee_write(at25, buf, offset, count);
-}
+static const struct regmap_bus at25_regmap_bus = {
+       .read = at25_regmap_read,
+       .write = at25_regmap_write,
+       .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
 
 /*-------------------------------------------------------------------------*/
 
@@ -358,6 +349,7 @@ static int at25_probe(struct spi_device *spi)
 {
        struct at25_data        *at25 = NULL;
        struct spi_eeprom       chip;
+       struct regmap           *regmap;
        int                     err;
        int                     sr;
        int                     addrlen;
@@ -402,40 +394,35 @@ static int at25_probe(struct spi_device *spi)
        spi_set_drvdata(spi, at25);
        at25->addrlen = addrlen;
 
-       /* Export the EEPROM bytes through sysfs, since that's convenient.
-        * And maybe to other kernel code; it might hold a board's Ethernet
-        * address, or board-specific calibration data generated on the
-        * manufacturing floor.
-        *
-        * Default to root-only access to the data; EEPROMs often hold data
-        * that's sensitive for read and/or write, like ethernet addresses,
-        * security codes, board-specific manufacturing calibrations, etc.
-        */
-       sysfs_bin_attr_init(&at25->bin);
-       at25->bin.attr.name = "eeprom";
-       at25->bin.attr.mode = S_IRUSR;
-       at25->bin.read = at25_bin_read;
-       at25->mem.read = at25_mem_read;
-
-       at25->bin.size = at25->chip.byte_len;
-       if (!(chip.flags & EE_READONLY)) {
-               at25->bin.write = at25_bin_write;
-               at25->bin.attr.mode |= S_IWUSR;
-               at25->mem.write = at25_mem_write;
-       }
+       at25->regmap_config.reg_bits = 32;
+       at25->regmap_config.val_bits = 8;
+       at25->regmap_config.reg_stride = 1;
+       at25->regmap_config.max_register = chip.byte_len - 1;
 
-       err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
-       if (err)
-               return err;
-
-       if (chip.setup)
-               chip.setup(&at25->mem, chip.context);
+       regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25,
+                                 &at25->regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&spi->dev, "regmap init failed\n");
+               return PTR_ERR(regmap);
+       }
 
-       dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
-               (at25->bin.size < 1024)
-                       ? at25->bin.size
-                       : (at25->bin.size / 1024),
-               (at25->bin.size < 1024) ? "Byte" : "KByte",
+       at25->nvmem_config.name = dev_name(&spi->dev);
+       at25->nvmem_config.dev = &spi->dev;
+       at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+       at25->nvmem_config.root_only = true;
+       at25->nvmem_config.owner = THIS_MODULE;
+       at25->nvmem_config.compat = true;
+       at25->nvmem_config.base_dev = &spi->dev;
+
+       at25->nvmem = nvmem_register(&at25->nvmem_config);
+       if (IS_ERR(at25->nvmem))
+               return PTR_ERR(at25->nvmem);
+
+       dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n",
+               (chip.byte_len < 1024)
+                       ? chip.byte_len
+                       : (chip.byte_len / 1024),
+               (chip.byte_len < 1024) ? "Byte" : "KByte",
                at25->chip.name,
                (chip.flags & EE_READONLY) ? " (readonly)" : "",
                at25->chip.page_size);
@@ -447,7 +434,8 @@ static int at25_remove(struct spi_device *spi)
        struct at25_data        *at25;
 
        at25 = spi_get_drvdata(spi);
-       sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
+       nvmem_unregister(at25->nvmem);
+
        return 0;
 }
 
index 7342fd637031335a21ed4779442ca69eb39fb045..3d1d55157e5f3b605bb4695c797eb12a6987bfff 100644 (file)
@@ -84,7 +84,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
                           struct bin_attribute *bin_attr,
                           char *buf, loff_t off, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
+       struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
        struct eeprom_data *data = i2c_get_clientdata(client);
        u8 slice;
 
index ff63f05edc763cac7bb8aefef565e20e1463a82b..426fe2fd5238c8f529c94f3c7ce0be8289e55def 100644 (file)
 
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/sysfs.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
 #include <linux/eeprom_93xx46.h>
 
 #define OP_START       0x4
 #define ADDR_ERAL      0x20
 #define ADDR_EWEN      0x30
 
+struct eeprom_93xx46_devtype_data {
+       unsigned int quirks;
+};
+
+static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+       .quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
+                 EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+};
+
 struct eeprom_93xx46_dev {
        struct spi_device *spi;
        struct eeprom_93xx46_platform_data *pdata;
-       struct bin_attribute bin;
        struct mutex lock;
+       struct regmap_config regmap_config;
+       struct nvmem_config nvmem_config;
+       struct nvmem_device *nvmem;
        int addrlen;
+       int size;
 };
 
+static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
+{
+       return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
+}
+
+static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+{
+       return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+}
+
 static ssize_t
-eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
-                      struct bin_attribute *bin_attr,
-                      char *buf, loff_t off, size_t count)
+eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
+                  unsigned off, size_t count)
 {
-       struct eeprom_93xx46_dev *edev;
-       struct device *dev;
-       struct spi_message m;
-       struct spi_transfer t[2];
-       int bits, ret;
-       u16 cmd_addr;
+       ssize_t ret = 0;
 
-       dev = container_of(kobj, struct device, kobj);
-       edev = dev_get_drvdata(dev);
+       if (unlikely(off >= edev->size))
+               return 0;
+       if ((off + count) > edev->size)
+               count = edev->size - off;
+       if (unlikely(!count))
+               return count;
 
-       cmd_addr = OP_READ << edev->addrlen;
+       mutex_lock(&edev->lock);
 
-       if (edev->addrlen == 7) {
-               cmd_addr |= off & 0x7f;
-               bits = 10;
-       } else {
-               cmd_addr |= off & 0x3f;
-               bits = 9;
-       }
+       if (edev->pdata->prepare)
+               edev->pdata->prepare(edev);
 
-       dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
-               cmd_addr, edev->spi->max_speed_hz);
+       while (count) {
+               struct spi_message m;
+               struct spi_transfer t[2] = { { 0 } };
+               u16 cmd_addr = OP_READ << edev->addrlen;
+               size_t nbytes = count;
+               int bits;
+               int err;
+
+               if (edev->addrlen == 7) {
+                       cmd_addr |= off & 0x7f;
+                       bits = 10;
+                       if (has_quirk_single_word_read(edev))
+                               nbytes = 1;
+               } else {
+                       cmd_addr |= (off >> 1) & 0x3f;
+                       bits = 9;
+                       if (has_quirk_single_word_read(edev))
+                               nbytes = 2;
+               }
 
-       spi_message_init(&m);
-       memset(t, 0, sizeof(t));
+               dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+                       cmd_addr, edev->spi->max_speed_hz);
 
-       t[0].tx_buf = (char *)&cmd_addr;
-       t[0].len = 2;
-       t[0].bits_per_word = bits;
-       spi_message_add_tail(&t[0], &m);
+               spi_message_init(&m);
 
-       t[1].rx_buf = buf;
-       t[1].len = count;
-       t[1].bits_per_word = 8;
-       spi_message_add_tail(&t[1], &m);
+               t[0].tx_buf = (char *)&cmd_addr;
+               t[0].len = 2;
+               t[0].bits_per_word = bits;
+               spi_message_add_tail(&t[0], &m);
 
-       mutex_lock(&edev->lock);
+               t[1].rx_buf = buf;
+               t[1].len = count;
+               t[1].bits_per_word = 8;
+               spi_message_add_tail(&t[1], &m);
 
-       if (edev->pdata->prepare)
-               edev->pdata->prepare(edev);
+               err = spi_sync(edev->spi, &m);
+               /* have to wait at least Tcsl ns */
+               ndelay(250);
 
-       ret = spi_sync(edev->spi, &m);
-       /* have to wait at least Tcsl ns */
-       ndelay(250);
-       if (ret) {
-               dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
-                       count, (int)off, ret);
+               if (err) {
+                       dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+                               nbytes, (int)off, err);
+                       ret = err;
+                       break;
+               }
+
+               buf += nbytes;
+               off += nbytes;
+               count -= nbytes;
+               ret += nbytes;
        }
 
        if (edev->pdata->finish)
                edev->pdata->finish(edev);
 
        mutex_unlock(&edev->lock);
-       return ret ? : count;
+       return ret;
 }
 
 static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -110,7 +153,13 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
                bits = 9;
        }
 
-       dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+       if (has_quirk_instruction_length(edev)) {
+               cmd_addr <<= 2;
+               bits += 2;
+       }
+
+       dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
+                       is_on ? "en" : "ds", cmd_addr, bits);
 
        spi_message_init(&m);
        memset(&t, 0, sizeof(t));
@@ -155,7 +204,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
                bits = 10;
                data_len = 1;
        } else {
-               cmd_addr |= off & 0x3f;
+               cmd_addr |= (off >> 1) & 0x3f;
                bits = 9;
                data_len = 2;
        }
@@ -182,16 +231,17 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
 }
 
 static ssize_t
-eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
-                       struct bin_attribute *bin_attr,
-                       char *buf, loff_t off, size_t count)
+eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
+                   loff_t off, size_t count)
 {
-       struct eeprom_93xx46_dev *edev;
-       struct device *dev;
        int i, ret, step = 1;
 
-       dev = container_of(kobj, struct device, kobj);
-       edev = dev_get_drvdata(dev);
+       if (unlikely(off >= edev->size))
+               return -EFBIG;
+       if ((off + count) > edev->size)
+               count = edev->size - off;
+       if (unlikely(!count))
+               return count;
 
        /* only write even number of bytes on 16-bit devices */
        if (edev->addrlen == 6) {
@@ -228,6 +278,49 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
        return ret ? : count;
 }
 
+/*
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework.
+ */
+static int eeprom_93xx46_regmap_read(void *context, const void *reg,
+                                    size_t reg_size, void *val,
+                                    size_t val_size)
+{
+       struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+       off_t offset = *(u32 *)reg;
+       int err;
+
+       err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size);
+       if (err)
+               return err;
+       return 0;
+}
+
+static int eeprom_93xx46_regmap_write(void *context, const void *data,
+                                     size_t count)
+{
+       struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+       const char *buf;
+       u32 offset;
+       size_t len;
+       int err;
+
+       memcpy(&offset, data, sizeof(offset));
+       buf = (const char *)data + sizeof(offset);
+       len = count - sizeof(offset);
+
+       err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len);
+       if (err)
+               return err;
+       return 0;
+}
+
+static const struct regmap_bus eeprom_93xx46_regmap_bus = {
+       .read = eeprom_93xx46_regmap_read,
+       .write = eeprom_93xx46_regmap_write,
+       .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
 static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
 {
        struct eeprom_93xx46_platform_data *pd = edev->pdata;
@@ -245,6 +338,13 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
                bits = 9;
        }
 
+       if (has_quirk_instruction_length(edev)) {
+               cmd_addr <<= 2;
+               bits += 2;
+       }
+
+       dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
+
        spi_message_init(&m);
        memset(&t, 0, sizeof(t));
 
@@ -294,12 +394,101 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev,
 }
 static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
 
+static void select_assert(void *context)
+{
+       struct eeprom_93xx46_dev *edev = context;
+
+       gpiod_set_value_cansleep(edev->pdata->select, 1);
+}
+
+static void select_deassert(void *context)
+{
+       struct eeprom_93xx46_dev *edev = context;
+
+       gpiod_set_value_cansleep(edev->pdata->select, 0);
+}
+
+static const struct of_device_id eeprom_93xx46_of_table[] = {
+       { .compatible = "eeprom-93xx46", },
+       { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+       {}
+};
+MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+
+static int eeprom_93xx46_probe_dt(struct spi_device *spi)
+{
+       const struct of_device_id *of_id =
+               of_match_device(eeprom_93xx46_of_table, &spi->dev);
+       struct device_node *np = spi->dev.of_node;
+       struct eeprom_93xx46_platform_data *pd;
+       u32 tmp;
+       int gpio;
+       enum of_gpio_flags of_flags;
+       int ret;
+
+       pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return -ENOMEM;
+
+       ret = of_property_read_u32(np, "data-size", &tmp);
+       if (ret < 0) {
+               dev_err(&spi->dev, "data-size property not found\n");
+               return ret;
+       }
+
+       if (tmp == 8) {
+               pd->flags |= EE_ADDR8;
+       } else if (tmp == 16) {
+               pd->flags |= EE_ADDR16;
+       } else {
+               dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
+               return -EINVAL;
+       }
+
+       if (of_property_read_bool(np, "read-only"))
+               pd->flags |= EE_READONLY;
+
+       gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags);
+       if (gpio_is_valid(gpio)) {
+               unsigned long flags =
+                       of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0;
+
+               ret = devm_gpio_request_one(&spi->dev, gpio, flags,
+                                           "eeprom_93xx46_select");
+               if (ret)
+                       return ret;
+
+               pd->select = gpio_to_desc(gpio);
+               pd->prepare = select_assert;
+               pd->finish = select_deassert;
+
+               gpiod_direction_output(pd->select, 0);
+       }
+
+       if (of_id->data) {
+               const struct eeprom_93xx46_devtype_data *data = of_id->data;
+
+               pd->quirks = data->quirks;
+       }
+
+       spi->dev.platform_data = pd;
+
+       return 0;
+}
+
 static int eeprom_93xx46_probe(struct spi_device *spi)
 {
        struct eeprom_93xx46_platform_data *pd;
        struct eeprom_93xx46_dev *edev;
+       struct regmap *regmap;
        int err;
 
+       if (spi->dev.of_node) {
+               err = eeprom_93xx46_probe_dt(spi);
+               if (err < 0)
+                       return err;
+       }
+
        pd = spi->dev.platform_data;
        if (!pd) {
                dev_err(&spi->dev, "missing platform data\n");
@@ -325,19 +514,34 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
        edev->spi = spi_dev_get(spi);
        edev->pdata = pd;
 
-       sysfs_bin_attr_init(&edev->bin);
-       edev->bin.attr.name = "eeprom";
-       edev->bin.attr.mode = S_IRUSR;
-       edev->bin.read = eeprom_93xx46_bin_read;
-       edev->bin.size = 128;
-       if (!(pd->flags & EE_READONLY)) {
-               edev->bin.write = eeprom_93xx46_bin_write;
-               edev->bin.attr.mode |= S_IWUSR;
+       edev->size = 128;
+
+       edev->regmap_config.reg_bits = 32;
+       edev->regmap_config.val_bits = 8;
+       edev->regmap_config.reg_stride = 1;
+       edev->regmap_config.max_register = edev->size - 1;
+
+       regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev,
+                                 &edev->regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&spi->dev, "regmap init failed\n");
+               err = PTR_ERR(regmap);
+               goto fail;
        }
 
-       err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin);
-       if (err)
+       edev->nvmem_config.name = dev_name(&spi->dev);
+       edev->nvmem_config.dev = &spi->dev;
+       edev->nvmem_config.read_only = pd->flags & EE_READONLY;
+       edev->nvmem_config.root_only = true;
+       edev->nvmem_config.owner = THIS_MODULE;
+       edev->nvmem_config.compat = true;
+       edev->nvmem_config.base_dev = &spi->dev;
+
+       edev->nvmem = nvmem_register(&edev->nvmem_config);
+       if (IS_ERR(edev->nvmem)) {
+               err = PTR_ERR(edev->nvmem);
                goto fail;
+       }
 
        dev_info(&spi->dev, "%d-bit eeprom %s\n",
                (pd->flags & EE_ADDR8) ? 8 : 16,
@@ -359,10 +563,11 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
 {
        struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
 
+       nvmem_unregister(edev->nvmem);
+
        if (!(edev->pdata->flags & EE_READONLY))
                device_remove_file(&spi->dev, &dev_attr_erase);
 
-       sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
        kfree(edev);
        return 0;
 }
@@ -370,6 +575,7 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
 static struct spi_driver eeprom_93xx46_driver = {
        .driver = {
                .name   = "93xx46",
+               .of_match_table = of_match_ptr(eeprom_93xx46_of_table),
        },
        .probe          = eeprom_93xx46_probe,
        .remove         = eeprom_93xx46_remove,
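
For the 16-bit organized parts (addrlen == 6), the command word now carries a word address rather than a byte offset, hence the new (off >> 1) & 0x3f in the read and write paths. A tiny standalone check of that conversion, under that interpretation of the change:

#include <stdio.h>

int main(void)
{
	/* byte offsets 0,1 -> word 0; 2,3 -> word 1; 6,7 -> word 3; ... */
	for (unsigned int off = 0; off < 8; off++)
		printf("byte offset %u -> word address %u\n",
		       off, (off >> 1) & 0x3f);
	return 0;
}
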
index 6ab31eff0536a2a2572bb6e39a952931959c958d..c24c9b7c1dd374eed51254d647dce21fd2587ec8 100644 (file)
@@ -278,7 +278,7 @@ static umode_t genwqe_is_visible(struct kobject *kobj,
                                 struct attribute *attr, int n)
 {
        unsigned int j;
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct genwqe_dev *cd = dev_get_drvdata(dev);
        umode_t mode = attr->mode;
 
index 5bd127727d8ebc57d0d9a3b00bf81c9a38b79175..9fea49d2e15b738225634ef6289e6be7b59e3aee 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/kref.h>
 #include <linux/device.h>
 #include <linux/input.h>
+#include <linux/time64.h>
 
 /* Driver identification */
 #define DRIVER_NAME    "ibmasm"
@@ -53,9 +54,11 @@ extern int ibmasm_debug;
 
 static inline char *get_timestamp(char *buf)
 {
-       struct timeval now;
-       do_gettimeofday(&now);
-       sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec);
+       struct timespec64 now;
+
+       ktime_get_real_ts64(&now);
+       sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec,
+                               now.tv_nsec / NSEC_PER_USEC);
        return buf;
 }
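
The timestamp helper now builds its "seconds.fraction" string from a struct timespec64 instead of the removed do_gettimeofday(). A rough userspace analogue using POSIX clock_gettime(), printing a six-digit microsecond fraction (the kernel code above keeps eight fractional digits):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec now;
	char buf[32];

	clock_gettime(CLOCK_REALTIME, &now);
	snprintf(buf, sizeof(buf), "%lld.%06ld",
		 (long long)now.tv_sec, now.tv_nsec / 1000);
	printf("%s\n", buf);
	return 0;
}
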
 
index 0c3bb7e3ee80d45b4a122c4f4ad05fc1b9ff1ad3..14b7d539fed6882aa2d82c200afd9c4093a2e351 100644 (file)
@@ -209,7 +209,7 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int lis3lv02d_i2c_suspend(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
        if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -219,7 +219,7 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
 
 static int lis3lv02d_i2c_resume(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
        /*
@@ -238,7 +238,7 @@ static int lis3lv02d_i2c_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int lis3_i2c_runtime_suspend(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
        lis3lv02d_poweroff(lis3);
@@ -247,7 +247,7 @@ static int lis3_i2c_runtime_suspend(struct device *dev)
 
 static int lis3_i2c_runtime_resume(struct device *dev)
 {
-       struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+       struct i2c_client *client = to_i2c_client(dev);
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
        lis3lv02d_poweron(lis3);
index 2a6eaf1122b4e9b742eb3777fb4b6b317c07b201..5f1a36b8fbb082af1d0251504e19a33e1dc8c621 100644 (file)
@@ -92,6 +92,9 @@ enum ctype {
        CT_UNALIGNED_LOAD_STORE_WRITE,
        CT_OVERWRITE_ALLOCATION,
        CT_WRITE_AFTER_FREE,
+       CT_READ_AFTER_FREE,
+       CT_WRITE_BUDDY_AFTER_FREE,
+       CT_READ_BUDDY_AFTER_FREE,
        CT_SOFTLOCKUP,
        CT_HARDLOCKUP,
        CT_SPINLOCKUP,
@@ -105,6 +108,7 @@ enum ctype {
        CT_WRITE_RO,
        CT_WRITE_RO_AFTER_INIT,
        CT_WRITE_KERN,
+       CT_WRAP_ATOMIC
 };
 
 static char* cp_name[] = {
@@ -130,6 +134,9 @@ static char* cp_type[] = {
        "UNALIGNED_LOAD_STORE_WRITE",
        "OVERWRITE_ALLOCATION",
        "WRITE_AFTER_FREE",
+       "READ_AFTER_FREE",
+       "WRITE_BUDDY_AFTER_FREE",
+       "READ_BUDDY_AFTER_FREE",
        "SOFTLOCKUP",
        "HARDLOCKUP",
        "SPINLOCKUP",
@@ -143,6 +150,7 @@ static char* cp_type[] = {
        "WRITE_RO",
        "WRITE_RO_AFTER_INIT",
        "WRITE_KERN",
+       "WRAP_ATOMIC"
 };
 
 static struct jprobe lkdtm;
@@ -338,7 +346,7 @@ static noinline void corrupt_stack(void)
        memset((void *)data, 0, 64);
 }
 
-static void execute_location(void *dst)
+static void noinline execute_location(void *dst)
 {
        void (*func)(void) = dst;
 
@@ -412,12 +420,109 @@ static void lkdtm_do_action(enum ctype which)
                break;
        }
        case CT_WRITE_AFTER_FREE: {
+               int *base, *again;
                size_t len = 1024;
-               u32 *data = kmalloc(len, GFP_KERNEL);
+               /*
+                * The slub allocator uses the first word to store the free
+                * pointer in some configurations. Use the middle of the
+                * allocation to avoid running into the freelist
+                */
+               size_t offset = (len / sizeof(*base)) / 2;
+
+               base = kmalloc(len, GFP_KERNEL);
+               pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+               pr_info("Attempting bad write to freed memory at %p\n",
+                       &base[offset]);
+               kfree(base);
+               base[offset] = 0x0abcdef0;
+               /* Attempt to notice the overwrite. */
+               again = kmalloc(len, GFP_KERNEL);
+               kfree(again);
+               if (again != base)
+                       pr_info("Hmm, didn't get the same memory range.\n");
 
-               kfree(data);
+               break;
+       }
+       case CT_READ_AFTER_FREE: {
+               int *base, *val, saw;
+               size_t len = 1024;
+               /*
+                * The slub allocator uses the first word to store the free
+                * pointer in some configurations. Use the middle of the
+                * allocation to avoid running into the freelist
+                */
+               size_t offset = (len / sizeof(*base)) / 2;
+
+               base = kmalloc(len, GFP_KERNEL);
+               if (!base)
+                       break;
+
+               val = kmalloc(len, GFP_KERNEL);
+               if (!val)
+                       break;
+
+               *val = 0x12345678;
+               base[offset] = *val;
+               pr_info("Value in memory before free: %x\n", base[offset]);
+
+               kfree(base);
+
+               pr_info("Attempting bad read from freed memory\n");
+               saw = base[offset];
+               if (saw != *val) {
+                       /* Good! Poisoning happened, so declare a win. */
+                       pr_info("Memory correctly poisoned (%x)\n", saw);
+                       BUG();
+               }
+               pr_info("Memory was not poisoned\n");
+
+               kfree(val);
+               break;
+       }
+       case CT_WRITE_BUDDY_AFTER_FREE: {
+               unsigned long p = __get_free_page(GFP_KERNEL);
+               if (!p)
+                       break;
+               pr_info("Writing to the buddy page before free\n");
+               memset((void *)p, 0x3, PAGE_SIZE);
+               free_page(p);
                schedule();
-               memset(data, 0x78, len);
+               pr_info("Attempting bad write to the buddy page after free\n");
+               memset((void *)p, 0x78, PAGE_SIZE);
+               /* Attempt to notice the overwrite. */
+               p = __get_free_page(GFP_KERNEL);
+               free_page(p);
+               schedule();
+
+               break;
+       }
+       case CT_READ_BUDDY_AFTER_FREE: {
+               unsigned long p = __get_free_page(GFP_KERNEL);
+               int saw, *val = kmalloc(1024, GFP_KERNEL);
+               int *base;
+
+               if (!p)
+                       break;
+
+               if (!val)
+                       break;
+
+               base = (int *)p;
+
+               *val = 0x12345678;
+               base[0] = *val;
+               pr_info("Value in memory before free: %x\n", base[0]);
+               free_page(p);
+               pr_info("Attempting to read from freed memory\n");
+               saw = base[0];
+               if (saw != *val) {
+                       /* Good! Poisoning happened, so declare a win. */
+                       pr_info("Memory correctly poisoned (%x)\n", saw);
+                       BUG();
+               }
+               pr_info("Buddy page was not poisoned\n");
+
+               kfree(val);
                break;
        }
        case CT_SOFTLOCKUP:
@@ -548,6 +653,17 @@ static void lkdtm_do_action(enum ctype which)
                do_overwritten();
                break;
        }
+       case CT_WRAP_ATOMIC: {
+               atomic_t under = ATOMIC_INIT(INT_MIN);
+               atomic_t over = ATOMIC_INIT(INT_MAX);
+
+               pr_info("attempting atomic underflow\n");
+               atomic_dec(&under);
+               pr_info("attempting atomic overflow\n");
+               atomic_inc(&over);
+
+               return;
+       }
        case CT_NONE:
        default:
                break;
index d23384dde73b97e3abf7ee166c1a13ef3fb83249..c49e1d2269afe9874420ea20a1b7967ab75ab77f 100644 (file)
@@ -1,6 +1,6 @@
 config INTEL_MEI
        tristate "Intel Management Engine Interface"
-       depends on X86 && PCI && WATCHDOG_CORE
+       depends on X86 && PCI
        help
          The Intel Management Engine (Intel ME) provides Manageability,
          Security and Media services for system containing Intel chipsets.
@@ -12,7 +12,7 @@ config INTEL_MEI
 config INTEL_MEI_ME
        tristate "ME Enabled Intel Chipsets"
        select INTEL_MEI
-       depends on X86 && PCI && WATCHDOG_CORE
+       depends on X86 && PCI
        help
          MEI support for ME Enabled Intel chipsets.
 
@@ -37,7 +37,7 @@ config INTEL_MEI_ME
 config INTEL_MEI_TXE
        tristate "Intel Trusted Execution Environment with ME Interface"
        select INTEL_MEI
-       depends on X86 && PCI && WATCHDOG_CORE
+       depends on X86 && PCI
        help
          MEI Support for Trusted Execution Environment device on Intel SoCs
 
index 01447ca21c262db797bce8dd3b2b7eee6f73b1fa..59e6b0aede340752acbcfd14c9f9695334e280c2 100644 (file)
@@ -9,7 +9,6 @@ mei-objs += interrupt.o
 mei-objs += client.o
 mei-objs += main.o
 mei-objs += amthif.o
-mei-objs += wd.o
 mei-objs += bus.o
 mei-objs += bus-fixup.o
 mei-$(CONFIG_DEBUG_FS) += debugfs.o
index cd0403f0926761ec7253c53e399844198728a3ea..194360a5f7823331e1cff910edb31ea0e14514ba 100644 (file)
@@ -50,7 +50,6 @@ void mei_amthif_reset_params(struct mei_device *dev)
        dev->iamthif_current_cb = NULL;
        dev->iamthif_canceled = false;
        dev->iamthif_state = MEI_IAMTHIF_IDLE;
-       dev->iamthif_timer = 0;
        dev->iamthif_stall_timer = 0;
        dev->iamthif_open_count = 0;
 }
@@ -68,11 +67,14 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
        struct mei_cl *cl = &dev->iamthif_cl;
        int ret;
 
+       if (mei_cl_is_connected(cl))
+               return 0;
+
        dev->iamthif_state = MEI_IAMTHIF_IDLE;
 
        mei_cl_init(cl, dev);
 
-       ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+       ret = mei_cl_link(cl);
        if (ret < 0) {
                dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
                return ret;
@@ -80,31 +82,9 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
 
        ret = mei_cl_connect(cl, me_cl, NULL);
 
-       dev->iamthif_state = MEI_IAMTHIF_IDLE;
-
        return ret;
 }
 
-/**
- * mei_amthif_find_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * Return:   returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
-                                               struct file *file)
-{
-       struct mei_cl_cb *cb;
-
-       list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list)
-               if (cb->file_object == file)
-                       return cb;
-       return NULL;
-}
-
-
 /**
  * mei_amthif_read - read data from AMTHIF client
  *
@@ -126,18 +106,11 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
 {
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb;
-       unsigned long timeout;
        int rets;
        int wait_ret;
 
-       /* Only possible if we are in timeout */
-       if (!cl) {
-               dev_err(dev->dev, "bad file ext.\n");
-               return -ETIME;
-       }
-
        dev_dbg(dev->dev, "checking amthif data\n");
-       cb = mei_amthif_find_read_list_entry(dev, file);
+       cb = mei_cl_read_cb(cl, file);
 
        /* Check for if we can block or not*/
        if (cb == NULL && file->f_flags & O_NONBLOCK)
@@ -149,8 +122,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                /* unlock the Mutex */
                mutex_unlock(&dev->device_lock);
 
-               wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
-                       (cb = mei_amthif_find_read_list_entry(dev, file)));
+               wait_ret = wait_event_interruptible(cl->rx_wait,
+                                       !list_empty(&cl->rd_completed) ||
+                                       !mei_cl_is_connected(cl));
 
                /* Locking again the Mutex */
                mutex_lock(&dev->device_lock);
@@ -158,7 +132,12 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                if (wait_ret)
                        return -ERESTARTSYS;
 
-               dev_dbg(dev->dev, "woke up from sleep\n");
+               if (!mei_cl_is_connected(cl)) {
+                       rets = -EBUSY;
+                       goto out;
+               }
+
+               cb = mei_cl_read_cb(cl, file);
        }
 
        if (cb->status) {
@@ -168,24 +147,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
        }
 
        dev_dbg(dev->dev, "Got amthif data\n");
-       dev->iamthif_timer = 0;
-
-       timeout = cb->read_time +
-               mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-       dev_dbg(dev->dev, "amthif timeout = %lud\n",
-                       timeout);
-
-       if  (time_after(jiffies, timeout)) {
-               dev_dbg(dev->dev, "amthif Time out\n");
-               /* 15 sec for the message has expired */
-               list_del_init(&cb->list);
-               rets = -ETIME;
-               goto free;
-       }
        /* if the whole message will fit remove it from the list */
        if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
                list_del_init(&cb->list);
-       else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
+       else if (cb->buf_idx <= *offset) {
                /* end of the message has been reached */
                list_del_init(&cb->list);
                rets = 0;
@@ -195,9 +160,8 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                 * remove message from deletion list
                 */
 
-       dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
-           cb->buf.size);
-       dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
+       dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n",
+               cb->buf.size, cb->buf_idx);
 
        /* length is being truncated to PAGE_SIZE, however,
         * the buf_idx may point beyond */
@@ -229,7 +193,7 @@ out:
  *
  * Return: 0 on success, <0 on failure.
  */
-static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
 {
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;
@@ -248,7 +212,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
        list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 
        dev->iamthif_state = MEI_IAMTHIF_READING;
-       dev->iamthif_file_object = cb->file_object;
+       dev->iamthif_fp = cb->fp;
        dev->iamthif_current_cb = cb;
 
        return 0;
@@ -277,7 +241,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
 
        dev->iamthif_state = MEI_IAMTHIF_WRITING;
        dev->iamthif_current_cb = cb;
-       dev->iamthif_file_object = cb->file_object;
+       dev->iamthif_fp = cb->fp;
        dev->iamthif_canceled = false;
 
        ret = mei_cl_write(cl, cb, false);
@@ -285,7 +249,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
                return ret;
 
        if (cb->completed)
-               cb->status = mei_amthif_read_start(cl, cb->file_object);
+               cb->status = mei_amthif_read_start(cl, cb->fp);
 
        return 0;
 }
@@ -304,8 +268,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
 
        dev->iamthif_canceled = false;
        dev->iamthif_state = MEI_IAMTHIF_IDLE;
-       dev->iamthif_timer = 0;
-       dev->iamthif_file_object = NULL;
+       dev->iamthif_fp = NULL;
 
        dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
 
@@ -329,17 +292,17 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
 int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
 
-       struct mei_device *dev;
-
-       if (WARN_ON(!cl || !cl->dev))
-               return -ENODEV;
+       struct mei_device *dev = cl->dev;
 
-       if (WARN_ON(!cb))
-               return -EINVAL;
+       list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
 
-       dev = cl->dev;
+       /*
+        * The previous request is still being processed, queue this one.
+        */
+       if (dev->iamthif_state > MEI_IAMTHIF_IDLE &&
+           dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE)
+               return 0;
 
-       list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
        return mei_amthif_run_next_cmd(dev);
 }
 
@@ -360,10 +323,10 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
 {
        unsigned int mask = 0;
 
-       poll_wait(file, &dev->iamthif_cl.wait, wait);
+       poll_wait(file, &dev->iamthif_cl.rx_wait, wait);
 
        if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
-           dev->iamthif_file_object == file) {
+           dev->iamthif_fp == file) {
 
                mask |= POLLIN | POLLRDNORM;
                mei_amthif_run_next_cmd(dev);
@@ -393,7 +356,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                return ret;
 
        if (cb->completed)
-               cb->status = mei_amthif_read_start(cl, cb->file_object);
+               cb->status = mei_amthif_read_start(cl, cb->fp);
 
        return 0;
 }
@@ -437,11 +400,12 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
 /**
  * mei_amthif_complete - complete amthif callback.
  *
- * @dev: the device structure.
+ * @cl: host client
  * @cb: callback block.
  */
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
+       struct mei_device *dev = cl->dev;
 
        if (cb->fop_type == MEI_FOP_WRITE) {
                if (!cb->status) {
@@ -453,25 +417,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
                 * in case of error enqueue the write cb to complete read list
                 * so it can be propagated to the reader
                 */
-               list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
-               wake_up_interruptible(&dev->iamthif_cl.wait);
+               list_add_tail(&cb->list, &cl->rd_completed);
+               wake_up_interruptible(&cl->rx_wait);
                return;
        }
 
        if (!dev->iamthif_canceled) {
                dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
                dev->iamthif_stall_timer = 0;
-               list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+               list_add_tail(&cb->list, &cl->rd_completed);
                dev_dbg(dev->dev, "amthif read completed\n");
-               dev->iamthif_timer = jiffies;
-               dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
-                       dev->iamthif_timer);
        } else {
                mei_amthif_run_next_cmd(dev);
        }
 
        dev_dbg(dev->dev, "completing amthif call back.\n");
-       wake_up_interruptible(&dev->iamthif_cl.wait);
+       wake_up_interruptible(&cl->rx_wait);
 }
 
 /**
@@ -497,7 +458,7 @@ static bool mei_clear_list(struct mei_device *dev,
        /* iterate over all list members */
        list_for_each_entry_safe(cb, next, mei_cb_list, list) {
                /* check if list member associated with a file */
-               if (file == cb->file_object) {
+               if (file == cb->fp) {
                        /* check if cb equal to current iamthif cb */
                        if (dev->iamthif_current_cb == cb) {
                                dev->iamthif_current_cb = NULL;
@@ -523,13 +484,14 @@ static bool mei_clear_list(struct mei_device *dev,
  *
  * Return: true if callback removed from the list, false otherwise
  */
-static bool mei_clear_lists(struct mei_device *dev, struct file *file)
+static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
 {
        bool removed = false;
+       struct mei_cl *cl = &dev->iamthif_cl;
 
        /* remove callbacks associated with a file */
        mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
-       if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list))
+       if (mei_clear_list(dev, file, &cl->rd_completed))
                removed = true;
 
        mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
@@ -546,7 +508,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file)
        /* check if iamthif_current_cb not NULL */
        if (dev->iamthif_current_cb && !removed) {
                /* check file and iamthif current cb association */
-               if (dev->iamthif_current_cb->file_object == file) {
+               if (dev->iamthif_current_cb->fp == file) {
                        /* remove cb */
                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;
@@ -569,7 +531,7 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
        if (dev->iamthif_open_count > 0)
                dev->iamthif_open_count--;
 
-       if (dev->iamthif_file_object == file &&
+       if (dev->iamthif_fp == file &&
            dev->iamthif_state != MEI_IAMTHIF_IDLE) {
 
                dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
index 020de5919c2111a0711e69e79d64f85bdbe1de0e..e9e6ea3ab73cf3500657d8c6001cb4eff5712568 100644 (file)
@@ -35,6 +35,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
 #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
                        0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
 
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+                           0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
 #define MEI_UUID_ANY NULL_UUID_LE
 
 /**
@@ -48,8 +51,7 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
  */
 static void number_of_connections(struct mei_cl_device *cldev)
 {
-       dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
-                       __func__, mei_me_cl_uuid(cldev->me_cl));
+       dev_dbg(&cldev->dev, "running hook %s\n", __func__);
 
        if (cldev->me_cl->props.max_number_of_connections > 1)
                cldev->do_match = 0;
@@ -62,11 +64,36 @@ static void number_of_connections(struct mei_cl_device *cldev)
  */
 static void blacklist(struct mei_cl_device *cldev)
 {
-       dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
-                       __func__, mei_me_cl_uuid(cldev->me_cl));
+       dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
        cldev->do_match = 0;
 }
 
+/**
+ * mei_wd - wd client on the bus, change protocol version
+ *   as the API has changed.
+ *
+ * @cldev: me clients device
+ */
+#if IS_ENABLED(CONFIG_INTEL_MEI_ME)
+#include <linux/pci.h>
+#include "hw-me-regs.h"
+static void mei_wd(struct mei_cl_device *cldev)
+{
+       struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
+
+       dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+       if (pdev->device == MEI_DEV_ID_WPT_LP ||
+           pdev->device == MEI_DEV_ID_SPT ||
+           pdev->device == MEI_DEV_ID_SPT_H)
+               cldev->me_cl->props.protocol_version = 0x2;
+
+       cldev->do_match = 1;
+}
+#else
+static inline void mei_wd(struct mei_cl_device *cldev) {}
+#endif /* CONFIG_INTEL_MEI_ME */
+
 struct mei_nfc_cmd {
        u8 command;
        u8 status;
@@ -208,12 +235,11 @@ static void mei_nfc(struct mei_cl_device *cldev)
 
        bus = cldev->bus;
 
-       dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
-               __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+       dev_dbg(&cldev->dev, "running hook %s\n", __func__);
 
        mutex_lock(&bus->device_lock);
        /* we need to connect to INFO GUID */
-       cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+       cl = mei_cl_alloc_linked(bus);
        if (IS_ERR(cl)) {
                ret = PTR_ERR(cl);
                cl = NULL;
@@ -282,6 +308,7 @@ static struct mei_fixup {
        MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
        MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
        MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+       MEI_FIXUP(MEI_UUID_WD, mei_wd),
 };
 
 /**
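
The mei_wd() hook added above only tweaks props.protocol_version for a few PCI device ids and then lets the watchdog client match; it is attached through the same MEI_FIXUP table that already carries the NFC hooks. The following stand-alone sketch shows the general shape of such an identifier-keyed fixup table, under the assumption of invented types and a string identifier (struct device, apply_fixups and "wd-uuid" are hypothetical, not driver code).

#include <stdio.h>
#include <string.h>

struct device { const char *uuid; int do_match; int proto; };

struct fixup {
        const char *uuid;                  /* NULL acts like a wildcard entry */
        void (*hook)(struct device *dev);
};

static void wd_hook(struct device *dev)
{
        dev->proto = 2;                    /* like bumping props.protocol_version */
        dev->do_match = 1;
}

static void apply_fixups(struct device *dev,
                         const struct fixup *tab, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (!tab[i].uuid || !strcmp(tab[i].uuid, dev->uuid))
                        tab[i].hook(dev);
}

int main(void)
{
        const struct fixup table[] = { { "wd-uuid", wd_hook } };
        struct device dev = { .uuid = "wd-uuid" };

        apply_fixups(&dev, table, 1);
        printf("proto=%d match=%d\n", dev.proto, dev.do_match);
        return 0;
}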
index 0b05aa9387996484759e748831eba5304ac3a409..5d5996e39a67a724f5201c7a92e2601b41f2ff8b 100644 (file)
@@ -44,7 +44,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
                        bool blocking)
 {
        struct mei_device *bus;
-       struct mei_cl_cb *cb = NULL;
+       struct mei_cl_cb *cb;
        ssize_t rets;
 
        if (WARN_ON(!cl || !cl->dev))
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
        bus = cl->dev;
 
        mutex_lock(&bus->device_lock);
+       if (bus->dev_state != MEI_DEV_ENABLED) {
+               rets = -ENODEV;
+               goto out;
+       }
+
        if (!mei_cl_is_connected(cl)) {
                rets = -ENODEV;
                goto out;
@@ -81,8 +86,6 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 
 out:
        mutex_unlock(&bus->device_lock);
-       if (rets < 0)
-               mei_io_cb_free(cb);
 
        return rets;
 }
@@ -109,6 +112,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
        bus = cl->dev;
 
        mutex_lock(&bus->device_lock);
+       if (bus->dev_state != MEI_DEV_ENABLED) {
+               rets = -ENODEV;
+               goto out;
+       }
 
        cb = mei_cl_read_cb(cl, NULL);
        if (cb)
@@ -230,45 +237,55 @@ static void mei_cl_bus_event_work(struct work_struct *work)
  * mei_cl_bus_notify_event - schedule notify cb on bus client
  *
  * @cl: host client
+ *
+ * Return: true if event was scheduled
+ *         false if the client is not waiting for event
  */
-void mei_cl_bus_notify_event(struct mei_cl *cl)
+bool mei_cl_bus_notify_event(struct mei_cl *cl)
 {
        struct mei_cl_device *cldev = cl->cldev;
 
        if (!cldev || !cldev->event_cb)
-               return;
+               return false;
 
        if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
-               return;
+               return false;
 
        if (!cl->notify_ev)
-               return;
+               return false;
 
        set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
 
        schedule_work(&cldev->event_work);
 
        cl->notify_ev = false;
+
+       return true;
 }
 
 /**
- * mei_cl_bus_rx_event  - schedule rx evenet
+ * mei_cl_bus_rx_event  - schedule rx event
  *
  * @cl: host client
+ *
+ * Return: true if event was scheduled
+ *         false if the client is not waiting for event
  */
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+bool mei_cl_bus_rx_event(struct mei_cl *cl)
 {
        struct mei_cl_device *cldev = cl->cldev;
 
        if (!cldev || !cldev->event_cb)
-               return;
+               return false;
 
        if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
-               return;
+               return false;
 
        set_bit(MEI_CL_EVENT_RX, &cldev->events);
 
        schedule_work(&cldev->event_work);
+
+       return true;
 }
 
 /**
@@ -398,7 +415,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
        if (!cl) {
                mutex_lock(&bus->device_lock);
-               cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+               cl = mei_cl_alloc_linked(bus);
                mutex_unlock(&bus->device_lock);
                if (IS_ERR(cl))
                        return PTR_ERR(cl);
@@ -958,6 +975,22 @@ void mei_cl_bus_rescan(struct mei_device *bus)
        dev_dbg(bus->dev, "rescan end");
 }
 
+void mei_cl_bus_rescan_work(struct work_struct *work)
+{
+       struct mei_device *bus =
+               container_of(work, struct mei_device, bus_rescan_work);
+       struct mei_me_client *me_cl;
+
+       mutex_lock(&bus->device_lock);
+       me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
+       if (me_cl)
+               mei_amthif_host_init(bus, me_cl);
+       mei_me_cl_put(me_cl);
+       mutex_unlock(&bus->device_lock);
+
+       mei_cl_bus_rescan(bus);
+}
+
 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
                                struct module *owner)
 {
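
Since mei_cl_bus_rx_event() and mei_cl_bus_notify_event() now report whether they actually scheduled bus work, the completion paths can fall back to waking the character-device waitqueue only when no bus consumer claimed the event. A small sketch of that try-the-bus-first dispatch, with invented userspace stand-ins (struct client, bus_rx_event and wake_reader are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct client {
        bool has_bus_listener;     /* stands in for cldev->event_cb */
        void (*wake_reader)(void); /* stands in for wake_up_interruptible */
};

static bool bus_rx_event(struct client *cl)
{
        if (!cl->has_bus_listener)
                return false;      /* no bus consumer, caller falls back */
        printf("scheduled bus event work\n");
        return true;
}

static void complete_read(struct client *cl)
{
        if (!bus_rx_event(cl))
                cl->wake_reader(); /* wake readers only when the bus did not take it */
}

static void wake_reader(void) { printf("reader woken\n"); }

int main(void)
{
        struct client cl = { .has_bus_listener = false, .wake_reader = wake_reader };

        complete_read(&cl);
        return 0;
}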
index a6c87c713193808365071a8e0ad34fd17cea0eab..bab17e4197b68b4763bf05a92d14ffffbaf70f4a 100644 (file)
@@ -359,7 +359,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
  * Return: mei_cl_cb pointer or NULL;
  */
 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-                                struct file *fp)
+                                const struct file *fp)
 {
        struct mei_cl_cb *cb;
 
@@ -368,7 +368,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
                return NULL;
 
        INIT_LIST_HEAD(&cb->list);
-       cb->file_object = fp;
+       cb->fp = fp;
        cb->cl = cl;
        cb->buf_idx = 0;
        cb->fop_type = type;
@@ -455,7 +455,8 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
  * Return: cb on success and NULL on failure
  */
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-                                 enum mei_cb_file_ops type, struct file *fp)
+                                 enum mei_cb_file_ops type,
+                                 const struct file *fp)
 {
        struct mei_cl_cb *cb;
 
@@ -485,7 +486,7 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
        struct mei_cl_cb *cb;
 
        list_for_each_entry(cb, &cl->rd_completed, list)
-               if (!fp || fp == cb->file_object)
+               if (!fp || fp == cb->fp)
                        return cb;
 
        return NULL;
@@ -503,12 +504,12 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
        struct mei_cl_cb *cb, *next;
 
        list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
-               if (!fp || fp == cb->file_object)
+               if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);
 
 
        list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
-               if (!fp || fp == cb->file_object)
+               if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);
 }
 
@@ -535,7 +536,6 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
        mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-       mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
 
        mei_cl_read_cb_flush(cl, fp);
 
@@ -587,27 +587,23 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
  * mei_cl_link - allocate host id in the host map
  *
  * @cl: host client
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  *
  * Return: 0 on success
  *     -EINVAL on incorrect values
  *     -EMFILE if open count exceeded.
  */
-int mei_cl_link(struct mei_cl *cl, int id)
+int mei_cl_link(struct mei_cl *cl)
 {
        struct mei_device *dev;
        long open_handle_count;
+       int id;
 
        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;
 
        dev = cl->dev;
 
-       /* If Id is not assigned get one*/
-       if (id == MEI_HOST_CLIENT_ID_ANY)
-               id = find_first_zero_bit(dev->host_clients_map,
-                                       MEI_CLIENTS_MAX);
-
+       id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
        if (id >= MEI_CLIENTS_MAX) {
                dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
                return -EMFILE;
@@ -648,7 +644,7 @@ int mei_cl_unlink(struct mei_cl *cl)
        if (!cl)
                return 0;
 
-       /* wd and amthif might not be initialized */
+       /* amthif might not be initialized */
        if (!cl->dev)
                return 0;
 
@@ -670,31 +666,12 @@ int mei_cl_unlink(struct mei_cl *cl)
        return 0;
 }
 
-
-void mei_host_client_init(struct work_struct *work)
+void mei_host_client_init(struct mei_device *dev)
 {
-       struct mei_device *dev =
-               container_of(work, struct mei_device, init_work);
-       struct mei_me_client *me_cl;
-
-       mutex_lock(&dev->device_lock);
-
-
-       me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
-       if (me_cl)
-               mei_amthif_host_init(dev, me_cl);
-       mei_me_cl_put(me_cl);
-
-       me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
-       if (me_cl)
-               mei_wd_host_init(dev, me_cl);
-       mei_me_cl_put(me_cl);
-
        dev->dev_state = MEI_DEV_ENABLED;
        dev->reset_count = 0;
-       mutex_unlock(&dev->device_lock);
 
-       mei_cl_bus_rescan(dev);
+       schedule_work(&dev->bus_rescan_work);
 
        pm_runtime_mark_last_busy(dev->dev);
        dev_dbg(dev->dev, "rpm: autosuspend\n");
@@ -725,6 +702,33 @@ bool mei_hbuf_acquire(struct mei_device *dev)
        return true;
 }
 
+/**
+ * mei_cl_wake_all - wake up readers, writers and event waiters so
+ *                 they can be interrupted
+ *
+ * @cl: host client
+ */
+static void mei_cl_wake_all(struct mei_cl *cl)
+{
+       struct mei_device *dev = cl->dev;
+
+       /* synchronized under device mutex */
+       if (waitqueue_active(&cl->rx_wait)) {
+               cl_dbg(dev, cl, "Waking up reading client!\n");
+               wake_up_interruptible(&cl->rx_wait);
+       }
+       /* synchronized under device mutex */
+       if (waitqueue_active(&cl->tx_wait)) {
+               cl_dbg(dev, cl, "Waking up writing client!\n");
+               wake_up_interruptible(&cl->tx_wait);
+       }
+       /* synchronized under device mutex */
+       if (waitqueue_active(&cl->ev_wait)) {
+               cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+               wake_up_interruptible(&cl->ev_wait);
+       }
+}
+
 /**
  * mei_cl_set_disconnected - set disconnected state and clear
  *   associated states and resources
@@ -740,8 +744,11 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
                return;
 
        cl->state = MEI_FILE_DISCONNECTED;
+       mei_io_list_free(&dev->write_list, cl);
+       mei_io_list_free(&dev->write_waiting_list, cl);
        mei_io_list_flush(&dev->ctrl_rd_list, cl);
        mei_io_list_flush(&dev->ctrl_wr_list, cl);
+       mei_cl_wake_all(cl);
        cl->mei_flow_ctrl_creds = 0;
        cl->timer_count = 0;
 
@@ -1034,7 +1041,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
  * Return: 0 on success, <0 on failure.
  */
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-                  struct file *file)
+                 const struct file *file)
 {
        struct mei_device *dev;
        struct mei_cl_cb *cb;
@@ -1119,11 +1126,10 @@ nortpm:
  * mei_cl_alloc_linked - allocate and link host client
  *
  * @dev: the device structure
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  *
  * Return: cl on success ERR_PTR on failure
  */
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
 {
        struct mei_cl *cl;
        int ret;
@@ -1134,7 +1140,7 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
                goto err;
        }
 
-       ret = mei_cl_link(cl, id);
+       ret = mei_cl_link(cl);
        if (ret)
                goto err;
 
@@ -1149,11 +1155,12 @@ err:
 /**
  * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
  *
- * @cl: private data of the file object
+ * @cl: host client
+ * @fp: the file pointer associated with the host client
  *
  * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
  */
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
 {
        int rets;
 
@@ -1164,7 +1171,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
                return 1;
 
        if (mei_cl_is_fixed_address(cl)) {
-               rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+               rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
                if (rets && rets != -EBUSY)
                        return rets;
                return 1;
@@ -1186,7 +1193,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
  *     0 on success
  *     -EINVAL when ctrl credits are <= 0
  */
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
 {
        if (WARN_ON(!cl || !cl->me_cl))
                return -EINVAL;
@@ -1283,7 +1290,8 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
  *
 * Return: 0 on success and <0 otherwise.
  */
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+int mei_cl_notify_request(struct mei_cl *cl,
+                         const struct file *file, u8 request)
 {
        struct mei_device *dev;
        struct mei_cl_cb *cb;
@@ -1368,12 +1376,12 @@ void mei_cl_notify(struct mei_cl *cl)
 
        cl_dbg(dev, cl, "notify event");
        cl->notify_ev = true;
-       wake_up_interruptible_all(&cl->ev_wait);
+       if (!mei_cl_bus_notify_event(cl))
+               wake_up_interruptible(&cl->ev_wait);
 
        if (cl->ev_async)
                kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
 
-       mei_cl_bus_notify_event(cl);
 }
 
 /**
@@ -1421,6 +1429,25 @@ out:
        return 0;
 }
 
+/**
+ * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
+ *                        for given host client
+ *
+ * @cl: host client
+ *
+ * Return: true if at least one such cb was found.
+ */
+static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
+{
+       struct mei_device *dev = cl->dev;
+       struct mei_cl_cb *cb;
+
+       list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
+               if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
+                       return true;
+       return false;
+}
+
 /**
  * mei_cl_read_start - the start read client message function.
  *
@@ -1430,7 +1457,7 @@ out:
  *
  * Return: 0 on success, <0 on failure.
  */
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
 {
        struct mei_device *dev;
        struct mei_cl_cb *cb;
@@ -1445,7 +1472,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
                return -ENODEV;
 
        /* HW currently supports only one pending read */
-       if (!list_empty(&cl->rd_pending))
+       if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
                return -EBUSY;
 
        if (!mei_me_cl_is_active(cl->me_cl)) {
@@ -1524,7 +1551,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 
        first_chunk = cb->buf_idx == 0;
 
-       rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
+       rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
        if (rets < 0)
                return rets;
 
@@ -1556,7 +1583,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                return 0;
        }
 
-       cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
+       cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
                        cb->buf.size, cb->buf_idx);
 
        rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
@@ -1618,7 +1645,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
-               return rets;
+               goto free;
        }
 
        cb->buf_idx = 0;
@@ -1630,7 +1657,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
        mei_hdr.msg_complete = 0;
        mei_hdr.internal = cb->internal;
 
-       rets = mei_cl_flow_ctrl_creds(cl);
+       rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
        if (rets < 0)
                goto err;
 
@@ -1677,7 +1704,8 @@ out:
 
                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
-                               cl->writing_state == MEI_WRITE_COMPLETE);
+                               cl->writing_state == MEI_WRITE_COMPLETE ||
+                               (!mei_cl_is_connected(cl)));
                mutex_lock(&dev->device_lock);
                /* wait_event_interruptible returns -ERESTARTSYS */
                if (rets) {
@@ -1685,6 +1713,10 @@ out:
                                rets = -EINTR;
                        goto err;
                }
+               if (cl->writing_state != MEI_WRITE_COMPLETE) {
+                       rets = -EFAULT;
+                       goto err;
+               }
        }
 
        rets = size;
@@ -1692,6 +1724,8 @@ err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
+free:
+       mei_io_cb_free(cb);
 
        return rets;
 }
@@ -1721,10 +1755,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
 
        case MEI_FOP_READ:
                list_add_tail(&cb->list, &cl->rd_completed);
-               if (waitqueue_active(&cl->rx_wait))
-                       wake_up_interruptible_all(&cl->rx_wait);
-               else
-                       mei_cl_bus_rx_event(cl);
+               if (!mei_cl_bus_rx_event(cl))
+                       wake_up_interruptible(&cl->rx_wait);
                break;
 
        case MEI_FOP_CONNECT:
@@ -1753,44 +1785,3 @@ void mei_cl_all_disconnect(struct mei_device *dev)
        list_for_each_entry(cl, &dev->file_list, link)
                mei_cl_set_disconnected(cl);
 }
-
-
-/**
- * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted
- *
- * @dev: mei device
- */
-void mei_cl_all_wakeup(struct mei_device *dev)
-{
-       struct mei_cl *cl;
-
-       list_for_each_entry(cl, &dev->file_list, link) {
-               if (waitqueue_active(&cl->rx_wait)) {
-                       cl_dbg(dev, cl, "Waking up reading client!\n");
-                       wake_up_interruptible(&cl->rx_wait);
-               }
-               if (waitqueue_active(&cl->tx_wait)) {
-                       cl_dbg(dev, cl, "Waking up writing client!\n");
-                       wake_up_interruptible(&cl->tx_wait);
-               }
-
-               /* synchronized under device mutex */
-               if (waitqueue_active(&cl->ev_wait)) {
-                       cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
-                       wake_up_interruptible(&cl->ev_wait);
-               }
-       }
-}
-
-/**
- * mei_cl_all_write_clear - clear all pending writes
- *
- * @dev: mei device
- */
-void mei_cl_all_write_clear(struct mei_device *dev)
-{
-       mei_io_list_free(&dev->write_list, NULL);
-       mei_io_list_free(&dev->write_waiting_list, NULL);
-}
-
-
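
The blocking-write path in mei_cl_write() now also wakes up on disconnect and treats any wake-up without MEI_WRITE_COMPLETE as a failure, so a writer cannot sleep forever across a link drop. A tiny sketch of that post-sleep check, with made-up names (finish_blocking_write and its -1 return stand in for the new -EFAULT exit; they are not driver API):

#include <stdio.h>

enum write_state { WRITE_IDLE, WRITE_QUEUED, WRITE_COMPLETE };

/* Called after the sleep returns, i.e. after either the write completed
 * or the client was disconnected and its waiter was woken. */
static int finish_blocking_write(enum write_state st)
{
        if (st != WRITE_COMPLETE)
                return -1;      /* stands in for the new -EFAULT path */
        return 0;
}

int main(void)
{
        printf("%d\n", finish_blocking_write(WRITE_COMPLETE)); /* 0 */
        printf("%d\n", finish_blocking_write(WRITE_QUEUED));   /* -1 */
        return 0;
}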
index 04e1aa39243f79e69a76f1ef689bc5f34cb5c435..0d7a3a1fef7891ca19eb4d2bfb98d54abede69cc 100644 (file)
@@ -18,7 +18,6 @@
 #define _MEI_CLIENT_H_
 
 #include <linux/types.h>
-#include <linux/watchdog.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
 
@@ -84,7 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
  * MEI IO Functions
  */
 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-                                struct file *fp);
+                                const struct file *fp);
 void mei_io_cb_free(struct mei_cl_cb *priv_cb);
 int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
 
@@ -108,21 +107,19 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev);
 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
 
 
-int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_link(struct mei_cl *cl);
 int mei_cl_unlink(struct mei_cl *cl);
 
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
 
 struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
                                 const struct file *fp);
 void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-                                 enum mei_cb_file_ops type, struct file *fp);
+                                 enum mei_cb_file_ops type,
+                                 const struct file *fp);
 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
 
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
-
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
 /*
  *  MEI input output function prototype
  */
@@ -217,10 +214,10 @@ void mei_cl_set_disconnected(struct mei_cl *cl);
 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
                          struct mei_cl_cb *cmpl_list);
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-                  struct file *file);
+                  const struct file *file);
 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
                              struct mei_cl_cb *cmpl_list);
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
 int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
                        struct mei_cl_cb *cmpl_list);
 int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
@@ -229,19 +226,18 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 
 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 
-void mei_host_client_init(struct work_struct *work);
+void mei_host_client_init(struct mei_device *dev);
 
 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_notify_request(struct mei_cl *cl,
+                         const struct file *file, u8 request);
 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
                      struct mei_cl_cb *cmpl_list);
 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
 void mei_cl_notify(struct mei_cl *cl);
 
 void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_wakeup(struct mei_device *dev);
-void mei_cl_all_write_clear(struct mei_device *dev);
 
 #define MEI_CL_FMT "cl:host=%02d me=%02d "
 #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
@@ -249,6 +245,9 @@ void mei_cl_all_write_clear(struct mei_device *dev);
 #define cl_dbg(dev, cl, format, arg...) \
        dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
 
+#define cl_warn(dev, cl, format, arg...) \
+       dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
 #define cl_err(dev, cl, format, arg...) \
        dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
 
index a138d8a27ab598445bf4eed84ea3c4f98716cc02..c6c051b52f55faf1b5896164a4e5e3e251f86899 100644 (file)
@@ -50,6 +50,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
        }
 
        pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
 
        /*  if the driver is not enabled the list won't be consistent */
        if (dev->dev_state != MEI_DEV_ENABLED)
@@ -90,23 +91,37 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
 {
        struct mei_device *dev = fp->private_data;
        struct mei_cl *cl;
-       const size_t bufsz = 1024;
+       size_t bufsz = 1;
        char *buf;
        int i = 0;
        int pos = 0;
        int ret;
 
+#define HDR "   |me|host|state|rd|wr|\n"
+
        if (!dev)
                return -ENODEV;
 
+       mutex_lock(&dev->device_lock);
+
+       /*
+        * if the driver is not enabled the list won't be consistent,
+        * so output an empty table
+        */
+       if (dev->dev_state == MEI_DEV_ENABLED)
+               list_for_each_entry(cl, &dev->file_list, link)
+                       bufsz++;
+
+       bufsz *= sizeof(HDR) + 1;
+
        buf = kzalloc(bufsz, GFP_KERNEL);
-       if  (!buf)
+       if  (!buf) {
+               mutex_unlock(&dev->device_lock);
                return -ENOMEM;
+       }
 
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "  |me|host|state|rd|wr|\n");
-
-       mutex_lock(&dev->device_lock);
+       pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
 
        /*  if the driver is not enabled the list won't be consistent */
        if (dev->dev_state != MEI_DEV_ENABLED)
@@ -115,7 +130,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
        list_for_each_entry(cl, &dev->file_list, link) {
 
                pos += scnprintf(buf + pos, bufsz - pos,
-                       "%2d|%2d|%4d|%5d|%2d|%2d|\n",
+                       "%3d|%2d|%4d|%5d|%2d|%2d|\n",
                        i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
                        !list_empty(&cl->rd_completed), cl->writing_state);
                i++;
@@ -150,16 +165,21 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
        pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
                        mei_hbm_state_str(dev->hbm_state));
 
-       if (dev->hbm_state == MEI_HBM_STARTED) {
+       if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
+           dev->hbm_state <= MEI_HBM_STARTED) {
                pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
                pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
                                 dev->hbm_f_pg_supported);
                pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
                                 dev->hbm_f_dc_supported);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n",
+                                dev->hbm_f_ie_supported);
                pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
                                 dev->hbm_f_dot_supported);
                pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
                                 dev->hbm_f_ev_supported);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
+                                dev->hbm_f_fa_supported);
        }
 
        pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",
@@ -175,6 +195,30 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
        .llseek = generic_file_llseek,
 };
 
+static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct mei_device *dev;
+       int ret;
+
+       dev = container_of(file->private_data,
+                          struct mei_device, allow_fixed_address);
+
+       ret = debugfs_write_file_bool(file, user_buf, count, ppos);
+       if (ret < 0)
+               return ret;
+       dev->override_fixed_address = true;
+       return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_allow_fa = {
+       .open = simple_open,
+       .read = debugfs_read_file_bool,
+       .write = mei_dbgfs_write_allow_fa,
+       .llseek = generic_file_llseek,
+};
+
 /**
  * mei_dbgfs_deregister - Remove the debugfs files and directories
  *
@@ -224,8 +268,9 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
                dev_err(dev->dev, "devstate: registration failed\n");
                goto err;
        }
-       f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
-                               &dev->allow_fixed_address);
+       f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+                               &dev->allow_fixed_address,
+                               &mei_dbgfs_fops_allow_fa);
        if (!f) {
                dev_err(dev->dev, "allow_fixed_address: registration failed\n");
                goto err;
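
The active-clients dump now sizes its buffer from the live file_list instead of a fixed 1024 bytes: one slot for the header plus one per linked client, each at most sizeof(HDR) + 1 bytes. A back-of-the-envelope check of that arithmetic, assuming a hypothetical three linked clients:

#include <stdio.h>

#define HDR "   |me|host|state|rd|wr|\n"

int main(void)
{
        size_t clients = 3;     /* hypothetical length of dev->file_list */
        size_t bufsz = (1 + clients) * (sizeof(HDR) + 1);

        /* sizeof(HDR) is 26 (25 visible chars + NUL), so bufsz is 4 * 27 = 108 */
        printf("bufsz = %zu\n", bufsz);
        return 0;
}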
index e7b7aad0999bc665e01da3c94b75ee91caa86c31..5e305d2605f30079bcb6f7193b28c812683ee92f 100644 (file)
@@ -301,7 +301,10 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
        enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
        memset(enum_req, 0, len);
        enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-       enum_req->allow_add = dev->hbm_f_dc_supported;
+       enum_req->flags |= dev->hbm_f_dc_supported ?
+                          MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+       enum_req->flags |= dev->hbm_f_ie_supported ?
+                          MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
 
        ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
        if (ret) {
@@ -401,6 +404,9 @@ static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
        if (ret)
                status = !MEI_HBMS_SUCCESS;
 
+       if (dev->dev_state == MEI_DEV_ENABLED)
+               schedule_work(&dev->bus_rescan_work);
+
        return mei_hbm_add_cl_resp(dev, req->me_addr, status);
 }
 
@@ -543,7 +549,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
        /* We got all client properties */
        if (next_client_index == MEI_CLIENTS_MAX) {
                dev->hbm_state = MEI_HBM_STARTED;
-               schedule_work(&dev->init_work);
+               mei_host_client_init(dev);
 
                return 0;
        }
@@ -789,8 +795,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
                cl->state = MEI_FILE_CONNECTED;
        else {
                cl->state = MEI_FILE_DISCONNECT_REPLY;
-               if (rs->status == MEI_CL_CONN_NOT_FOUND)
+               if (rs->status == MEI_CL_CONN_NOT_FOUND) {
                        mei_me_cl_del(dev, cl->me_cl);
+                       if (dev->dev_state == MEI_DEV_ENABLED)
+                               schedule_work(&dev->bus_rescan_work);
+               }
        }
        cl->status = mei_cl_conn_status_to_errno(rs->status);
 }
@@ -866,7 +875,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
 
        cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
        if (cl) {
-               cl_dbg(dev, cl, "fw disconnect request received\n");
+               cl_warn(dev, cl, "fw disconnect request received\n");
                cl->state = MEI_FILE_DISCONNECTING;
                cl->timer_count = 0;
 
@@ -972,6 +981,9 @@ static void mei_hbm_config_features(struct mei_device *dev)
        if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
                dev->hbm_f_dc_supported = 1;
 
+       if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
+               dev->hbm_f_ie_supported = 1;
+
        /* disconnect on connect timeout instead of link reset */
        if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
                dev->hbm_f_dot_supported = 1;
@@ -979,6 +991,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
        /* Notification Event Support */
        if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
                dev->hbm_f_ev_supported = 1;
+
+       /* Fixed Address Client Support */
+       if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
+               dev->hbm_f_fa_supported = 1;
 }
 
 /**
index a8a68acd326752980e8b31a61c8b0b9a891018c8..0dcb854b4bfcdd38f80bdce7b5302e71d5101a11 100644 (file)
 #define MEI_DEV_ID_SPT_2      0x9D3B  /* Sunrise Point 2 */
 #define MEI_DEV_ID_SPT_H      0xA13A  /* Sunrise Point H */
 #define MEI_DEV_ID_SPT_H_2    0xA13B  /* Sunrise Point H 2 */
+
+#define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
+#define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
+
 /*
  * MEI HW Section
  */
index 25b1997a62cbc38626ffb3b0166efe649aed9e08..e2fb44cc5c37668765e31f6e2b279cb3ee249229 100644 (file)
@@ -189,8 +189,11 @@ static int mei_me_fw_status(struct mei_device *dev,
 
        fw_status->count = fw_src->count;
        for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
-               ret = pci_read_config_dword(pdev,
-                       fw_src->status[i], &fw_status->status[i]);
+               ret = pci_read_config_dword(pdev, fw_src->status[i],
+                                           &fw_status->status[i]);
+               trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+                                      fw_src->status[i],
+                                      fw_status->status[i]);
                if (ret)
                        return ret;
        }
@@ -215,6 +218,7 @@ static void mei_me_hw_config(struct mei_device *dev)
 
        reg = 0;
        pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+       trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        hw->d0i3_supported =
                ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
 
@@ -1248,6 +1252,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
        u32 reg;
 
        pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+       trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
        /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
        return (reg & 0x600) == 0x200;
 }
@@ -1260,6 +1265,7 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
        u32 reg;
        /* Read ME FW Status check for SPS Firmware */
        pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+       trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        /* if bits [19:16] = 15, running SPS Firmware */
        return (reg & 0xf0000) == 0xf0000;
 }
index bae680c648ffc9dcd188eefac028028f4f0a2d2f..4a6c1b85f11e71d5cb09e0568527a0b4d494cbed 100644 (file)
@@ -28,6 +28,9 @@
 #include "client.h"
 #include "hbm.h"
 
+#include "mei-trace.h"
+
+
 /**
  * mei_txe_reg_read - Reads 32bit data from the txe device
  *
@@ -640,8 +643,11 @@ static int mei_txe_fw_status(struct mei_device *dev,
 
        fw_status->count = fw_src->count;
        for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
-               ret = pci_read_config_dword(pdev,
-                       fw_src->status[i], &fw_status->status[i]);
+               ret = pci_read_config_dword(pdev, fw_src->status[i],
+                                           &fw_status->status[i]);
+               trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+                                      fw_src->status[i],
+                                      fw_status->status[i]);
                if (ret)
                        return ret;
        }
index 4cebde85924f8f33085b45e27b7df8deae17ada5..9daf3f9aed2508e08070cb3ef96a53fc43f8d5f9 100644 (file)
@@ -29,7 +29,6 @@
 #define MEI_CLIENTS_INIT_TIMEOUT   15  /* HPS: Clients Enumeration Timeout */
 
 #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */
-#define MEI_IAMTHIF_READ_TIMER     10  /* HPS */
 
 #define MEI_PGI_TIMEOUT             1  /* PG Isolation time response 1 sec */
 #define MEI_D0I3_TIMEOUT            5  /* D0i3 set/unset max response time */
 #define HBM_MINOR_VERSION_DC               0
 #define HBM_MAJOR_VERSION_DC               2
 
+/*
+ * MEI version with immediate reply to enum request support
+ */
+#define HBM_MINOR_VERSION_IE               0
+#define HBM_MAJOR_VERSION_IE               2
+
 /*
  * MEI version with disconnect on connection timeout support
  */
 #define HBM_MINOR_VERSION_EV               0
 #define HBM_MAJOR_VERSION_EV               2
 
+/*
+ * MEI version with fixed address client support
+ */
+#define HBM_MINOR_VERSION_FA               0
+#define HBM_MAJOR_VERSION_FA               2
+
 /* Host bus message command opcode */
 #define MEI_HBM_CMD_OP_MSK                  0x7f
 /* Host bus message command RESPONSE */
@@ -241,15 +252,26 @@ struct hbm_me_stop_request {
 } __packed;
 
 /**
- * struct hbm_host_enum_request -  enumeration request from host to fw
+ * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0)
  *
- * @hbm_cmd: bus message command header
- * @allow_add: allow dynamic clients add HBM version >= 2.0
+ * @MEI_HBM_ENUM_F_ALLOW_ADD: allow dynamic clients add
+ * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send answer immediately
+ */
+enum hbm_host_enum_flags {
+       MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0),
+       MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1),
+};
+
+/**
+ * struct hbm_host_enum_request - enumeration request from host to fw
+ *
+ * @hbm_cmd : bus message command header
+ * @flags   : request flags
  * @reserved: reserved
  */
 struct hbm_host_enum_request {
        u8 hbm_cmd;
-       u8 allow_add;
+       u8 flags;
        u8 reserved[2];
 } __packed;
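
The enumeration request now carries a flags byte built from feature bits rather than a single allow_add boolean. The short sketch below mirrors how mei_hbm_enum_clients_req() composes that byte; the BIT() macro is expanded by hand and the two feature variables are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define F_ALLOW_ADD      (1u << 0)  /* MEI_HBM_ENUM_F_ALLOW_ADD */
#define F_IMMEDIATE_ENUM (1u << 1)  /* MEI_HBM_ENUM_F_IMMEDIATE_ENUM */

int main(void)
{
        int dc_supported = 1;        /* hypothetical feature bits */
        int ie_supported = 1;

        uint8_t flags = 0;
        flags |= dc_supported ? F_ALLOW_ADD : 0;
        flags |= ie_supported ? F_IMMEDIATE_ENUM : 0;

        printf("flags = 0x%02x\n", flags);  /* 0x03 when both are supported */
        return 0;
}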
 
index 3edafc8d3ad49aac9aac9ae884c2a20856ff691f..f7c8dfdb6a1254996ac56eebdf8bc760a01dce81 100644 (file)
@@ -91,8 +91,8 @@ EXPORT_SYMBOL_GPL(mei_fw_status2str);
  */
 void mei_cancel_work(struct mei_device *dev)
 {
-       cancel_work_sync(&dev->init_work);
        cancel_work_sync(&dev->reset_work);
+       cancel_work_sync(&dev->bus_rescan_work);
 
        cancel_delayed_work(&dev->timer_work);
 }
@@ -148,16 +148,10 @@ int mei_reset(struct mei_device *dev)
            state != MEI_DEV_POWER_UP) {
 
                /* remove all waiting requests */
-               mei_cl_all_write_clear(dev);
-
                mei_cl_all_disconnect(dev);
 
-               /* wake up all readers and writers so they can be interrupted */
-               mei_cl_all_wakeup(dev);
-
                /* remove entry if already in list */
-               dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n");
-               mei_cl_unlink(&dev->wd_cl);
+               dev_dbg(dev->dev, "remove iamthif from the file list.\n");
                mei_cl_unlink(&dev->iamthif_cl);
                mei_amthif_reset_params(dev);
        }
@@ -165,7 +159,6 @@ int mei_reset(struct mei_device *dev)
        mei_hbm_reset(dev);
 
        dev->rd_msg_hdr = 0;
-       dev->wd_pending = false;
 
        if (ret) {
                dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
@@ -335,16 +328,12 @@ void mei_stop(struct mei_device *dev)
 
        mutex_lock(&dev->device_lock);
 
-       mei_wd_stop(dev);
-
        dev->dev_state = MEI_DEV_POWER_DOWN;
        mei_reset(dev);
        /* move device to disabled state unconditionally */
        dev->dev_state = MEI_DEV_DISABLED;
 
        mutex_unlock(&dev->device_lock);
-
-       mei_watchdog_unregister(dev);
 }
 EXPORT_SYMBOL_GPL(mei_stop);
 
@@ -394,7 +383,6 @@ void mei_device_init(struct mei_device *dev,
        init_waitqueue_head(&dev->wait_hw_ready);
        init_waitqueue_head(&dev->wait_pg);
        init_waitqueue_head(&dev->wait_hbm_start);
-       init_waitqueue_head(&dev->wait_stop_wd);
        dev->dev_state = MEI_DEV_INITIALIZING;
        dev->reset_count = 0;
 
@@ -404,13 +392,11 @@ void mei_device_init(struct mei_device *dev,
        mei_io_list_init(&dev->ctrl_rd_list);
 
        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
-       INIT_WORK(&dev->init_work, mei_host_client_init);
        INIT_WORK(&dev->reset_work, mei_reset_work);
+       INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
 
-       INIT_LIST_HEAD(&dev->wd_cl.link);
        INIT_LIST_HEAD(&dev->iamthif_cl.link);
        mei_io_list_init(&dev->amthif_cmd_list);
-       mei_io_list_init(&dev->amthif_rd_complete_list);
 
        bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
        dev->open_handle_count = 0;
index 64b568a0268d7026d7305f3233c378900e5669f4..1e5cb1f704f809d56c350969953567e091203227 100644 (file)
@@ -48,7 +48,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
 
                dev_dbg(dev->dev, "completing call back.\n");
                if (cl == &dev->iamthif_cl)
-                       mei_amthif_complete(dev, cb);
+                       mei_amthif_complete(cl, cb);
                else
                        mei_cl_complete(cl, cb);
        }
@@ -104,6 +104,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;
        unsigned char *buffer = NULL;
+       size_t buf_sz;
 
        cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
        if (!cb) {
@@ -124,11 +125,21 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
                goto out;
        }
 
-       if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
-               cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
+       buf_sz = mei_hdr->length + cb->buf_idx;
+       /* catch integer overflow */
+       if (buf_sz < cb->buf_idx) {
+               cl_err(dev, cl, "message is too big len %d idx %zu\n",
+                      mei_hdr->length, cb->buf_idx);
+
+               list_move_tail(&cb->list, &complete_list->list);
+               cb->status = -EMSGSIZE;
+               goto out;
+       }
+
+       if (cb->buf.size < buf_sz) {
+               cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
                        cb->buf.size, mei_hdr->length, cb->buf_idx);
-               buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
-                                 GFP_KERNEL);
+               buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);
 
                if (!buffer) {
                        cb->status = -ENOMEM;
@@ -136,7 +147,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
                        goto out;
                }
                cb->buf.data = buffer;
-               cb->buf.size = mei_hdr->length + cb->buf_idx;
+               cb->buf.size = buf_sz;
        }
 
        buffer = cb->buf.data + cb->buf_idx;
@@ -145,8 +156,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
        cb->buf_idx += mei_hdr->length;
 
        if (mei_hdr->msg_complete) {
-               cb->read_time = jiffies;
-               cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
+               cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
                list_move_tail(&cb->list, &complete_list->list);
        } else {
                pm_runtime_mark_last_busy(dev->dev);
@@ -229,6 +239,16 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
        return 0;
 }
 
+static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
+{
+       return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
+}
+
+static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
+{
+       return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
+}
+
 /**
  * mei_irq_read_handler - bottom half read routine after ISR to
  * handle the read processing.
@@ -270,7 +290,7 @@ int mei_irq_read_handler(struct mei_device *dev,
        }
 
        /*  HBM message */
-       if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
+       if (hdr_is_hbm(mei_hdr)) {
                ret = mei_hbm_dispatch(dev, mei_hdr);
                if (ret) {
                        dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
@@ -290,6 +310,14 @@ int mei_irq_read_handler(struct mei_device *dev,
 
        /* if no recipient cl was found we assume corrupted header */
        if (&cl->link == &dev->file_list) {
+               /* A message for a fixed address client that is not
+                * connected should be silently discarded
+                */
+               if (hdr_is_fixed(mei_hdr)) {
+                       mei_irq_discard_msg(dev, mei_hdr);
+                       ret = 0;
+                       goto reset_slots;
+               }
                dev_err(dev->dev, "no destination client found 0x%08X\n",
                                dev->rd_msg_hdr);
                ret = -EBADMSG;
@@ -360,21 +388,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
                list_move_tail(&cb->list, &cmpl_list->list);
        }
 
-       if (dev->wd_state == MEI_WD_STOPPING) {
-               dev->wd_state = MEI_WD_IDLE;
-               wake_up(&dev->wait_stop_wd);
-       }
-
-       if (mei_cl_is_connected(&dev->wd_cl)) {
-               if (dev->wd_pending &&
-                   mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
-                       ret = mei_wd_send(dev);
-                       if (ret)
-                               return ret;
-                       dev->wd_pending = false;
-               }
-       }
-
        /* complete control write list CB */
        dev_dbg(dev->dev, "complete control write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
@@ -462,7 +475,6 @@ static void mei_connect_timeout(struct mei_cl *cl)
  */
 void mei_timer(struct work_struct *work)
 {
-       unsigned long timeout;
        struct mei_cl *cl;
 
        struct mei_device *dev = container_of(work,
@@ -508,45 +520,15 @@ void mei_timer(struct work_struct *work)
                        mei_reset(dev);
                        dev->iamthif_canceled = false;
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;
-                       dev->iamthif_timer = 0;
 
                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;
 
-                       dev->iamthif_file_object = NULL;
+                       dev->iamthif_fp = NULL;
                        mei_amthif_run_next_cmd(dev);
                }
        }
 
-       if (dev->iamthif_timer) {
-
-               timeout = dev->iamthif_timer +
-                       mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
-               dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
-                               dev->iamthif_timer);
-               dev_dbg(dev->dev, "timeout = %ld\n", timeout);
-               dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
-               if (time_after(jiffies, timeout)) {
-                       /*
-                        * User didn't read the AMTHI data on time (15sec)
-                        * freeing AMTHI for other requests
-                        */
-
-                       dev_dbg(dev->dev, "freeing AMTHI for other requests\n");
-
-                       mei_io_list_flush(&dev->amthif_rd_complete_list,
-                               &dev->iamthif_cl);
-                       mei_io_cb_free(dev->iamthif_current_cb);
-                       dev->iamthif_current_cb = NULL;
-
-                       dev->iamthif_file_object->private_data = NULL;
-                       dev->iamthif_file_object = NULL;
-                       dev->iamthif_timer = 0;
-                       mei_amthif_run_next_cmd(dev);
-
-               }
-       }
 out:
        if (dev->dev_state != MEI_DEV_DISABLED)
                schedule_delayed_work(&dev->timer_work, 2 * HZ);
index 80f9afcb13823282a9859e08a96c36f55901efa6..52635b063873ac1289b22c8987a5e115c37dc4cc 100644 (file)
@@ -65,7 +65,7 @@ static int mei_open(struct inode *inode, struct file *file)
                goto err_unlock;
        }
 
-       cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+       cl = mei_cl_alloc_linked(dev);
        if (IS_ERR(cl)) {
                err = PTR_ERR(cl);
                goto err_unlock;
@@ -159,27 +159,22 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
                goto out;
        }
 
+       if (ubuf == NULL) {
+               rets = -EMSGSIZE;
+               goto out;
+       }
+
        if (cl == &dev->iamthif_cl) {
                rets = mei_amthif_read(dev, file, ubuf, length, offset);
                goto out;
        }
 
        cb = mei_cl_read_cb(cl, file);
-       if (cb) {
-               /* read what left */
-               if (cb->buf_idx > *offset)
-                       goto copy_buffer;
-               /* offset is beyond buf_idx we have no more data return 0 */
-               if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
-                       rets = 0;
-                       goto free;
-               }
-               /* Offset needs to be cleaned for contiguous reads*/
-               if (cb->buf_idx == 0 && *offset > 0)
-                       *offset = 0;
-       } else if (*offset > 0) {
+       if (cb)
+               goto copy_buffer;
+
+       if (*offset > 0)
                *offset = 0;
-       }
 
        err = mei_cl_read_start(cl, length, file);
        if (err && err != -EBUSY) {
@@ -214,11 +209,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
 
        cb = mei_cl_read_cb(cl, file);
        if (!cb) {
-               if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
-                       cb = mei_cl_read_cb(cl, NULL);
-                       if (cb)
-                               goto copy_buffer;
-               }
                rets = 0;
                goto out;
        }
@@ -231,10 +221,10 @@ copy_buffer:
                goto free;
        }
 
-       cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
-           cb->buf.size, cb->buf_idx);
-       if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
-               rets = -EMSGSIZE;
+       cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
+              cb->buf.size, cb->buf_idx, *offset);
+       if (*offset >= cb->buf_idx) {
+               rets = 0;
                goto free;
        }
 
@@ -250,11 +240,13 @@ copy_buffer:
 
        rets = length;
        *offset += length;
-       if ((unsigned long)*offset < cb->buf_idx)
+       /* not all data was read, keep the cb */
+       if (*offset < cb->buf_idx)
                goto out;
 
 free:
        mei_io_cb_free(cb);
+       *offset = 0;
 
 out:
        cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
@@ -275,9 +267,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
                         size_t length, loff_t *offset)
 {
        struct mei_cl *cl = file->private_data;
-       struct mei_cl_cb *write_cb = NULL;
+       struct mei_cl_cb *cb;
        struct mei_device *dev;
-       unsigned long timeout = 0;
        int rets;
 
        if (WARN_ON(!cl || !cl->dev))
@@ -313,52 +304,31 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
                goto out;
        }
 
-       if (cl == &dev->iamthif_cl) {
-               write_cb = mei_amthif_find_read_list_entry(dev, file);
-
-               if (write_cb) {
-                       timeout = write_cb->read_time +
-                               mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
-                       if (time_after(jiffies, timeout)) {
-                               *offset = 0;
-                               mei_io_cb_free(write_cb);
-                               write_cb = NULL;
-                       }
-               }
-       }
-
        *offset = 0;
-       write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
-       if (!write_cb) {
+       cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
+       if (!cb) {
                rets = -ENOMEM;
                goto out;
        }
 
-       rets = copy_from_user(write_cb->buf.data, ubuf, length);
+       rets = copy_from_user(cb->buf.data, ubuf, length);
        if (rets) {
                dev_dbg(dev->dev, "failed to copy data from userland\n");
                rets = -EFAULT;
+               mei_io_cb_free(cb);
                goto out;
        }
 
        if (cl == &dev->iamthif_cl) {
-               rets = mei_amthif_write(cl, write_cb);
-
-               if (rets) {
-                       dev_err(dev->dev,
-                               "amthif write failed with status = %d\n", rets);
-                       goto out;
-               }
-               mutex_unlock(&dev->device_lock);
-               return length;
+               rets = mei_amthif_write(cl, cb);
+               if (!rets)
+                       rets = length;
+               goto out;
        }
 
-       rets = mei_cl_write(cl, write_cb, false);
+       rets = mei_cl_write(cl, cb, false);
 out:
        mutex_unlock(&dev->device_lock);
-       if (rets < 0)
-               mei_io_cb_free(write_cb);
        return rets;
 }
 
@@ -393,12 +363,22 @@ static int mei_ioctl_connect_client(struct file *file,
 
        /* find ME client we're trying to connect to */
        me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
-       if (!me_cl ||
-           (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
+       if (!me_cl) {
                dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
                        &data->in_client_uuid);
-               mei_me_cl_put(me_cl);
-               return  -ENOTTY;
+               rets = -ENOTTY;
+               goto end;
+       }
+
+       if (me_cl->props.fixed_address) {
+               bool forbidden = dev->override_fixed_address ?
+                        !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
+               if (forbidden) {
+                       dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
+                               &data->in_client_uuid);
+                       rets = -ENOTTY;
+                       goto end;
+               }
        }
 
        dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
@@ -454,7 +434,7 @@ end:
  *
  * Return: 0 on success , <0 on error
  */
-static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
 {
        struct mei_cl *cl = file->private_data;
 
@@ -473,7 +453,7 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
  *
  * Return: 0 on success , <0 on error
  */
-static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
 {
        struct mei_cl *cl = file->private_data;
        bool notify_ev;
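
The hunks above change the character-device semantics in three user-visible ways: a NULL read buffer now fails early with -EMSGSIZE, a short read keeps the callback and the offset is reset only once the buffer is fully drained, and a connect to a fixed-address FW client is refused unless the firmware advertises fixed-address support or the allow_fixed_address override is set. A minimal user-space sketch of that flow, assuming a placeholder client UUID and the usual /dev/mei0 node name (both illustrative, not taken from this patch):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/uuid.h>
	#include <linux/mei.h>

	int main(void)
	{
		/* placeholder UUID -- substitute the FW client you actually target */
		const uuid_le guid = UUID_LE(0x00000000, 0x0000, 0x0000,
					     0x00, 0x00, 0x00, 0x00,
					     0x00, 0x00, 0x00, 0x00);
		struct mei_connect_client_data conn = { .in_client_uuid = guid };
		unsigned char buf[512];
		ssize_t len;
		int fd;

		fd = open("/dev/mei0", O_RDWR);
		if (fd < 0)
			return 1;

		/* -ENOTTY now also covers a forbidden fixed-address connection */
		if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &conn)) {
			close(fd);
			return 1;
		}

		if (write(fd, "request", 7) == 7) {
			/* a short read keeps the cb; the offset resets once drained */
			len = read(fd, buf, sizeof(buf));
			if (len > 0)
				printf("received %zd bytes\n", len);
		}

		close(fd);
		return 0;
	}
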
index 388efb5191380bdbb30923cde50129b259666831..e19e6acb191bb10771ceaa53c1383b3f84de56c2 100644 (file)
@@ -22,4 +22,6 @@
 
 EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
 EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
 #endif /* __CHECKER__ */
index 47e1bc6551d4fa62e7b2c6f04e4687e56f23bfce..7d2d5d4a162416e1ba3c8b10279b05f17dfc3bbb 100644 (file)
@@ -60,7 +60,45 @@ TRACE_EVENT(mei_reg_write,
                __entry->offs = offs;
                __entry->val = val;
        ),
-       TP_printk("[%s] write %s[%#x] = %#x)",
+       TP_printk("[%s] write %s[%#x] = %#x",
+                 __get_str(dev), __entry->reg,  __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_read,
+       TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+       TP_ARGS(dev, reg, offs, val),
+       TP_STRUCT__entry(
+               __string(dev, dev_name(dev))
+               __field(const char *, reg)
+               __field(u32, offs)
+               __field(u32, val)
+       ),
+       TP_fast_assign(
+               __assign_str(dev, dev_name(dev))
+               __entry->reg  = reg;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%s] pci cfg read %s:[%#x] = %#x",
+                 __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_write,
+       TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+       TP_ARGS(dev, reg, offs, val),
+       TP_STRUCT__entry(
+               __string(dev, dev_name(dev))
+               __field(const char *, reg)
+               __field(u32, offs)
+               __field(u32, val)
+       ),
+       TP_fast_assign(
+               __assign_str(dev, dev_name(dev))
+               __entry->reg = reg;
+               __entry->offs = offs;
+               __entry->val = val;
+       ),
+       TP_printk("[%s] pci cfg write %s[%#x] = %#x",
                  __get_str(dev), __entry->reg,  __entry->offs, __entry->val)
 );
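
The two added events mirror the existing register-access tracepoints but cover PCI config space accesses. A hedged sketch of how a caller in the ME hardware layer might emit the read event; the helper name, register label and the 0x40 offset are illustrative only, not taken from this patch:

	/* assumes "mei-trace.h" has been included by the caller */
	static void mei_me_trace_fw_status(struct pci_dev *pdev)
	{
		u32 fw_sts;

		pci_read_config_dword(pdev, 0x40, &fw_sts);
		trace_mei_pci_cfg_read(&pdev->dev, "HFS_1", 0x40, fw_sts);
	}

At run time the events appear under the "mei" trace system, so they can be enabled with something like
"echo 1 > /sys/kernel/debug/tracing/events/mei/mei_pci_cfg_read/enable" (the tracefs mount point may differ).
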
 
index 4250555d5e72a2e29a5721de1fb0e790bc5747de..db78e6d994568e535902fa045a21c68091d4812f 100644 (file)
@@ -18,7 +18,7 @@
 #define _MEI_DEV_H_
 
 #include <linux/types.h>
-#include <linux/watchdog.h>
+#include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
 #include <linux/mei_cl_bus.h>
 #include "hw.h"
 #include "hbm.h"
 
-/*
- * watch dog definition
- */
-#define MEI_WD_HDR_SIZE       4
-#define MEI_WD_STOP_MSG_SIZE  MEI_WD_HDR_SIZE
-#define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16)
-
-#define MEI_WD_DEFAULT_TIMEOUT   120  /* seconds */
-#define MEI_WD_MIN_TIMEOUT       120  /* seconds */
-#define MEI_WD_MAX_TIMEOUT     65535  /* seconds */
-
-#define MEI_WD_STOP_TIMEOUT      10 /* msecs */
-
-#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT       (1 << 0)
-
-#define MEI_RD_MSG_BUF_SIZE           (128 * sizeof(u32))
-
 
 /*
  * AMTHI Client UUID
  */
 extern const uuid_le mei_amthif_guid;
 
-/*
- * Watchdog Client UUID
- */
-extern const uuid_le mei_wd_guid;
+#define MEI_RD_MSG_BUF_SIZE           (128 * sizeof(u32))
 
 /*
  * Number of Maximum MEI Clients
@@ -73,15 +53,6 @@ extern const uuid_le mei_wd_guid;
  */
 #define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
 
-/*
- * Internal Clients Number
- */
-#define MEI_HOST_CLIENT_ID_ANY        (-1)
-#define MEI_HBM_HOST_CLIENT_ID         0 /* not used, just for documentation */
-#define MEI_WD_HOST_CLIENT_ID          1
-#define MEI_IAMTHIF_HOST_CLIENT_ID     2
-
-
 /* File state */
 enum file_state {
        MEI_FILE_INITIALIZING = 0,
@@ -123,12 +94,6 @@ enum mei_file_transaction_states {
        MEI_READ_COMPLETE
 };
 
-enum mei_wd_states {
-       MEI_WD_IDLE,
-       MEI_WD_RUNNING,
-       MEI_WD_STOPPING,
-};
-
 /**
  * enum mei_cb_file_ops  - file operation associated with the callback
  * @MEI_FOP_READ:       read
@@ -153,7 +118,7 @@ enum mei_cb_file_ops {
  * Intel MEI message data struct
  */
 struct mei_msg_data {
-       u32 size;
+       size_t size;
        unsigned char *data;
 };
 
@@ -206,8 +171,7 @@ struct mei_cl;
  * @fop_type: file operation type
  * @buf: buffer for data associated with the callback
  * @buf_idx: last read index
- * @read_time: last read operation time stamp (iamthif)
- * @file_object: pointer to file structure
+ * @fp: pointer to file structure
  * @status: io status of the cb
  * @internal: communication between driver and FW flag
  * @completed: the transfer or reception has completed
@@ -217,9 +181,8 @@ struct mei_cl_cb {
        struct mei_cl *cl;
        enum mei_cb_file_ops fop_type;
        struct mei_msg_data buf;
-       unsigned long buf_idx;
-       unsigned long read_time;
-       struct file *file_object;
+       size_t buf_idx;
+       const struct file *fp;
        int status;
        u32 internal:1;
        u32 completed:1;
@@ -341,12 +304,13 @@ struct mei_hw_ops {
 
 /* MEI bus API*/
 void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_bus_rescan_work(struct work_struct *work);
 void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
                        bool blocking);
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
-void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_notify_event(struct mei_cl *cl);
+bool mei_cl_bus_rx_event(struct mei_cl *cl);
+bool mei_cl_bus_notify_event(struct mei_cl *cl);
 void mei_cl_bus_remove_devices(struct mei_device *bus);
 int mei_cl_bus_init(void);
 void mei_cl_bus_exit(void);
@@ -404,7 +368,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @wait_hw_ready : wait queue for receive HW ready message form FW
  * @wait_pg     : wait queue for receive PG message from FW
  * @wait_hbm_start : wait queue for receive HBM start message from FW
- * @wait_stop_wd : wait queue for receive WD stop message from FW
  *
  * @reset_count : number of consecutive resets
  * @dev_state   : device state
@@ -426,6 +389,8 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @hbm_f_dc_supported  : hbm feature dynamic clients
  * @hbm_f_dot_supported : hbm feature disconnect on timeout
  * @hbm_f_ev_supported  : hbm feature event notification
+ * @hbm_f_fa_supported  : hbm feature fixed address client
+ * @hbm_f_ie_supported  : hbm feature immediate reply to enum request
  *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
@@ -434,26 +399,19 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @me_client_index : last FW client index in enumeration
  *
  * @allow_fixed_address: allow user space to connect a fixed client
- *
- * @wd_cl       : watchdog client
- * @wd_state    : watchdog client state
- * @wd_pending  : watchdog command is pending
- * @wd_timeout  : watchdog expiration timeout
- * @wd_data     : watchdog message buffer
+ * @override_fixed_address: force allow fixed address behavior
  *
  * @amthif_cmd_list : amthif list for cmd waiting
- * @amthif_rd_complete_list : amthif list for reading completed cmd data
- * @iamthif_file_object : file for current amthif operation
+ * @iamthif_fp : file for current amthif operation
  * @iamthif_cl  : amthif host client
  * @iamthif_current_cb : amthif current operation callback
  * @iamthif_open_count : number of opened amthif connections
- * @iamthif_timer : time stamp of current amthif command completion
  * @iamthif_stall_timer : timer to detect amthif hang
  * @iamthif_state : amthif processor state
  * @iamthif_canceled : current amthif command is canceled
  *
- * @init_work   : work item for the device init
  * @reset_work  : work item for the device reset
+ * @bus_rescan_work : work item for the bus rescan
  *
  * @device_list : mei client bus list
  * @cl_bus_lock : client bus list lock
@@ -486,7 +444,6 @@ struct mei_device {
        wait_queue_head_t wait_hw_ready;
        wait_queue_head_t wait_pg;
        wait_queue_head_t wait_hbm_start;
-       wait_queue_head_t wait_stop_wd;
 
        /*
         * mei device  states
@@ -522,6 +479,8 @@ struct mei_device {
        unsigned int hbm_f_dc_supported:1;
        unsigned int hbm_f_dot_supported:1;
        unsigned int hbm_f_ev_supported:1;
+       unsigned int hbm_f_fa_supported:1;
+       unsigned int hbm_f_ie_supported:1;
 
        struct rw_semaphore me_clients_rwsem;
        struct list_head me_clients;
@@ -530,29 +489,21 @@ struct mei_device {
        unsigned long me_client_index;
 
        bool allow_fixed_address;
-
-       struct mei_cl wd_cl;
-       enum mei_wd_states wd_state;
-       bool wd_pending;
-       u16 wd_timeout;
-       unsigned char wd_data[MEI_WD_START_MSG_SIZE];
-
+       bool override_fixed_address;
 
        /* amthif list for cmd waiting */
        struct mei_cl_cb amthif_cmd_list;
        /* driver managed amthif list for reading completed amthif cmd data */
-       struct mei_cl_cb amthif_rd_complete_list;
-       struct file *iamthif_file_object;
+       const struct file *iamthif_fp;
        struct mei_cl iamthif_cl;
        struct mei_cl_cb *iamthif_current_cb;
        long iamthif_open_count;
-       unsigned long iamthif_timer;
        u32 iamthif_stall_timer;
        enum iamthif_states iamthif_state;
        bool iamthif_canceled;
 
-       struct work_struct init_work;
        struct work_struct reset_work;
+       struct work_struct bus_rescan_work;
 
        /* List of bus devices */
        struct list_head device_list;
@@ -635,46 +586,17 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
 
 int mei_amthif_release(struct mei_device *dev, struct file *file);
 
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
-                                               struct file *file);
-
 int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_amthif_run_next_cmd(struct mei_device *dev);
 int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                        struct mei_cl_cb *cmpl_list);
 
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_amthif_irq_read_msg(struct mei_cl *cl,
                            struct mei_msg_hdr *mei_hdr,
                            struct mei_cl_cb *complete_list);
 int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
 
-/*
- * NFC functions
- */
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-void mei_nfc_host_exit(struct mei_device *dev);
-
-/*
- * NFC Client UUID
- */
-extern const uuid_le mei_nfc_guid;
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-/*
- * mei_watchdog_register  - Registering watchdog interface
- *   once we got connection to the WD Client
- * @dev: mei device
- */
-int mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister  - Unregistering watchdog interface
- * @dev: mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
 /*
  * Register Access Function
  */
index 75fc9c688df8fc6fac004c78d8067061eff70749..64e64da6da4439c470832ab32a0a0158d6c2d62a 100644 (file)
@@ -88,6 +88,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+
        /* required last entry */
        {0, }
 };
@@ -210,7 +213,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = mei_register(dev, &pdev->dev);
        if (err)
-               goto release_irq;
+               goto stop;
 
        pci_set_drvdata(pdev, dev);
 
@@ -231,6 +234,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+stop:
+       mei_stop(dev);
 release_irq:
        mei_cancel_work(dev);
        mei_disable_interrupts(dev);
index 71f8a747571756b0941a82f0ee6af99247464744..30cc30683c07186e7ff33d6395f6e8a64d2cf3c0 100644 (file)
@@ -154,7 +154,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = mei_register(dev, &pdev->dev);
        if (err)
-               goto release_irq;
+               goto stop;
 
        pci_set_drvdata(pdev, dev);
 
@@ -170,6 +170,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+stop:
+       mei_stop(dev);
 release_irq:
 
        mei_cancel_work(dev);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
deleted file mode 100644 (file)
index b346638..0000000
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/watchdog.h>
-
-#include <linux/mei.h>
-
-#include "mei_dev.h"
-#include "hbm.h"
-#include "client.h"
-
-static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
-static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
-
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-
-/* UUIDs for AMT F/W clients */
-const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
-                                               0x9D, 0xA9, 0x15, 0x14, 0xCB,
-                                               0x32, 0xAB);
-
-static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
-{
-       dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout);
-       memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE);
-       memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16));
-}
-
-/**
- * mei_wd_host_init - connect to the watchdog client
- *
- * @dev: the device structure
- * @me_cl: me client
- *
- * Return: -ENOTTY if wd client cannot be found
- *         -EIO if write has failed
- *         0 on success
- */
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
-       struct mei_cl *cl = &dev->wd_cl;
-       int ret;
-
-       mei_cl_init(cl, dev);
-
-       dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
-       dev->wd_state = MEI_WD_IDLE;
-
-       ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
-       if (ret < 0) {
-               dev_info(dev->dev, "wd: failed link client\n");
-               return ret;
-       }
-
-       ret = mei_cl_connect(cl, me_cl, NULL);
-       if (ret) {
-               dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
-               mei_cl_unlink(cl);
-               return ret;
-       }
-
-       ret = mei_watchdog_register(dev);
-       if (ret) {
-               mei_cl_disconnect(cl);
-               mei_cl_unlink(cl);
-       }
-       return ret;
-}
-
-/**
- * mei_wd_send - sends watch dog message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success,
- *     -EIO when message send fails
- *     -EINVAL when invalid message is to be sent
- *     -ENODEV on flow control failure
- */
-int mei_wd_send(struct mei_device *dev)
-{
-       struct mei_cl *cl = &dev->wd_cl;
-       struct mei_msg_hdr hdr;
-       int ret;
-
-       hdr.host_addr = cl->host_client_id;
-       hdr.me_addr = mei_cl_me_id(cl);
-       hdr.msg_complete = 1;
-       hdr.reserved = 0;
-       hdr.internal = 0;
-
-       if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
-               hdr.length = MEI_WD_START_MSG_SIZE;
-       else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
-               hdr.length = MEI_WD_STOP_MSG_SIZE;
-       else {
-               dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n");
-               return -EINVAL;
-       }
-
-       ret = mei_write_message(dev, &hdr, dev->wd_data);
-       if (ret) {
-               dev_err(dev->dev, "wd: write message failed\n");
-               return ret;
-       }
-
-       ret = mei_cl_flow_ctrl_reduce(cl);
-       if (ret) {
-               dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-/**
- * mei_wd_stop - sends watchdog stop message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success
- * on error:
- *     -EIO    when message send fails
- *     -EINVAL when invalid message is to be sent
- *     -ETIME  on message timeout
- */
-int mei_wd_stop(struct mei_device *dev)
-{
-       struct mei_cl *cl = &dev->wd_cl;
-       int ret;
-
-       if (!mei_cl_is_connected(cl) ||
-           dev->wd_state != MEI_WD_RUNNING)
-               return 0;
-
-       memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE);
-
-       dev->wd_state = MEI_WD_STOPPING;
-
-       ret = mei_cl_flow_ctrl_creds(cl);
-       if (ret < 0)
-               goto err;
-
-       if (ret && mei_hbuf_acquire(dev)) {
-               ret = mei_wd_send(dev);
-               if (ret)
-                       goto err;
-               dev->wd_pending = false;
-       } else {
-               dev->wd_pending = true;
-       }
-
-       mutex_unlock(&dev->device_lock);
-
-       ret = wait_event_timeout(dev->wait_stop_wd,
-                               dev->wd_state == MEI_WD_IDLE,
-                               msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
-       mutex_lock(&dev->device_lock);
-       if (dev->wd_state != MEI_WD_IDLE) {
-               /* timeout */
-               ret = -ETIME;
-               dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret);
-               goto err;
-       }
-       dev_dbg(dev->dev, "wd: stop completed after %u msec\n",
-                       MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
-       return 0;
-err:
-       return ret;
-}
-
-/**
- * mei_wd_ops_start - wd start command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_start(struct watchdog_device *wd_dev)
-{
-       struct mei_device *dev;
-       struct mei_cl *cl;
-       int err = -ENODEV;
-
-       dev = watchdog_get_drvdata(wd_dev);
-       if (!dev)
-               return -ENODEV;
-
-       cl = &dev->wd_cl;
-
-       mutex_lock(&dev->device_lock);
-
-       if (dev->dev_state != MEI_DEV_ENABLED) {
-               dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED  dev_state = %s\n",
-                       mei_dev_state_str(dev->dev_state));
-               goto end_unlock;
-       }
-
-       if (!mei_cl_is_connected(cl)) {
-               cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
-               goto end_unlock;
-       }
-
-       mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
-       err = 0;
-end_unlock:
-       mutex_unlock(&dev->device_lock);
-       return err;
-}
-
-/**
- * mei_wd_ops_stop -  wd stop command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
-{
-       struct mei_device *dev;
-
-       dev = watchdog_get_drvdata(wd_dev);
-       if (!dev)
-               return -ENODEV;
-
-       mutex_lock(&dev->device_lock);
-       mei_wd_stop(dev);
-       mutex_unlock(&dev->device_lock);
-
-       return 0;
-}
-
-/**
- * mei_wd_ops_ping - wd ping command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
-{
-       struct mei_device *dev;
-       struct mei_cl *cl;
-       int ret;
-
-       dev = watchdog_get_drvdata(wd_dev);
-       if (!dev)
-               return -ENODEV;
-
-       cl = &dev->wd_cl;
-
-       mutex_lock(&dev->device_lock);
-
-       if (!mei_cl_is_connected(cl)) {
-               cl_err(dev, cl, "wd: not connected.\n");
-               ret = -ENODEV;
-               goto end;
-       }
-
-       dev->wd_state = MEI_WD_RUNNING;
-
-       ret = mei_cl_flow_ctrl_creds(cl);
-       if (ret < 0)
-               goto end;
-
-       /* Check if we can send the ping to HW*/
-       if (ret && mei_hbuf_acquire(dev)) {
-               dev_dbg(dev->dev, "wd: sending ping\n");
-
-               ret = mei_wd_send(dev);
-               if (ret)
-                       goto end;
-               dev->wd_pending = false;
-       } else {
-               dev->wd_pending = true;
-       }
-
-end:
-       mutex_unlock(&dev->device_lock);
-       return ret;
-}
-
-/**
- * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- * @timeout: timeout value to set
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev,
-               unsigned int timeout)
-{
-       struct mei_device *dev;
-
-       dev = watchdog_get_drvdata(wd_dev);
-       if (!dev)
-               return -ENODEV;
-
-       /* Check Timeout value */
-       if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT)
-               return -EINVAL;
-
-       mutex_lock(&dev->device_lock);
-
-       dev->wd_timeout = timeout;
-       wd_dev->timeout = timeout;
-       mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
-       mutex_unlock(&dev->device_lock);
-
-       return 0;
-}
-
-/*
- * Watchdog Device structs
- */
-static const struct watchdog_ops wd_ops = {
-               .owner = THIS_MODULE,
-               .start = mei_wd_ops_start,
-               .stop = mei_wd_ops_stop,
-               .ping = mei_wd_ops_ping,
-               .set_timeout = mei_wd_ops_set_timeout,
-};
-static const struct watchdog_info wd_info = {
-               .identity = INTEL_AMT_WATCHDOG_ID,
-               .options = WDIOF_KEEPALIVEPING |
-                          WDIOF_SETTIMEOUT |
-                          WDIOF_ALARMONLY,
-};
-
-static struct watchdog_device amt_wd_dev = {
-               .info = &wd_info,
-               .ops = &wd_ops,
-               .timeout = MEI_WD_DEFAULT_TIMEOUT,
-               .min_timeout = MEI_WD_MIN_TIMEOUT,
-               .max_timeout = MEI_WD_MAX_TIMEOUT,
-};
-
-
-int mei_watchdog_register(struct mei_device *dev)
-{
-
-       int ret;
-
-       amt_wd_dev.parent = dev->dev;
-       /* unlock to perserve correct locking order */
-       mutex_unlock(&dev->device_lock);
-       ret = watchdog_register_device(&amt_wd_dev);
-       mutex_lock(&dev->device_lock);
-       if (ret) {
-               dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n",
-                       ret);
-               return ret;
-       }
-
-       dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n");
-       watchdog_set_drvdata(&amt_wd_dev, dev);
-       return 0;
-}
-
-void mei_watchdog_unregister(struct mei_device *dev)
-{
-       if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
-               return;
-
-       watchdog_set_drvdata(&amt_wd_dev, NULL);
-       watchdog_unregister_device(&amt_wd_dev);
-}
-
index 40677df7f996b8508939743808504b27c144cb86..2e4f3ba75c8e28663815049ad22cd95b33ab9816 100644 (file)
@@ -32,12 +32,29 @@ config SCIF_BUS
          OS and tools for MIC to use with this driver are available from
          <http://software.intel.com/en-us/mic-developer>.
 
+comment "VOP Bus Driver"
+
+config VOP_BUS
+       tristate "VOP Bus Driver"
+       depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+       help
+         This option is selected by any driver which registers a
+         device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
+         and CONFIG_INTEL_MIC_CARD.
+
+         If you are building a host/card kernel with an Intel MIC device
+         then say M (recommended) or Y, else say N. If unsure say N.
+
+         More information about the Intel MIC family as well as the Linux
+         OS and tools for MIC to use with this driver are available from
+         <http://software.intel.com/en-us/mic-developer>.
+
 comment "Intel MIC Host Driver"
 
 config INTEL_MIC_HOST
        tristate "Intel MIC Host Driver"
-       depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
-       select VHOST_RING
+       depends on 64BIT && PCI && X86
+       depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
        help
          This enables Host Driver support for the Intel Many Integrated
          Core (MIC) family of PCIe form factor coprocessor devices that
@@ -56,7 +73,8 @@ comment "Intel MIC Card Driver"
 
 config INTEL_MIC_CARD
        tristate "Intel MIC Card Driver"
-       depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
+       depends on 64BIT && X86
+       depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
        select VIRTIO
        help
          This enables card driver support for the Intel Many Integrated
@@ -107,3 +125,23 @@ config MIC_COSM
          More information about the Intel MIC family as well as the Linux
          OS and tools for MIC to use with this driver are available from
          <http://software.intel.com/en-us/mic-developer>.
+
+comment "VOP Driver"
+
+config VOP
+       tristate "VOP Driver"
+       depends on 64BIT && PCI && X86 && VOP_BUS
+       select VHOST_RING
+       help
+         This enables VOP (Virtio over PCIe) Driver support for the Intel
+         Many Integrated Core (MIC) family of PCIe form factor coprocessor
+         devices. The VOP driver allows virtio drivers, e.g. net, console
+         and block drivers, on the card to connect to user space virtio
+         devices on the host.
+
+         If you are building a host kernel with an Intel MIC device then
+         say M (recommended) or Y, else say N. If unsure say N.
+
+         More information about the Intel MIC family as well as the Linux
+         OS and tools for MIC to use with this driver are available from
+         <http://software.intel.com/en-us/mic-developer>.
index e288a1106738352ee10a77e5569226ff3102edfc..f2b1323ff96c3e2de07c2180cfbdda3aa3513f3d 100644 (file)
@@ -8,3 +8,4 @@ obj-y += bus/
 obj-$(CONFIG_SCIF) += scif/
 obj-$(CONFIG_MIC_COSM) += cosm/
 obj-$(CONFIG_MIC_COSM) += cosm_client/
+obj-$(CONFIG_VOP) += vop/
index 761842b0d0bb2eb7bc34c5361e4e5b60e5f5613a..8758a7daa52c7c5c8768ddbbedad6dd954589f12 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
 obj-$(CONFIG_SCIF_BUS) += scif_bus.o
 obj-$(CONFIG_MIC_COSM) += cosm_bus.o
+obj-$(CONFIG_VOP_BUS) += vop_bus.o
index f7c57f26691622fa32ed8ae014bbc3bf2ca9f139..8b6341855dc3d635fc1ae035de41cb744bf7ede2 100644 (file)
@@ -30,6 +30,7 @@
  * @attr_group: Pointer to list of sysfs attribute groups.
  * @sdev: Device for sysfs entries.
  * @state: MIC state.
+ * @prev_state: MIC state previous to MIC_RESETTING
  * @shutdown_status: MIC status reported by card for shutdown/crashes.
  * @shutdown_status_int: Internal shutdown status maintained by the driver
  * @cosm_mutex: Mutex for synchronizing access to data structures.
@@ -55,6 +56,7 @@ struct cosm_device {
        const struct attribute_group **attr_group;
        struct device *sdev;
        u8 state;
+       u8 prev_state;
        u8 shutdown_status;
        u8 shutdown_status_int;
        struct mutex cosm_mutex;
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c
new file mode 100644 (file)
index 0000000..303da22
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) Bus driver.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_bus.h"
+
+static ssize_t device_show(struct device *d,
+                          struct device_attribute *attr, char *buf)
+{
+       struct vop_device *dev = dev_to_vop(d);
+
+       return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+                          struct device_attribute *attr, char *buf)
+{
+       struct vop_device *dev = dev_to_vop(d);
+
+       return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct vop_device *dev = dev_to_vop(d);
+
+       return sprintf(buf, "vop:d%08Xv%08X\n",
+                      dev->id.device, dev->id.vendor);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *vop_dev_attrs[] = {
+       &dev_attr_device.attr,
+       &dev_attr_vendor.attr,
+       &dev_attr_modalias.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(vop_dev);
+
+static inline int vop_id_match(const struct vop_device *dev,
+                              const struct vop_device_id *id)
+{
+       if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID)
+               return 0;
+
+       return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support.  If any of them
+ * match, we return 1 and the kernel will call vop_dev_probe().
+ */
+static int vop_dev_match(struct device *dv, struct device_driver *dr)
+{
+       unsigned int i;
+       struct vop_device *dev = dev_to_vop(dv);
+       const struct vop_device_id *ids;
+
+       ids = drv_to_vop(dr)->id_table;
+       for (i = 0; ids[i].device; i++)
+               if (vop_id_match(dev, &ids[i]))
+                       return 1;
+       return 0;
+}
+
+static int vop_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+       struct vop_device *dev = dev_to_vop(dv);
+
+       return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X",
+                             dev->id.device, dev->id.vendor);
+}
+
+static int vop_dev_probe(struct device *d)
+{
+       struct vop_device *dev = dev_to_vop(d);
+       struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+       return drv->probe(dev);
+}
+
+static int vop_dev_remove(struct device *d)
+{
+       struct vop_device *dev = dev_to_vop(d);
+       struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+       drv->remove(dev);
+       return 0;
+}
+
+static struct bus_type vop_bus = {
+       .name  = "vop_bus",
+       .match = vop_dev_match,
+       .dev_groups = vop_dev_groups,
+       .uevent = vop_uevent,
+       .probe = vop_dev_probe,
+       .remove = vop_dev_remove,
+};
+
+int vop_register_driver(struct vop_driver *driver)
+{
+       driver->driver.bus = &vop_bus;
+       return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_register_driver);
+
+void vop_unregister_driver(struct vop_driver *driver)
+{
+       driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_driver);
+
+static void vop_release_dev(struct device *d)
+{
+       put_device(d);
+}
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+                   const struct dma_map_ops *dma_ops,
+                   struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+                   struct dma_chan *chan)
+{
+       int ret;
+       struct vop_device *vdev;
+
+       vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+       if (!vdev)
+               return ERR_PTR(-ENOMEM);
+
+       vdev->dev.parent = pdev;
+       vdev->id.device = id;
+       vdev->id.vendor = VOP_DEV_ANY_ID;
+       vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops;
+       vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
+       dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
+       vdev->dev.release = vop_release_dev;
+       vdev->hw_ops = hw_ops;
+       vdev->dev.bus = &vop_bus;
+       vdev->dnode = dnode;
+       vdev->aper = aper;
+       vdev->dma_ch = chan;
+       vdev->index = dnode - 1;
+       dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
+       /*
+        * device_register() causes the bus infrastructure to look for a
+        * matching driver.
+        */
+       ret = device_register(&vdev->dev);
+       if (ret)
+               goto free_vdev;
+       return vdev;
+free_vdev:
+       kfree(vdev);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vop_register_device);
+
+void vop_unregister_device(struct vop_device *dev)
+{
+       device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_device);
+
+static int __init vop_init(void)
+{
+       return bus_register(&vop_bus);
+}
+
+static void __exit vop_exit(void)
+{
+       bus_unregister(&vop_bus);
+}
+
+core_initcall(vop_init);
+module_exit(vop_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) VOP Bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
new file mode 100644 (file)
index 0000000..fff7a86
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio over PCIe Bus driver.
+ */
+#ifndef _VOP_BUS_H_
+#define _VOP_BUS_H_
+/*
+ * Everything a vop driver needs to work with any particular vop
+ * implementation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "../common/mic_dev.h"
+
+struct vop_device_id {
+       u32 device;
+       u32 vendor;
+};
+
+#define VOP_DEV_TRNSP 1
+#define VOP_DEV_ANY_ID 0xffffffff
+/*
+ * Size of the internal buffer used during DMA's as an intermediate buffer
+ * for copy to/from user. Must be an integral number of pages.
+ */
+#define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
+
+/**
+ * vop_device - representation of a device using vop
+ * @hw_ops: the hardware ops supported by this device.
+ * @id: the device type identification (used to match it with a driver).
+ * @dev: underlying device.
+ * @dnode: The destination node which this device will communicate with.
+ * @aper: Aperture memory window
+ * @dma_ch: DMA channel
+ * @index: unique position on the vop bus
+ */
+struct vop_device {
+       struct vop_hw_ops *hw_ops;
+       struct vop_device_id id;
+       struct device dev;
+       u8 dnode;
+       struct mic_mw *aper;
+       struct dma_chan *dma_ch;
+       int index;
+};
+
+/**
+ * vop_driver - operations for a vop I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found.  Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct vop_driver {
+       struct device_driver driver;
+       const struct vop_device_id *id_table;
+       int (*probe)(struct vop_device *dev);
+       void (*remove)(struct vop_device *dev);
+};
+
+/**
+ * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus.
+ *
+ * @next_db: Obtain the next available doorbell.
+ * @request_irq: Request an interrupt on a particular doorbell.
+ * @free_irq: Free an interrupt requested previously.
+ * @ack_interrupt: acknowledge an interrupt in the ISR.
+ * @get_remote_dp: Get access to the virtio device page used by the remote
+ *                 node to add/remove/configure virtio devices.
+ * @get_dp: Get access to the virtio device page used by the self
+ *          node to add/remove/configure virtio devices.
+ * @send_intr: Send an interrupt to the peer node on a specified doorbell.
+ * @ioremap: Map a buffer with the specified DMA address and length.
+ * @iounmap: Unmap a buffer previously mapped.
+ * @dma_filter: The DMA filter function to use for obtaining access to
+ *             a DMA channel on the peer node.
+ */
+struct vop_hw_ops {
+       int (*next_db)(struct vop_device *vpdev);
+       struct mic_irq *(*request_irq)(struct vop_device *vpdev,
+                                      irqreturn_t (*func)(int irq, void *data),
+                                      const char *name, void *data,
+                                      int intr_src);
+       void (*free_irq)(struct vop_device *vpdev,
+                        struct mic_irq *cookie, void *data);
+       void (*ack_interrupt)(struct vop_device *vpdev, int num);
+       void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
+       void * (*get_dp)(struct vop_device *vpdev);
+       void (*send_intr)(struct vop_device *vpdev, int db);
+       void __iomem * (*ioremap)(struct vop_device *vpdev,
+                                 dma_addr_t pa, size_t len);
+       void (*iounmap)(struct vop_device *vpdev, void __iomem *va);
+};
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+                   const struct dma_map_ops *dma_ops,
+                   struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+                   struct dma_chan *chan);
+void vop_unregister_device(struct vop_device *dev);
+int vop_register_driver(struct vop_driver *drv);
+void vop_unregister_driver(struct vop_driver *drv);
+
+/*
+ * module_vop_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_vop_driver(__vop_driver) \
+       module_driver(__vop_driver, vop_register_driver, \
+                       vop_unregister_driver)
+
+static inline struct vop_device *dev_to_vop(struct device *dev)
+{
+       return container_of(dev, struct vop_device, dev);
+}
+
+static inline struct vop_driver *drv_to_vop(struct device_driver *drv)
+{
+       return container_of(drv, struct vop_driver, driver);
+}
+#endif /* _VOP_BUS_H */
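
Taken together, vop_device, vop_driver and module_vop_driver() give client drivers the same register/probe/remove pattern used by the other virtual busses in this tree. A minimal sketch of a client driver built only on the interfaces declared above; the driver name, probe body and printouts are illustrative, not part of this patch:

	#include <linux/module.h>
	#include "vop_bus.h"

	static const struct vop_device_id sample_vop_id_table[] = {
		{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
		{ 0 },
	};

	static int sample_vop_probe(struct vop_device *vpdev)
	{
		/* hardware access goes through the ops the bus provider supplied */
		int db = vpdev->hw_ops->next_db(vpdev);

		dev_info(&vpdev->dev, "bound, next free doorbell %d\n", db);
		return 0;
	}

	static void sample_vop_remove(struct vop_device *vpdev)
	{
		dev_info(&vpdev->dev, "unbound\n");
	}

	static struct vop_driver sample_vop_driver = {
		.driver = {
			.name  = "vop-sample",
			.owner = THIS_MODULE,
		},
		.id_table = sample_vop_id_table,
		.probe    = sample_vop_probe,
		.remove   = sample_vop_remove,
	};

	module_vop_driver(sample_vop_driver);

	MODULE_LICENSE("GPL v2");

vop_dev_match() walks the id_table until it reaches an entry with a zero device field, so the table must stay zero-terminated as shown.
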
index 69d58bef92cedf6909e92242f519cdee5944b49b..6e9675e12a09033223855bea05273b18b1b9083b 100644 (file)
@@ -8,4 +8,3 @@ obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o
 mic_card-y += mic_x100.o
 mic_card-y += mic_device.o
 mic_card-y += mic_debugfs.o
-mic_card-y += mic_virtio.o
index d0edaf7e0cd586325666955c592d0431f8340754..e749af48f7369ddab489fe55193452a1abe15961 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/mic_common.h>
 #include "../common/mic_dev.h"
 #include "mic_device.h"
-#include "mic_virtio.h"
 
 static struct mic_driver *g_drv;
 
@@ -250,12 +249,82 @@ static struct scif_hw_ops scif_hw_ops = {
        .iounmap = ___mic_iounmap,
 };
 
+static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
+{
+       return dev_get_drvdata(vpdev->dev.parent);
+}
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+                 irqreturn_t (*func)(int irq, void *data),
+                  const char *name, void *data, int intr_src)
+{
+       return mic_request_card_irq(func, NULL, name, data, intr_src);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+                          struct mic_irq *cookie, void *data)
+{
+       return mic_free_card_irq(cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+       struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+       mic_ack_interrupt(&mdrv->mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+       return mic_next_card_db();
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+       struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+       return mdrv->dp;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+       struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+       mic_send_intr(&mdrv->mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+                                  dma_addr_t pa, size_t len)
+{
+       struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+       return mic_card_map(&mdrv->mdev, pa, len);
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+       struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+       mic_card_unmap(&mdrv->mdev, va);
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+       .request_irq = __mic_request_irq,
+       .free_irq = __mic_free_irq,
+       .ack_interrupt = __mic_ack_interrupt,
+       .next_db = __mic_next_db,
+       .get_remote_dp = __mic_get_remote_dp,
+       .send_intr = __mic_send_intr,
+       .ioremap = __mic_ioremap,
+       .iounmap = __mic_iounmap,
+};
+
 static int mic_request_dma_chans(struct mic_driver *mdrv)
 {
        dma_cap_mask_t mask;
        struct dma_chan *chan;
 
-       request_module("mic_x100_dma");
        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
 
@@ -309,9 +378,13 @@ int __init mic_driver_init(struct mic_driver *mdrv)
                rc = -ENODEV;
                goto irq_uninit;
        }
-       rc = mic_devices_init(mdrv);
-       if (rc)
+       mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP,
+                                         NULL, &vop_hw_ops, 0,
+                                         NULL, mdrv->dma_ch[0]);
+       if (IS_ERR(mdrv->vpdev)) {
+               rc = PTR_ERR(mdrv->vpdev);
                goto dma_free;
+       }
        bootparam = mdrv->dp;
        node_id = ioread8(&bootparam->node_id);
        mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
@@ -321,13 +394,13 @@ int __init mic_driver_init(struct mic_driver *mdrv)
                                           mdrv->num_dma_ch, true);
        if (IS_ERR(mdrv->scdev)) {
                rc = PTR_ERR(mdrv->scdev);
-               goto device_uninit;
+               goto vop_remove;
        }
        mic_create_card_debug_dir(mdrv);
 done:
        return rc;
-device_uninit:
-       mic_devices_uninit(mdrv);
+vop_remove:
+       vop_unregister_device(mdrv->vpdev);
 dma_free:
        mic_free_dma_chans(mdrv);
 irq_uninit:
@@ -348,7 +421,7 @@ void mic_driver_uninit(struct mic_driver *mdrv)
 {
        mic_delete_card_debug_dir(mdrv);
        scif_unregister_device(mdrv->scdev);
-       mic_devices_uninit(mdrv);
+       vop_unregister_device(mdrv->vpdev);
        mic_free_dma_chans(mdrv);
        mic_uninit_irq();
        mic_dp_uninit();
index 1dbf83c412893503eacf957d0dfa771627b4724e..333dbed972f61dc9414ead268fdc00b7466b49e4 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
 
 /**
  * struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -76,6 +77,7 @@ struct mic_device {
  * @dma_ch - Array of DMA channels
  * @num_dma_ch - Number of DMA channels available
  * @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
  */
 struct mic_driver {
        char name[20];
@@ -90,6 +92,7 @@ struct mic_driver {
        struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
        int num_dma_ch;
        struct scif_hw_dev *scdev;
+       struct vop_device *vpdev;
 };
 
 /**
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
deleted file mode 100644 (file)
index f6ed57d..0000000
+++ /dev/null
@@ -1,634 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Adapted from:
- *
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * Intel MIC Card driver.
- *
- */
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/virtio_config.h>
-
-#include "../common/mic_dev.h"
-#include "mic_virtio.h"
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-#define MIC_MAX_VRINGS                4
-struct mic_vdev {
-       struct virtio_device vdev;
-       struct mic_device_desc __iomem *desc;
-       struct mic_device_ctrl __iomem *dc;
-       struct mic_device *mdev;
-       void __iomem *vr[MIC_MAX_VRINGS];
-       int used_size[MIC_MAX_VRINGS];
-       struct completion reset_done;
-       struct mic_irq *virtio_cookie;
-       int c2h_vdev_db;
-};
-
-static struct mic_irq *virtio_config_cookie;
-#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev)
-
-/* Helper API to obtain the parent of the virtio device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
-       return mvdev->vdev.dev.parent;
-}
-
-/* This gets the device's feature bits. */
-static u64 mic_get_features(struct virtio_device *vdev)
-{
-       unsigned int i, bits;
-       u32 features = 0;
-       struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-       u8 __iomem *in_features = mic_vq_features(desc);
-       int feature_len = ioread8(&desc->feature_len);
-
-       bits = min_t(unsigned, feature_len, sizeof(features)) * 8;
-       for (i = 0; i < bits; i++)
-               if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
-                       features |= BIT(i);
-
-       return features;
-}
-
-static int mic_finalize_features(struct virtio_device *vdev)
-{
-       unsigned int i, bits;
-       struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-       u8 feature_len = ioread8(&desc->feature_len);
-       /* Second half of bitmap is features we accept. */
-       u8 __iomem *out_features =
-               mic_vq_features(desc) + feature_len;
-
-       /* Give virtio_ring a chance to accept features. */
-       vring_transport_features(vdev);
-
-       /* Make sure we don't have any features > 32 bits! */
-       BUG_ON((u32)vdev->features != vdev->features);
-
-       memset_io(out_features, 0, feature_len);
-       bits = min_t(unsigned, feature_len,
-               sizeof(vdev->features)) * 8;
-       for (i = 0; i < bits; i++) {
-               if (__virtio_test_bit(vdev, i))
-                       iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
-                                &out_features[i / 8]);
-       }
-
-       return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void mic_get(struct virtio_device *vdev, unsigned int offset,
-                  void *buf, unsigned len)
-{
-       struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
-       if (offset + len > ioread8(&desc->config_len))
-               return;
-       memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len);
-}
-
-static void mic_set(struct virtio_device *vdev, unsigned int offset,
-                  const void *buf, unsigned len)
-{
-       struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
-       if (offset + len > ioread8(&desc->config_len))
-               return;
-       memcpy_toio(mic_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status
- * field of the device descriptor. set_status also interrupts the host
- * to tell about status changes.
- */
-static u8 mic_get_status(struct virtio_device *vdev)
-{
-       return ioread8(&to_micvdev(vdev)->desc->status);
-}
-
-static void mic_set_status(struct virtio_device *vdev, u8 status)
-{
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-       if (!status)
-               return;
-       iowrite8(status, &mvdev->desc->status);
-       mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-}
-
-/* Inform host on a virtio device reset and wait for ack from host */
-static void mic_reset_inform_host(struct virtio_device *vdev)
-{
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-       struct mic_device_ctrl __iomem *dc = mvdev->dc;
-       int retry;
-
-       iowrite8(0, &dc->host_ack);
-       iowrite8(1, &dc->vdev_reset);
-       mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-
-       /* Wait till host completes all card accesses and acks the reset */
-       for (retry = 100; retry--;) {
-               if (ioread8(&dc->host_ack))
-                       break;
-               msleep(100);
-       }
-
-       dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
-
-       /* Reset status to 0 in case we timed out */
-       iowrite8(0, &mvdev->desc->status);
-}
-
-static void mic_reset(struct virtio_device *vdev)
-{
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-
-       dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n",
-               __func__, vdev->id.device);
-
-       mic_reset_inform_host(vdev);
-       complete_all(&mvdev->reset_done);
-}
-
-/*
- * The virtio_ring code calls this API when it wants to notify the Host.
- */
-static bool mic_notify(struct virtqueue *vq)
-{
-       struct mic_vdev *mvdev = vq->priv;
-
-       mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-       return true;
-}
-
-static void mic_del_vq(struct virtqueue *vq, int n)
-{
-       struct mic_vdev *mvdev = to_micvdev(vq->vdev);
-       struct vring *vr = (struct vring *)(vq + 1);
-
-       free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n]));
-       vring_del_virtqueue(vq);
-       mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
-       mvdev->vr[n] = NULL;
-}
-
-static void mic_del_vqs(struct virtio_device *vdev)
-{
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-       struct virtqueue *vq, *n;
-       int idx = 0;
-
-       dev_dbg(mic_dev(mvdev), "%s\n", __func__);
-
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
-               mic_del_vq(vq, idx++);
-}
-
-/*
- * This routine assigns vrings allocated in host/IO memory. Code in
- * virtio_ring.c, however, continues to access this IO memory as if it were
- * local memory, without IO accessors.
- */
-static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
-                                    unsigned index,
-                                    void (*callback)(struct virtqueue *vq),
-                                    const char *name)
-{
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-       struct mic_vqconfig __iomem *vqconfig;
-       struct mic_vqconfig config;
-       struct virtqueue *vq;
-       void __iomem *va;
-       struct _mic_vring_info __iomem *info;
-       void *used;
-       int vr_size, _vr_size, err, magic;
-       struct vring *vr;
-       u8 type = ioread8(&mvdev->desc->type);
-
-       if (index >= ioread8(&mvdev->desc->num_vq))
-               return ERR_PTR(-ENOENT);
-
-       if (!name)
-               return ERR_PTR(-ENOENT);
-
-       /* First assign the vrings allocated in host memory */
-       vqconfig = mic_vq_config(mvdev->desc) + index;
-       memcpy_fromio(&config, vqconfig, sizeof(config));
-       _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
-       vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
-       va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
-       if (!va)
-               return ERR_PTR(-ENOMEM);
-       mvdev->vr[index] = va;
-       memset_io(va, 0x0, _vr_size);
-       vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
-                                MIC_VIRTIO_RING_ALIGN, vdev, false,
-                                (void __force *)va, mic_notify, callback,
-                                name);
-       if (!vq) {
-               err = -ENOMEM;
-               goto unmap;
-       }
-       info = va + _vr_size;
-       magic = ioread32(&info->magic);
-
-       if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
-               err = -EIO;
-               goto unmap;
-       }
-
-       /* Allocate and reassign used ring now */
-       mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
-                                            sizeof(struct vring_used_elem) *
-                                            le16_to_cpu(config.num));
-       used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                       get_order(mvdev->used_size[index]));
-       if (!used) {
-               err = -ENOMEM;
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, err);
-               goto del_vq;
-       }
-       iowrite64(virt_to_phys(used), &vqconfig->used_address);
-
-       /*
-        * To reassign the used ring here we are directly accessing
-        * struct vring_virtqueue which is a private data structure
-        * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
-        * vring_new_virtqueue() would ensure that
-        *  (&vq->vring == (struct vring *) (&vq->vq + 1));
-        */
-       vr = (struct vring *)(vq + 1);
-       vr->used = used;
-
-       vq->priv = mvdev;
-       return vq;
-del_vq:
-       vring_del_virtqueue(vq);
-unmap:
-       mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
-       return ERR_PTR(err);
-}
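The used-ring reassignment above leans on a layout detail of virtio_ring.c: the private struct vring is expected to sit immediately after the embedded struct virtqueue, which is what makes the (struct vring *)(vq + 1) cast work. A stand-alone sketch of that kind of layout check, using made-up struct names rather than the real virtio_ring internals:

#include <assert.h>
#include <stddef.h>

struct fake_virtqueue { void *priv; };                  /* public handle */
struct fake_vring { unsigned int num; void *desc, *avail, *used; };

struct fake_vring_virtqueue {
        struct fake_virtqueue vq;       /* exposed to drivers */
        struct fake_vring vring;        /* private ring bookkeeping */
};

int main(void)
{
        /* The "(struct vring *)(vq + 1)" cast is only valid if this holds. */
        assert(offsetof(struct fake_vring_virtqueue, vring) ==
               sizeof(struct fake_virtqueue));
        return 0;
}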
-
-static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-                       struct virtqueue *vqs[],
-                       vq_callback_t *callbacks[],
-                       const char * const names[])
-{
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-       struct mic_device_ctrl __iomem *dc = mvdev->dc;
-       int i, err, retry;
-
-       /* We must have this many virtqueues. */
-       if (nvqs > ioread8(&mvdev->desc->num_vq))
-               return -ENOENT;
-
-       for (i = 0; i < nvqs; ++i) {
-               dev_dbg(mic_dev(mvdev), "%s: %d: %s\n",
-                       __func__, i, names[i]);
-               vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]);
-               if (IS_ERR(vqs[i])) {
-                       err = PTR_ERR(vqs[i]);
-                       goto error;
-               }
-       }
-
-       iowrite8(1, &dc->used_address_updated);
-       /*
-        * Send an interrupt to the host to inform it that used
-        * rings have been re-assigned.
-        */
-       mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-       for (retry = 100; retry--;) {
-               if (!ioread8(&dc->used_address_updated))
-                       break;
-               msleep(100);
-       }
-
-       dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
-       if (!retry) {
-               err = -ENODEV;
-               goto error;
-       }
-
-       return 0;
-error:
-       mic_del_vqs(vdev);
-       return err;
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static struct virtio_config_ops mic_vq_config_ops = {
-       .get_features = mic_get_features,
-       .finalize_features = mic_finalize_features,
-       .get = mic_get,
-       .set = mic_set,
-       .get_status = mic_get_status,
-       .set_status = mic_set_status,
-       .reset = mic_reset,
-       .find_vqs = mic_find_vqs,
-       .del_vqs = mic_del_vqs,
-};
-
-static irqreturn_t
-mic_virtio_intr_handler(int irq, void *data)
-{
-       struct mic_vdev *mvdev = data;
-       struct virtqueue *vq;
-
-       mic_ack_interrupt(mvdev->mdev);
-       list_for_each_entry(vq, &mvdev->vdev.vqs, list)
-               vring_interrupt(0, vq);
-
-       return IRQ_HANDLED;
-}
-
-static void mic_virtio_release_dev(struct device *_d)
-{
-       /*
-        * Unlike virtio PCI, there is nothing to do on release. Provide an
-        * empty method only to avoid a warning from the driver core.
-        */
-}
-
-/*
- * Adds a new device and registers it with virtio;
- * the appropriate drivers are loaded by the device model.
- */
-static int mic_add_device(struct mic_device_desc __iomem *d,
-       unsigned int offset, struct mic_driver *mdrv)
-{
-       struct mic_vdev *mvdev;
-       int ret;
-       int virtio_db;
-       u8 type = ioread8(&d->type);
-
-       mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
-       if (!mvdev) {
-               dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n",
-                       offset, type);
-               return -ENOMEM;
-       }
-
-       mvdev->mdev = &mdrv->mdev;
-       mvdev->vdev.dev.parent = mdrv->dev;
-       mvdev->vdev.dev.release = mic_virtio_release_dev;
-       mvdev->vdev.id.device = type;
-       mvdev->vdev.config = &mic_vq_config_ops;
-       mvdev->desc = d;
-       mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d);
-       init_completion(&mvdev->reset_done);
-
-       virtio_db = mic_next_card_db();
-       mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
-                       NULL, "virtio intr", mvdev, virtio_db);
-       if (IS_ERR(mvdev->virtio_cookie)) {
-               ret = PTR_ERR(mvdev->virtio_cookie);
-               goto kfree;
-       }
-       iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db);
-       mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db);
-
-       ret = register_virtio_device(&mvdev->vdev);
-       if (ret) {
-               dev_err(mic_dev(mvdev),
-                       "Failed to register mic device %u type %u\n",
-                       offset, type);
-               goto free_irq;
-       }
-       iowrite64((u64)mvdev, &mvdev->dc->vdev);
-       dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n",
-               __func__, offset, type, mvdev);
-
-       return 0;
-
-free_irq:
-       mic_free_card_irq(mvdev->virtio_cookie, mvdev);
-kfree:
-       kfree(mvdev);
-       return ret;
-}
-
-/*
- * Match a mic device against a specific desc pointer.
- */
-static int mic_match_desc(struct device *dev, void *data)
-{
-       struct virtio_device *vdev = dev_to_virtio(dev);
-       struct mic_vdev *mvdev = to_micvdev(vdev);
-
-       return mvdev->desc == (void __iomem *)data;
-}
-
-static void mic_handle_config_change(struct mic_device_desc __iomem *d,
-       unsigned int offset, struct mic_driver *mdrv)
-{
-       struct mic_device_ctrl __iomem *dc
-               = (void __iomem *)d + mic_aligned_desc_size(d);
-       struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
-
-       if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
-               return;
-
-       dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
-       virtio_config_changed(&mvdev->vdev);
-       iowrite8(1, &dc->guest_ack);
-}
-
-/*
- * removes a virtio device if a hot remove event has been
- * requested by the host.
- */
-static int mic_remove_device(struct mic_device_desc __iomem *d,
-       unsigned int offset, struct mic_driver *mdrv)
-{
-       struct mic_device_ctrl __iomem *dc
-               = (void __iomem *)d + mic_aligned_desc_size(d);
-       struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
-       u8 status;
-       int ret = -1;
-
-       if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
-               dev_dbg(mdrv->dev,
-                       "%s %d config_change %d type %d mvdev %p\n",
-                       __func__, __LINE__,
-                       ioread8(&dc->config_change), ioread8(&d->type), mvdev);
-
-               status = ioread8(&d->status);
-               reinit_completion(&mvdev->reset_done);
-               unregister_virtio_device(&mvdev->vdev);
-               mic_free_card_irq(mvdev->virtio_cookie, mvdev);
-               if (status & VIRTIO_CONFIG_S_DRIVER_OK)
-                       wait_for_completion(&mvdev->reset_done);
-               kfree(mvdev);
-               iowrite8(1, &dc->guest_ack);
-               dev_dbg(mdrv->dev, "%s %d guest_ack %d\n",
-                       __func__, __LINE__, ioread8(&dc->guest_ack));
-               ret = 0;
-       }
-
-       return ret;
-}
-
-#define REMOVE_DEVICES true
-
-static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
-{
-       s8 type;
-       unsigned int i;
-       struct mic_device_desc __iomem *d;
-       struct mic_device_ctrl __iomem *dc;
-       struct device *dev;
-       int ret;
-
-       for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
-               i += mic_total_desc_size(d)) {
-               d = mdrv->dp + i;
-               dc = (void __iomem *)d + mic_aligned_desc_size(d);
-               /*
-                * This read barrier is paired with the corresponding write
-                * barrier on the host which is inserted before adding or
-                * removing a virtio device descriptor, by updating the type.
-                */
-               rmb();
-               type = ioread8(&d->type);
-
-               /* end of list */
-               if (type == 0)
-                       break;
-
-               if (type == -1)
-                       continue;
-
-               /* device already exists */
-               dev = device_find_child(mdrv->dev, (void __force *)d,
-                                       mic_match_desc);
-               if (dev) {
-                       if (remove)
-                               iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
-                                        &dc->config_change);
-                       put_device(dev);
-                       mic_handle_config_change(d, i, mdrv);
-                       ret = mic_remove_device(d, i, mdrv);
-                       if (!ret && !remove)
-                               iowrite8(-1, &d->type);
-                       if (remove) {
-                               iowrite8(0, &dc->config_change);
-                               iowrite8(0, &dc->guest_ack);
-                       }
-                       continue;
-               }
-
-               /* new device */
-               dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n",
-                       __func__, __LINE__, d);
-               if (!remove)
-                       mic_add_device(d, i, mdrv);
-       }
-}
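mic_scan_devices() above treats the device page as a packed list of variable-size descriptors, where type 0 terminates the list and type -1 marks a slot left behind by a removed device. A minimal stand-alone sketch of that walk, with fixed-size illustrative records in place of the real descriptors:

#include <stdio.h>

struct fake_desc { signed char type; };

int main(void)
{
        struct fake_desc page[] = { { 5 }, { -1 }, { 9 }, { 0 } };
        unsigned int i;

        for (i = 0; i < sizeof(page) / sizeof(page[0]); i++) {
                signed char type = page[i].type;

                if (type == 0)          /* end of list */
                        break;
                if (type == -1)         /* freed slot, skip it */
                        continue;
                printf("slot %u: virtio device type %d\n", i, type);
        }
        return 0;
}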
-
-/*
- * mic_hotplug_devices tries to find changes in the device page.
- */
-static void mic_hotplug_devices(struct work_struct *work)
-{
-       struct mic_driver *mdrv = container_of(work,
-               struct mic_driver, hotplug_work);
-
-       mic_scan_devices(mdrv, !REMOVE_DEVICES);
-}
-
-/*
- * Interrupt handler for hot plug/config changes etc.
- */
-static irqreturn_t
-mic_extint_handler(int irq, void *data)
-{
-       struct mic_driver *mdrv = (struct mic_driver *)data;
-
-       dev_dbg(mdrv->dev, "%s %d hotplug work\n",
-               __func__, __LINE__);
-       mic_ack_interrupt(&mdrv->mdev);
-       schedule_work(&mdrv->hotplug_work);
-       return IRQ_HANDLED;
-}
-
-/*
- * Init function for virtio
- */
-int mic_devices_init(struct mic_driver *mdrv)
-{
-       int rc;
-       struct mic_bootparam __iomem *bootparam;
-       int config_db;
-
-       INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices);
-       mic_scan_devices(mdrv, !REMOVE_DEVICES);
-
-       config_db = mic_next_card_db();
-       virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL,
-                                                   "virtio_config_intr", mdrv,
-                                                   config_db);
-       if (IS_ERR(virtio_config_cookie)) {
-               rc = PTR_ERR(virtio_config_cookie);
-               goto exit;
-       }
-
-       bootparam = mdrv->dp;
-       iowrite8(config_db, &bootparam->h2c_config_db);
-       return 0;
-exit:
-       return rc;
-}
-
-/*
- * Uninit function for virtio
- */
-void mic_devices_uninit(struct mic_driver *mdrv)
-{
-       struct mic_bootparam __iomem *bootparam = mdrv->dp;
-       iowrite8(-1, &bootparam->h2c_config_db);
-       mic_free_card_irq(virtio_config_cookie, mdrv);
-       flush_work(&mdrv->hotplug_work);
-       mic_scan_devices(mdrv, REMOVE_DEVICES);
-}
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
deleted file mode 100644 (file)
index d0407ba..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- *
- */
-#ifndef __MIC_CARD_VIRTIO_H
-#define __MIC_CARD_VIRTIO_H
-
-#include <linux/mic_common.h>
-#include "mic_device.h"
-
-/*
- * 64 bit I/O access
- */
-#ifndef ioread64
-#define ioread64 readq
-#endif
-#ifndef iowrite64
-#define iowrite64 writeq
-#endif
-
-static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
-{
-       return sizeof(*desc)
-               + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
-               + ioread8(&desc->feature_len) * 2
-               + ioread8(&desc->config_len);
-}
-
-static inline struct mic_vqconfig __iomem *
-mic_vq_config(struct mic_device_desc __iomem *desc)
-{
-       return (struct mic_vqconfig __iomem *)(desc + 1);
-}
-
-static inline __u8 __iomem *
-mic_vq_features(struct mic_device_desc __iomem *desc)
-{
-       return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq));
-}
-
-static inline __u8 __iomem *
-mic_vq_configspace(struct mic_device_desc __iomem *desc)
-{
-       return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2;
-}
-static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
-{
-       return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-
-int mic_devices_init(struct mic_driver *mdrv);
-void mic_devices_uninit(struct mic_driver *mdrv);
-
-#endif
index b2958ce2368c7b3fc1609bf891aa99e0701215aa..b9f0710ffa6b0f80f256994c968d783242723243 100644 (file)
@@ -326,6 +326,7 @@ static int __init mic_init(void)
                goto done;
        }
 
+       request_module("mic_x100_dma");
        mic_init_card_debugfs();
        ret = platform_device_register(&mic_platform_dev);
        if (ret) {
index 4b4b356c797d8a5c5b1aa115b9a3a01eec1c920c..7005cb1e01d21080539f4d4ece33cf0beaa4278e 100644 (file)
@@ -153,8 +153,10 @@ void cosm_stop(struct cosm_device *cdev, bool force)
                 * stop(..) calls device_unregister and will crash the system if
                 * called multiple times.
                 */
-               bool call_hw_ops = cdev->state != MIC_RESET_FAILED &&
-                                       cdev->state != MIC_READY;
+               u8 state = cdev->state == MIC_RESETTING ?
+                                       cdev->prev_state : cdev->state;
+               bool call_hw_ops = state != MIC_RESET_FAILED &&
+                                       state != MIC_READY;
 
                if (cdev->state != MIC_RESETTING)
                        cosm_set_state(cdev, MIC_RESETTING);
@@ -195,8 +197,11 @@ int cosm_reset(struct cosm_device *cdev)
 
        mutex_lock(&cdev->cosm_mutex);
        if (cdev->state != MIC_READY) {
-               cosm_set_state(cdev, MIC_RESETTING);
-               schedule_work(&cdev->reset_trigger_work);
+               if (cdev->state != MIC_RESETTING) {
+                       cdev->prev_state = cdev->state;
+                       cosm_set_state(cdev, MIC_RESETTING);
+                       schedule_work(&cdev->reset_trigger_work);
+               }
        } else {
                dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
                rc = -EINVAL;
index 004d3db0f9909b39f33e5ed3b376b9be873d14d9..f3b502333deda3718e8da3a31fae774772fa2541 100644 (file)
@@ -9,5 +9,3 @@ mic_host-objs += mic_smpt.o
 mic_host-objs += mic_intr.o
 mic_host-objs += mic_boot.o
 mic_host-objs += mic_debugfs.o
-mic_host-objs += mic_fops.o
-mic_host-objs += mic_virtio.o
index 7845564dff6401fdea46a8296271289aeabaf0ed..8c91c9950b545d98069d7a02a6c7c25c15333c1a 100644 (file)
 #include <linux/mic_common.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
-#include "mic_virtio.h"
+
+static inline struct mic_device *vpdev_to_mdev(struct device *dev)
+{
+       return dev_get_drvdata(dev->parent);
+}
+
+static dma_addr_t
+_mic_dma_map_page(struct device *dev, struct page *page,
+                 unsigned long offset, size_t size,
+                 enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       void *va = phys_to_virt(page_to_phys(page)) + offset;
+       struct mic_device *mdev = vpdev_to_mdev(dev);
+
+       return mic_map_single(mdev, va, size);
+}
+
+static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+                               size_t size, enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
+{
+       struct mic_device *mdev = vpdev_to_mdev(dev);
+
+       mic_unmap_single(mdev, dma_addr, size);
+}
+
+static const struct dma_map_ops _mic_dma_ops = {
+       .map_page = _mic_dma_map_page,
+       .unmap_page = _mic_dma_unmap_page,
+};
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+                 irqreturn_t (*func)(int irq, void *data),
+                 const char *name, void *data, int intr_src)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       return mic_request_threaded_irq(mdev, func, NULL, name, data,
+                                       intr_src, MIC_INTR_DB);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+                          struct mic_irq *cookie, void *data)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       return mic_free_irq(mdev, cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       mdev->ops->intr_workarounds(mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       return mic_next_db(mdev);
+}
+
+static void *__mic_get_dp(struct vop_device *vpdev)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       return mdev->dp;
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+       return NULL;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       mdev->ops->send_intr(mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+                                  dma_addr_t pa, size_t len)
+{
+       struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+       return mdev->aper.va + pa;
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+       /* nothing to do */
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+       .request_irq = __mic_request_irq,
+       .free_irq = __mic_free_irq,
+       .ack_interrupt = __mic_ack_interrupt,
+       .next_db = __mic_next_db,
+       .get_dp = __mic_get_dp,
+       .get_remote_dp = __mic_get_remote_dp,
+       .send_intr = __mic_send_intr,
+       .ioremap = __mic_ioremap,
+       .iounmap = __mic_iounmap,
+};
 
 static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
 {
@@ -315,7 +422,6 @@ static int mic_request_dma_chans(struct mic_device *mdev)
        dma_cap_mask_t mask;
        struct dma_chan *chan;
 
-       request_module("mic_x100_dma");
        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
 
@@ -387,9 +493,18 @@ static int _mic_start(struct cosm_device *cdev, int id)
                goto dma_free;
        }
 
+       mdev->vpdev = vop_register_device(&mdev->pdev->dev,
+                                         VOP_DEV_TRNSP, &_mic_dma_ops,
+                                         &vop_hw_ops, id + 1, &mdev->aper,
+                                         mdev->dma_ch[0]);
+       if (IS_ERR(mdev->vpdev)) {
+               rc = PTR_ERR(mdev->vpdev);
+               goto scif_remove;
+       }
+
        rc = mdev->ops->load_mic_fw(mdev, NULL);
        if (rc)
-               goto scif_remove;
+               goto vop_remove;
        mic_smpt_restore(mdev);
        mic_intr_restore(mdev);
        mdev->intr_ops->enable_interrupts(mdev);
@@ -397,6 +512,8 @@ static int _mic_start(struct cosm_device *cdev, int id)
        mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
        mdev->ops->send_firmware_intr(mdev);
        goto unlock_ret;
+vop_remove:
+       vop_unregister_device(mdev->vpdev);
 scif_remove:
        scif_unregister_device(mdev->scdev);
 dma_free:
@@ -423,7 +540,7 @@ static void _mic_stop(struct cosm_device *cdev, bool force)
         * will be the first to be registered and the last to be
         * unregistered.
         */
-       mic_virtio_reset_devices(mdev);
+       vop_unregister_device(mdev->vpdev);
        scif_unregister_device(mdev->scdev);
        mic_free_dma_chans(mdev);
        mbus_unregister_device(mdev->dma_mbdev);
index 10581600777ab38da39f209d352e2e43e5d4fa97..0a9daba8bb5de8ee3809c6da1a6a6023bf78a853 100644 (file)
@@ -26,7 +26,6 @@
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
-#include "mic_virtio.h"
 
 /* Debugfs parent dir */
 static struct dentry *mic_dbg;
@@ -100,190 +99,6 @@ static const struct file_operations post_code_ops = {
        .release = mic_post_code_debug_release
 };
 
-static int mic_dp_show(struct seq_file *s, void *pos)
-{
-       struct mic_device *mdev = s->private;
-       struct mic_device_desc *d;
-       struct mic_device_ctrl *dc;
-       struct mic_vqconfig *vqconfig;
-       __u32 *features;
-       __u8 *config;
-       struct mic_bootparam *bootparam = mdev->dp;
-       int i, j;
-
-       seq_printf(s, "Bootparam: magic 0x%x\n",
-                  bootparam->magic);
-       seq_printf(s, "Bootparam: h2c_config_db %d\n",
-                  bootparam->h2c_config_db);
-       seq_printf(s, "Bootparam: node_id %d\n",
-                  bootparam->node_id);
-       seq_printf(s, "Bootparam: c2h_scif_db %d\n",
-                  bootparam->c2h_scif_db);
-       seq_printf(s, "Bootparam: h2c_scif_db %d\n",
-                  bootparam->h2c_scif_db);
-       seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
-                  bootparam->scif_host_dma_addr);
-       seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
-                  bootparam->scif_card_dma_addr);
-
-
-       for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
-            i += mic_total_desc_size(d)) {
-               d = mdev->dp + i;
-               dc = (void *)d + mic_aligned_desc_size(d);
-
-               /* end of list */
-               if (d->type == 0)
-                       break;
-
-               if (d->type == -1)
-                       continue;
-
-               seq_printf(s, "Type %d ", d->type);
-               seq_printf(s, "Num VQ %d ", d->num_vq);
-               seq_printf(s, "Feature Len %d\n", d->feature_len);
-               seq_printf(s, "Config Len %d ", d->config_len);
-               seq_printf(s, "Shutdown Status %d\n", d->status);
-
-               for (j = 0; j < d->num_vq; j++) {
-                       vqconfig = mic_vq_config(d) + j;
-                       seq_printf(s, "vqconfig[%d]: ", j);
-                       seq_printf(s, "address 0x%llx ", vqconfig->address);
-                       seq_printf(s, "num %d ", vqconfig->num);
-                       seq_printf(s, "used address 0x%llx\n",
-                                  vqconfig->used_address);
-               }
-
-               features = (__u32 *)mic_vq_features(d);
-               seq_printf(s, "Features: Host 0x%x ", features[0]);
-               seq_printf(s, "Guest 0x%x\n", features[1]);
-
-               config = mic_vq_configspace(d);
-               for (j = 0; j < d->config_len; j++)
-                       seq_printf(s, "config[%d]=%d\n", j, config[j]);
-
-               seq_puts(s, "Device control:\n");
-               seq_printf(s, "Config Change %d ", dc->config_change);
-               seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
-               seq_printf(s, "Guest Ack %d ", dc->guest_ack);
-               seq_printf(s, "Host ack %d\n", dc->host_ack);
-               seq_printf(s, "Used address updated %d ",
-                          dc->used_address_updated);
-               seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
-               seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
-               seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
-       }
-
-       return 0;
-}
-
-static int mic_dp_debug_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mic_dp_show, inode->i_private);
-}
-
-static int mic_dp_debug_release(struct inode *inode, struct file *file)
-{
-       return single_release(inode, file);
-}
-
-static const struct file_operations dp_ops = {
-       .owner   = THIS_MODULE,
-       .open    = mic_dp_debug_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = mic_dp_debug_release
-};
-
-static int mic_vdev_info_show(struct seq_file *s, void *unused)
-{
-       struct mic_device *mdev = s->private;
-       struct list_head *pos, *tmp;
-       struct mic_vdev *mvdev;
-       int i, j;
-
-       mutex_lock(&mdev->mic_mutex);
-       list_for_each_safe(pos, tmp, &mdev->vdev_list) {
-               mvdev = list_entry(pos, struct mic_vdev, list);
-               seq_printf(s, "VDEV type %d state %s in %ld out %ld\n",
-                          mvdev->virtio_id,
-                          mic_vdevup(mvdev) ? "UP" : "DOWN",
-                          mvdev->in_bytes,
-                          mvdev->out_bytes);
-               for (i = 0; i < MIC_MAX_VRINGS; i++) {
-                       struct vring_desc *desc;
-                       struct vring_avail *avail;
-                       struct vring_used *used;
-                       struct mic_vringh *mvr = &mvdev->mvr[i];
-                       struct vringh *vrh = &mvr->vrh;
-                       int num = vrh->vring.num;
-                       if (!num)
-                               continue;
-                       desc = vrh->vring.desc;
-                       seq_printf(s, "vring i %d avail_idx %d",
-                                  i, mvr->vring.info->avail_idx & (num - 1));
-                       seq_printf(s, " vring i %d avail_idx %d\n",
-                                  i, mvr->vring.info->avail_idx);
-                       seq_printf(s, "vrh i %d weak_barriers %d",
-                                  i, vrh->weak_barriers);
-                       seq_printf(s, " last_avail_idx %d last_used_idx %d",
-                                  vrh->last_avail_idx, vrh->last_used_idx);
-                       seq_printf(s, " completed %d\n", vrh->completed);
-                       for (j = 0; j < num; j++) {
-                               seq_printf(s, "desc[%d] addr 0x%llx len %d",
-                                          j, desc->addr, desc->len);
-                               seq_printf(s, " flags 0x%x next %d\n",
-                                          desc->flags, desc->next);
-                               desc++;
-                       }
-                       avail = vrh->vring.avail;
-                       seq_printf(s, "avail flags 0x%x idx %d\n",
-                                  vringh16_to_cpu(vrh, avail->flags),
-                                  vringh16_to_cpu(vrh, avail->idx) & (num - 1));
-                       seq_printf(s, "avail flags 0x%x idx %d\n",
-                                  vringh16_to_cpu(vrh, avail->flags),
-                                  vringh16_to_cpu(vrh, avail->idx));
-                       for (j = 0; j < num; j++)
-                               seq_printf(s, "avail ring[%d] %d\n",
-                                          j, avail->ring[j]);
-                       used = vrh->vring.used;
-                       seq_printf(s, "used flags 0x%x idx %d\n",
-                                  vringh16_to_cpu(vrh, used->flags),
-                                  vringh16_to_cpu(vrh, used->idx) & (num - 1));
-                       seq_printf(s, "used flags 0x%x idx %d\n",
-                                  vringh16_to_cpu(vrh, used->flags),
-                                  vringh16_to_cpu(vrh, used->idx));
-                       for (j = 0; j < num; j++)
-                               seq_printf(s, "used ring[%d] id %d len %d\n",
-                                          j, vringh32_to_cpu(vrh,
-                                                             used->ring[j].id),
-                                          vringh32_to_cpu(vrh,
-                                                          used->ring[j].len));
-               }
-       }
-       mutex_unlock(&mdev->mic_mutex);
-
-       return 0;
-}
-
-static int mic_vdev_info_debug_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, mic_vdev_info_show, inode->i_private);
-}
-
-static int mic_vdev_info_debug_release(struct inode *inode, struct file *file)
-{
-       return single_release(inode, file);
-}
-
-static const struct file_operations vdev_info_ops = {
-       .owner   = THIS_MODULE,
-       .open    = mic_vdev_info_debug_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = mic_vdev_info_debug_release
-};
-
 static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
 {
        struct mic_device *mdev  = s->private;
@@ -367,11 +182,6 @@ void mic_create_debug_dir(struct mic_device *mdev)
        debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
                            &post_code_ops);
 
-       debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops);
-
-       debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev,
-                           &vdev_info_ops);
-
        debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
                            &msi_irq_info_ops);
 }
index 461184a12fbbfbbcdb73a1efb5e37b1247521f70..52b12b22f4aea2477f516475c33990f774a5bb91 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/miscdevice.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
 #include "../bus/cosm_bus.h"
 #include "mic_intr.h"
 
@@ -64,13 +65,11 @@ extern struct cosm_hw_ops cosm_hw_ops;
  * @bootaddr: MIC boot address.
  * @dp: virtio device page
  * @dp_dma_addr: virtio device page DMA address.
- * @name: name for the misc char device
- * @miscdev: registered misc char device
- * @vdev_list: list of virtio devices.
  * @dma_mbdev: MIC BUS DMA device.
  * @dma_ch - Array of DMA channels
  * @num_dma_ch - Number of DMA channels available
  * @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
  * @cosm_dev: COSM device
  */
 struct mic_device {
@@ -91,13 +90,11 @@ struct mic_device {
        u32 bootaddr;
        void *dp;
        dma_addr_t dp_dma_addr;
-       char name[16];
-       struct miscdevice miscdev;
-       struct list_head vdev_list;
        struct mbus_device *dma_mbdev;
        struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
        int num_dma_ch;
        struct scif_hw_dev *scdev;
+       struct vop_device *vpdev;
        struct cosm_device *cosm_dev;
 };
 
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c
deleted file mode 100644 (file)
index 8cc1d90..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/poll.h>
-#include <linux/pci.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
-
-int mic_open(struct inode *inode, struct file *f)
-{
-       struct mic_vdev *mvdev;
-       struct mic_device *mdev = container_of(f->private_data,
-               struct mic_device, miscdev);
-
-       mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
-       if (!mvdev)
-               return -ENOMEM;
-
-       init_waitqueue_head(&mvdev->waitq);
-       INIT_LIST_HEAD(&mvdev->list);
-       mvdev->mdev = mdev;
-       mvdev->virtio_id = -1;
-
-       f->private_data = mvdev;
-       return 0;
-}
-
-int mic_release(struct inode *inode, struct file *f)
-{
-       struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-
-       if (-1 != mvdev->virtio_id)
-               mic_virtio_del_device(mvdev);
-       f->private_data = NULL;
-       kfree(mvdev);
-       return 0;
-}
-
-long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
-       struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-       void __user *argp = (void __user *)arg;
-       int ret;
-
-       switch (cmd) {
-       case MIC_VIRTIO_ADD_DEVICE:
-       {
-               ret = mic_virtio_add_device(mvdev, argp);
-               if (ret < 0) {
-                       dev_err(mic_dev(mvdev),
-                               "%s %d errno ret %d\n",
-                               __func__, __LINE__, ret);
-                       return ret;
-               }
-               break;
-       }
-       case MIC_VIRTIO_COPY_DESC:
-       {
-               struct mic_copy_desc copy;
-
-               ret = mic_vdev_inited(mvdev);
-               if (ret)
-                       return ret;
-
-               if (copy_from_user(&copy, argp, sizeof(copy)))
-                       return -EFAULT;
-
-               dev_dbg(mic_dev(mvdev),
-                       "%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
-                       __func__, __LINE__, copy.iovcnt, copy.vr_idx,
-                       copy.update_used);
-
-               ret = mic_virtio_copy_desc(mvdev, &copy);
-               if (ret < 0) {
-                       dev_err(mic_dev(mvdev),
-                               "%s %d errno ret %d\n",
-                               __func__, __LINE__, ret);
-                       return ret;
-               }
-               if (copy_to_user(
-                       &((struct mic_copy_desc __user *)argp)->out_len,
-                       &copy.out_len, sizeof(copy.out_len))) {
-                       dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
-                               __func__, __LINE__, -EFAULT);
-                       return -EFAULT;
-               }
-               break;
-       }
-       case MIC_VIRTIO_CONFIG_CHANGE:
-       {
-               ret = mic_vdev_inited(mvdev);
-               if (ret)
-                       return ret;
-
-               ret = mic_virtio_config_change(mvdev, argp);
-               if (ret < 0) {
-                       dev_err(mic_dev(mvdev),
-                               "%s %d errno ret %d\n",
-                               __func__, __LINE__, ret);
-                       return ret;
-               }
-               break;
-       }
-       default:
-               return -ENOIOCTLCMD;
-       }
-       return 0;
-}
-
-/*
- * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
- * not when previously enqueued buffers may be available. This means that
- * in the card->host (TX) path, when userspace is unblocked by poll it
- * must drain all available descriptors or it can stall.
- */
-unsigned int mic_poll(struct file *f, poll_table *wait)
-{
-       struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-       int mask = 0;
-
-       poll_wait(f, &mvdev->waitq, wait);
-
-       if (mic_vdev_inited(mvdev)) {
-               mask = POLLERR;
-       } else if (mvdev->poll_wake) {
-               mvdev->poll_wake = 0;
-               mask = POLLIN | POLLOUT;
-       }
-
-       return mask;
-}
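The comment above the poll handler matters for callers: a POLLIN | POLLOUT wakeup only means new buffers were enqueued, so user space has to drain everything that is available before sleeping again. A hypothetical user-space fragment of that service loop; drain_all_descriptors() is a stand-in for repeated MIC_VIRTIO_COPY_DESC ioctls and is not a real interface:

#include <poll.h>

/* Stand-in: loop on MIC_VIRTIO_COPY_DESC until no descriptors remain. */
static void drain_all_descriptors(int fd)
{
        (void)fd;
}

void service_fd(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

        while (poll(&pfd, 1, -1) > 0) {
                if (pfd.revents & POLLERR)
                        break;
                /* Drain everything now; a partial drain can stall the ring. */
                drain_all_descriptors(fd);
        }
}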
-
-static inline int
-mic_query_offset(struct mic_vdev *mvdev, unsigned long offset,
-                unsigned long *size, unsigned long *pa)
-{
-       struct mic_device *mdev = mvdev->mdev;
-       unsigned long start = MIC_DP_SIZE;
-       int i;
-
-       /*
-        * MMAP interface is as follows:
-        * offset                               region
-        * 0x0                                  virtio device_page
-        * 0x1000                               first vring
-        * 0x1000 + size of 1st vring           second vring
-        * ....
-        */
-       if (!offset) {
-               *pa = virt_to_phys(mdev->dp);
-               *size = MIC_DP_SIZE;
-               return 0;
-       }
-
-       for (i = 0; i < mvdev->dd->num_vq; i++) {
-               struct mic_vringh *mvr = &mvdev->mvr[i];
-               if (offset == start) {
-                       *pa = virt_to_phys(mvr->vring.va);
-                       *size = mvr->vring.len;
-                       return 0;
-               }
-               start += mvr->vring.len;
-       }
-       return -1;
-}
-
-/*
- * Maps the device page and virtio rings to user space for readonly access.
- */
-int
-mic_mmap(struct file *f, struct vm_area_struct *vma)
-{
-       struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-       unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
-       int i, err;
-
-       err = mic_vdev_inited(mvdev);
-       if (err)
-               return err;
-
-       if (vma->vm_flags & VM_WRITE)
-               return -EACCES;
-
-       while (size_rem) {
-               i = mic_query_offset(mvdev, offset, &size, &pa);
-               if (i < 0)
-                       return -EINVAL;
-               err = remap_pfn_range(vma, vma->vm_start + offset,
-                       pa >> PAGE_SHIFT, size, vma->vm_page_prot);
-               if (err)
-                       return err;
-               dev_dbg(mic_dev(mvdev),
-                       "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n",
-                       __func__, __LINE__, mvdev->virtio_id, size, offset,
-                       pa, vma->vm_start + offset);
-               size_rem -= size;
-               offset += size;
-       }
-       return 0;
-}
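Putting the two functions above together, user space mapped the device page (offset 0) followed by the vrings, all read-only. A hypothetical user-space sketch that maps just the device page, assuming the misc device node name and a 4 KiB device page; both are illustrative, not guaranteed by this driver:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mic0", O_RDWR);     /* node name is illustrative */
        void *dp;

        if (fd < 0)
                return 1;
        dp = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (dp == MAP_FAILED) {
                close(fd);
                return 1;
        }
        printf("device page mapped at %p\n", dp);
        munmap(dp, 4096);
        close(fd);
        return 0;
}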
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h
deleted file mode 100644 (file)
index dc3893d..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef _MIC_FOPS_H_
-#define _MIC_FOPS_H_
-
-int mic_open(struct inode *inode, struct file *filp);
-int mic_release(struct inode *inode, struct file *filp);
-ssize_t mic_read(struct file *filp, char __user *buf,
-                       size_t count, loff_t *pos);
-long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int mic_mmap(struct file *f, struct vm_area_struct *vma);
-unsigned int mic_poll(struct file *f, poll_table *wait);
-
-#endif
index 153894e7ed5b83302cb77be582c7c23ac533141e..035be3e9ceba075baa14d8617b46812d9a2060eb 100644 (file)
@@ -27,8 +27,6 @@
 #include "mic_device.h"
 #include "mic_x100.h"
 #include "mic_smpt.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
 
 static const char mic_driver_name[] = "mic";
 
@@ -57,17 +55,6 @@ MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
 
 /* ID allocator for MIC devices */
 static struct ida g_mic_ida;
-/* Base device node number for MIC devices */
-static dev_t g_mic_devno;
-
-static const struct file_operations mic_fops = {
-       .open = mic_open,
-       .release = mic_release,
-       .unlocked_ioctl = mic_ioctl,
-       .poll = mic_poll,
-       .mmap = mic_mmap,
-       .owner = THIS_MODULE,
-};
 
 /* Initialize the device page */
 static int mic_dp_init(struct mic_device *mdev)
@@ -169,7 +156,6 @@ mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
        mic_ops_init(mdev);
        mutex_init(&mdev->mic_mutex);
        mdev->irq_info.next_avail_src = 0;
-       INIT_LIST_HEAD(&mdev->vdev_list);
 }
 
 /**
@@ -259,30 +245,15 @@ static int mic_probe(struct pci_dev *pdev,
                goto smpt_uninit;
        }
        mic_bootparam_init(mdev);
-
        mic_create_debug_dir(mdev);
 
-       mdev->miscdev.minor = MISC_DYNAMIC_MINOR;
-       snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id);
-       mdev->miscdev.name = mdev->name;
-       mdev->miscdev.fops = &mic_fops;
-       mdev->miscdev.parent = &mdev->pdev->dev;
-       rc = misc_register(&mdev->miscdev);
-       if (rc) {
-               dev_err(&pdev->dev, "misc_register err id %d rc %d\n",
-                       mdev->id, rc);
-               goto cleanup_debug_dir;
-       }
-
        mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
        if (IS_ERR(mdev->cosm_dev)) {
                rc = PTR_ERR(mdev->cosm_dev);
                dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
-               goto misc_dereg;
+               goto cleanup_debug_dir;
        }
        return 0;
-misc_dereg:
-       misc_deregister(&mdev->miscdev);
 cleanup_debug_dir:
        mic_delete_debug_dir(mdev);
        mic_dp_uninit(mdev);
@@ -323,7 +294,6 @@ static void mic_remove(struct pci_dev *pdev)
                return;
 
        cosm_unregister_device(mdev->cosm_dev);
-       misc_deregister(&mdev->miscdev);
        mic_delete_debug_dir(mdev);
        mic_dp_uninit(mdev);
        mic_smpt_uninit(mdev);
@@ -347,26 +317,18 @@ static int __init mic_init(void)
 {
        int ret;
 
-       ret = alloc_chrdev_region(&g_mic_devno, 0,
-                                 MIC_MAX_NUM_DEVS, mic_driver_name);
-       if (ret) {
-               pr_err("alloc_chrdev_region failed ret %d\n", ret);
-               goto error;
-       }
-
+       request_module("mic_x100_dma");
        mic_init_debugfs();
        ida_init(&g_mic_ida);
        ret = pci_register_driver(&mic_driver);
        if (ret) {
                pr_err("pci_register_driver failed ret %d\n", ret);
-               goto cleanup_chrdev;
+               goto cleanup_debugfs;
        }
-       return ret;
-cleanup_chrdev:
+       return 0;
+cleanup_debugfs:
        ida_destroy(&g_mic_ida);
        mic_exit_debugfs();
-       unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
-error:
        return ret;
 }
 
@@ -375,7 +337,6 @@ static void __exit mic_exit(void)
        pci_unregister_driver(&mic_driver);
        ida_destroy(&g_mic_ida);
        mic_exit_debugfs();
-       unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
 }
 
 module_init(mic_init);
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
deleted file mode 100644 (file)
index 58b107a..0000000
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/dmaengine.h>
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-#include "mic_virtio.h"
-
-/*
- * Size of the internal buffer used during DMAs as an intermediate buffer
- * for copies to/from user space.
- */
-#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
-
-static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
-                       dma_addr_t src, size_t len)
-{
-       int err = 0;
-       struct dma_async_tx_descriptor *tx;
-       struct dma_chan *mic_ch = mdev->dma_ch[0];
-
-       if (!mic_ch) {
-               err = -EBUSY;
-               goto error;
-       }
-
-       tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
-                                                   DMA_PREP_FENCE);
-       if (!tx) {
-               err = -ENOMEM;
-               goto error;
-       } else {
-               dma_cookie_t cookie = tx->tx_submit(tx);
-
-               err = dma_submit_error(cookie);
-               if (err)
-                       goto error;
-               err = dma_sync_wait(mic_ch, cookie);
-       }
-error:
-       if (err)
-               dev_err(&mdev->pdev->dev, "%s %d err %d\n",
-                       __func__, __LINE__, err);
-       return err;
-}
-
-/*
- * Initiates the copies across the PCIe bus from card memory to a user
- * space buffer. When transfers are done using DMA, source/destination
- * addresses and transfer length must follow the alignment requirements of
- * the MIC DMA engine.
- */
-static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
-                                  size_t len, u64 daddr, size_t dlen,
-                                  int vr_idx)
-{
-       struct mic_device *mdev = mvdev->mdev;
-       void __iomem *dbuf = mdev->aper.va + daddr;
-       struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
-       size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
-       size_t dma_offset;
-       size_t partlen;
-       int err;
-
-       dma_offset = daddr - round_down(daddr, dma_alignment);
-       daddr -= dma_offset;
-       len += dma_offset;
-
-       while (len) {
-               partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
-               err = mic_sync_dma(mdev, mvr->buf_da, daddr,
-                                  ALIGN(partlen, dma_alignment));
-               if (err)
-                       goto err;
-
-               if (copy_to_user(ubuf, mvr->buf + dma_offset,
-                                partlen - dma_offset)) {
-                       err = -EFAULT;
-                       goto err;
-               }
-               daddr += partlen;
-               ubuf += partlen;
-               dbuf += partlen;
-               mvdev->in_bytes_dma += partlen;
-               mvdev->in_bytes += partlen;
-               len -= partlen;
-               dma_offset = 0;
-       }
-       return 0;
-err:
-       dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
-       return err;
-}
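The card-to-host copy above has to honour the DMA engine's alignment: the source address is rounded down to the engine's copy alignment and the caller then skips dma_offset bytes of the bounce buffer. A stand-alone sketch of that round-down arithmetic with an illustrative 64-byte alignment (the kernel's round_down() helper does the same for power-of-two alignments):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t daddr = 0x100a;        /* unaligned card-side address */
        uint64_t align = 64;            /* illustrative engine alignment */
        uint64_t aligned = daddr & ~(align - 1);
        uint64_t dma_offset = daddr - aligned;

        printf("DMA from 0x%llx, user data starts %llu bytes in\n",
               (unsigned long long)aligned, (unsigned long long)dma_offset);
        return 0;
}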
-
-/*
- * Initiates copies across the PCIe bus from a user space buffer to card
- * memory. When transfers are done using DMA, source/destination addresses
- * and transfer length must follow the alignment requirements of the MIC
- * DMA engine.
- */
-static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
-                                    size_t len, u64 daddr, size_t dlen,
-                                    int vr_idx)
-{
-       struct mic_device *mdev = mvdev->mdev;
-       void __iomem *dbuf = mdev->aper.va + daddr;
-       struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
-       size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
-       size_t partlen;
-       int err;
-
-       if (daddr & (dma_alignment - 1)) {
-               mvdev->tx_dst_unaligned += len;
-               goto memcpy;
-       } else if (ALIGN(len, dma_alignment) > dlen) {
-               mvdev->tx_len_unaligned += len;
-               goto memcpy;
-       }
-
-       while (len) {
-               partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
-               if (copy_from_user(mvr->buf, ubuf, partlen)) {
-                       err = -EFAULT;
-                       goto err;
-               }
-               err = mic_sync_dma(mdev, daddr, mvr->buf_da,
-                                  ALIGN(partlen, dma_alignment));
-               if (err)
-                       goto err;
-               daddr += partlen;
-               ubuf += partlen;
-               dbuf += partlen;
-               mvdev->out_bytes_dma += partlen;
-               mvdev->out_bytes += partlen;
-               len -= partlen;
-       }
-memcpy:
-       /*
-        * We are copying to IO below and should ideally use something
-        * like copy_from_user_toio(..) if it existed.
-        */
-       if (copy_from_user((void __force *)dbuf, ubuf, len)) {
-               err = -EFAULT;
-               goto err;
-       }
-       mvdev->out_bytes += len;
-       return 0;
-err:
-       dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
-       return err;
-}
-
-#define MIC_VRINGH_READ true
-
-/* The function to call to notify the card about added buffers */
-static void mic_notify(struct vringh *vrh)
-{
-       struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
-       struct mic_vdev *mvdev = mvrh->mvdev;
-       s8 db = mvdev->dc->h2c_vdev_db;
-
-       if (db != -1)
-               mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-}
-
-/* Determine the total number of bytes consumed in a VRINGH KIOV */
-static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
-{
-       int i;
-       u32 total = iov->consumed;
-
-       for (i = 0; i < iov->i; i++)
-               total += iov->iov[i].iov_len;
-       return total;
-}
-
-/*
- * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
- * This API is heavily based on the vringh_iov_xfer(..) implementation
- * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
- * and vringh_iov_push_kern(..) directly is that there is no
- * way to override the VRINGH xfer(..) routines as of v3.10.
- */
-static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
-                       void __user *ubuf, size_t len, bool read, int vr_idx,
-                       size_t *out_len)
-{
-       int ret = 0;
-       size_t partlen, tot_len = 0;
-
-       while (len && iov->i < iov->used) {
-               partlen = min(iov->iov[iov->i].iov_len, len);
-               if (read)
-                       ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
-                                               (u64)iov->iov[iov->i].iov_base,
-                                               iov->iov[iov->i].iov_len,
-                                               vr_idx);
-               else
-                       ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
-                                               (u64)iov->iov[iov->i].iov_base,
-                                               iov->iov[iov->i].iov_len,
-                                               vr_idx);
-               if (ret) {
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       break;
-               }
-               len -= partlen;
-               ubuf += partlen;
-               tot_len += partlen;
-               iov->consumed += partlen;
-               iov->iov[iov->i].iov_len -= partlen;
-               iov->iov[iov->i].iov_base += partlen;
-               if (!iov->iov[iov->i].iov_len) {
-                       /* Fix up old iov element then increment. */
-                       iov->iov[iov->i].iov_len = iov->consumed;
-                       iov->iov[iov->i].iov_base -= iov->consumed;
-
-                       iov->consumed = 0;
-                       iov->i++;
-               }
-       }
-       *out_len = tot_len;
-       return ret;
-}
-
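/*
 * Hypothetical helper (not in the driver) isolating the element fix-up done
 * at the bottom of the loop above: once a kiov element is fully consumed its
 * original length and base are restored, so mic_vringh_iov_consumed() can
 * later total the bytes transferred across all elements.
 */
static void example_kiov_close_element(struct vringh_kiov *iov)
{
        iov->iov[iov->i].iov_len = iov->consumed;
        iov->iov[iov->i].iov_base -= iov->consumed;
        iov->consumed = 0;
        iov->i++;
}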
-/*
- * Use the standard VRINGH infrastructure in the kernel to fetch new
- * descriptors, initiate the copies and update the used ring.
- */
-static int _mic_virtio_copy(struct mic_vdev *mvdev,
-       struct mic_copy_desc *copy)
-{
-       int ret = 0;
-       u32 iovcnt = copy->iovcnt;
-       struct iovec iov;
-       struct iovec __user *u_iov = copy->iov;
-       void __user *ubuf = NULL;
-       struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
-       struct vringh_kiov *riov = &mvr->riov;
-       struct vringh_kiov *wiov = &mvr->wiov;
-       struct vringh *vrh = &mvr->vrh;
-       u16 *head = &mvr->head;
-       struct mic_vring *vr = &mvr->vring;
-       size_t len = 0, out_len;
-
-       copy->out_len = 0;
-       /* Fetch a new IOVEC if all previous elements have been processed */
-       if (riov->i == riov->used && wiov->i == wiov->used) {
-               ret = vringh_getdesc_kern(vrh, riov, wiov,
-                               head, GFP_KERNEL);
-               /* Check if there are available descriptors */
-               if (ret <= 0)
-                       return ret;
-       }
-       while (iovcnt) {
-               if (!len) {
-                       /* Copy over a new iovec from user space. */
-                       ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
-                       if (ret) {
-                               ret = -EINVAL;
-                               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                                       __func__, __LINE__, ret);
-                               break;
-                       }
-                       len = iov.iov_len;
-                       ubuf = iov.iov_base;
-               }
-               /* Issue all the read descriptors first */
-               ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
-                                     copy->vr_idx, &out_len);
-               if (ret) {
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       break;
-               }
-               len -= out_len;
-               ubuf += out_len;
-               copy->out_len += out_len;
-               /* Issue the write descriptors next */
-               ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
-                                     copy->vr_idx, &out_len);
-               if (ret) {
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       break;
-               }
-               len -= out_len;
-               ubuf += out_len;
-               copy->out_len += out_len;
-               if (!len) {
-                       /* One user space iovec is now completed */
-                       iovcnt--;
-                       u_iov++;
-               }
-               /* Exit loop if all elements in KIOVs have been processed. */
-               if (riov->i == riov->used && wiov->i == wiov->used)
-                       break;
-       }
-       /*
-        * Update the used ring if a descriptor was available and some data was
-        * copied in/out and the user asked for a used ring update.
-        */
-       if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
-               u32 total = 0;
-
-               /* Determine the total data consumed */
-               total += mic_vringh_iov_consumed(riov);
-               total += mic_vringh_iov_consumed(wiov);
-               vringh_complete_kern(vrh, *head, total);
-               *head = USHRT_MAX;
-               if (vringh_need_notify_kern(vrh) > 0)
-                       vringh_notify(vrh);
-               vringh_kiov_cleanup(riov);
-               vringh_kiov_cleanup(wiov);
-               /* Update avail idx for user space */
-               vr->info->avail_idx = vrh->last_avail_idx;
-       }
-       return ret;
-}
-
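/*
 * Condensed sketch (hypothetical, not in the driver) of the vringh cycle that
 * _mic_virtio_copy() implements above: fetch a descriptor chain, service the
 * read and write kiovs, then retire the chain on the used ring and notify the
 * other side when required.
 */
static int example_vringh_cycle(struct vringh *vrh, struct vringh_kiov *riov,
                                struct vringh_kiov *wiov, u16 *head)
{
        int ret = vringh_getdesc_kern(vrh, riov, wiov, head, GFP_KERNEL);

        if (ret <= 0)
                return ret;     /* 0 means no descriptors are available */

        /* ... copy data out of riov and into wiov here ... */

        vringh_complete_kern(vrh, *head, 0 /* total bytes consumed */);
        if (vringh_need_notify_kern(vrh) > 0)
                vringh_notify(vrh);
        vringh_kiov_cleanup(riov);
        vringh_kiov_cleanup(wiov);
        return 1;
}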
-static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
-               struct mic_copy_desc *copy)
-{
-       if (copy->vr_idx >= mvdev->dd->num_vq) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -EINVAL);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/* Copy a specified number of virtio descriptors in a chain */
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
-               struct mic_copy_desc *copy)
-{
-       int err;
-       struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
-
-       err = mic_verify_copy_args(mvdev, copy);
-       if (err)
-               return err;
-
-       mutex_lock(&mvr->vr_mutex);
-       if (!mic_vdevup(mvdev)) {
-               err = -ENODEV;
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, err);
-               goto err;
-       }
-       err = _mic_virtio_copy(mvdev, copy);
-       if (err) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, err);
-       }
-err:
-       mutex_unlock(&mvr->vr_mutex);
-       return err;
-}
-
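/*
 * User-space sketch of how this entry point is typically driven (for example
 * by the mpssd daemon) through the MIC_VIRTIO_COPY_DESC ioctl declared in
 * <linux/mic_ioctl.h>; fd and the iovec array are placeholders, and the field
 * names follow struct mic_copy_desc as used above. Requires <sys/ioctl.h>
 * and <stdio.h>.
 */
        struct mic_copy_desc copy = {
                .iov = iov,
                .iovcnt = iovcnt,
                .vr_idx = vr_idx,
                .update_used = 1,
        };

        if (ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy) < 0)
                perror("MIC_VIRTIO_COPY_DESC");
        else
                printf("transferred %u bytes\n", copy.out_len);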
-static void mic_virtio_init_post(struct mic_vdev *mvdev)
-{
-       struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
-       int i;
-
-       for (i = 0; i < mvdev->dd->num_vq; i++) {
-               if (!le64_to_cpu(vqconfig[i].used_address)) {
-                       dev_warn(mic_dev(mvdev), "used_address zero??\n");
-                       continue;
-               }
-               mvdev->mvr[i].vrh.vring.used =
-                       (void __force *)mvdev->mdev->aper.va +
-                       le64_to_cpu(vqconfig[i].used_address);
-       }
-
-       mvdev->dc->used_address_updated = 0;
-
-       dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
-               __func__, mvdev->virtio_id);
-}
-
-static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
-{
-       int i;
-
-       dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
-               __func__, mvdev->dd->status, mvdev->virtio_id);
-
-       for (i = 0; i < mvdev->dd->num_vq; i++)
-               /*
-                * Avoid lockdep false positive. The + 1 is for the mic
-                * mutex which is held in the reset devices code path.
-                */
-               mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
-       /* 0 status means "reset" */
-       mvdev->dd->status = 0;
-       mvdev->dc->vdev_reset = 0;
-       mvdev->dc->host_ack = 1;
-
-       for (i = 0; i < mvdev->dd->num_vq; i++) {
-               struct vringh *vrh = &mvdev->mvr[i].vrh;
-               mvdev->mvr[i].vring.info->avail_idx = 0;
-               vrh->completed = 0;
-               vrh->last_avail_idx = 0;
-               vrh->last_used_idx = 0;
-       }
-
-       for (i = 0; i < mvdev->dd->num_vq; i++)
-               mutex_unlock(&mvdev->mvr[i].vr_mutex);
-}
-
-void mic_virtio_reset_devices(struct mic_device *mdev)
-{
-       struct list_head *pos, *tmp;
-       struct mic_vdev *mvdev;
-
-       dev_dbg(&mdev->pdev->dev, "%s\n",  __func__);
-
-       list_for_each_safe(pos, tmp, &mdev->vdev_list) {
-               mvdev = list_entry(pos, struct mic_vdev, list);
-               mic_virtio_device_reset(mvdev);
-               mvdev->poll_wake = 1;
-               wake_up(&mvdev->waitq);
-       }
-}
-
-void mic_bh_handler(struct work_struct *work)
-{
-       struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
-                       virtio_bh_work);
-
-       if (mvdev->dc->used_address_updated)
-               mic_virtio_init_post(mvdev);
-
-       if (mvdev->dc->vdev_reset)
-               mic_virtio_device_reset(mvdev);
-
-       mvdev->poll_wake = 1;
-       wake_up(&mvdev->waitq);
-}
-
-static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
-{
-       struct mic_vdev *mvdev = data;
-       struct mic_device *mdev = mvdev->mdev;
-
-       mdev->ops->intr_workarounds(mdev);
-       schedule_work(&mvdev->virtio_bh_work);
-       return IRQ_HANDLED;
-}
-
-int mic_virtio_config_change(struct mic_vdev *mvdev,
-                       void __user *argp)
-{
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
-       int ret = 0, retry, i;
-       struct mic_bootparam *bootparam = mvdev->mdev->dp;
-       s8 db = bootparam->h2c_config_db;
-
-       mutex_lock(&mvdev->mdev->mic_mutex);
-       for (i = 0; i < mvdev->dd->num_vq; i++)
-               mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
-       if (db == -1 || mvdev->dd->type == -1) {
-               ret = -EIO;
-               goto exit;
-       }
-
-       if (copy_from_user(mic_vq_configspace(mvdev->dd),
-                          argp, mvdev->dd->config_len)) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -EFAULT);
-               ret = -EFAULT;
-               goto exit;
-       }
-       mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
-       mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-
-       for (retry = 100; retry--;) {
-               ret = wait_event_timeout(wake,
-                       mvdev->dc->guest_ack, msecs_to_jiffies(100));
-               if (ret)
-                       break;
-       }
-
-       dev_dbg(mic_dev(mvdev),
-               "%s %d retry: %d\n", __func__, __LINE__, retry);
-       mvdev->dc->config_change = 0;
-       mvdev->dc->guest_ack = 0;
-exit:
-       for (i = 0; i < mvdev->dd->num_vq; i++)
-               mutex_unlock(&mvdev->mvr[i].vr_mutex);
-       mutex_unlock(&mvdev->mdev->mic_mutex);
-       return ret;
-}
-
-static int mic_copy_dp_entry(struct mic_vdev *mvdev,
-                                       void __user *argp,
-                                       __u8 *type,
-                                       struct mic_device_desc **devpage)
-{
-       struct mic_device *mdev = mvdev->mdev;
-       struct mic_device_desc dd, *dd_config, *devp;
-       struct mic_vqconfig *vqconfig;
-       int ret = 0, i;
-       bool slot_found = false;
-
-       if (copy_from_user(&dd, argp, sizeof(dd))) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -EFAULT);
-               return -EFAULT;
-       }
-
-       if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
-           dd.num_vq > MIC_MAX_VRINGS) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -EINVAL);
-               return -EINVAL;
-       }
-
-       dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
-       if (dd_config == NULL) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -ENOMEM);
-               return -ENOMEM;
-       }
-       if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
-               ret = -EFAULT;
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, ret);
-               goto exit;
-       }
-
-       vqconfig = mic_vq_config(dd_config);
-       for (i = 0; i < dd.num_vq; i++) {
-               if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
-                       ret =  -EINVAL;
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       goto exit;
-               }
-       }
-
-       /* Find the first free device page entry */
-       for (i = sizeof(struct mic_bootparam);
-               i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
-               i += mic_total_desc_size(devp)) {
-               devp = mdev->dp + i;
-               if (devp->type == 0 || devp->type == -1) {
-                       slot_found = true;
-                       break;
-               }
-       }
-       if (!slot_found) {
-               ret =  -EINVAL;
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, ret);
-               goto exit;
-       }
-       /*
-        * Save off the type before doing the memcpy. Type will be set in the
-        * end after completing all initialization for the new device.
-        */
-       *type = dd_config->type;
-       dd_config->type = 0;
-       memcpy(devp, dd_config, mic_desc_size(dd_config));
-
-       *devpage = devp;
-exit:
-       kfree(dd_config);
-       return ret;
-}
-
-static void mic_init_device_ctrl(struct mic_vdev *mvdev,
-                               struct mic_device_desc *devpage)
-{
-       struct mic_device_ctrl *dc;
-
-       dc = (void *)devpage + mic_aligned_desc_size(devpage);
-
-       dc->config_change = 0;
-       dc->guest_ack = 0;
-       dc->vdev_reset = 0;
-       dc->host_ack = 0;
-       dc->used_address_updated = 0;
-       dc->c2h_vdev_db = -1;
-       dc->h2c_vdev_db = -1;
-       mvdev->dc = dc;
-}
-
-int mic_virtio_add_device(struct mic_vdev *mvdev,
-                       void __user *argp)
-{
-       struct mic_device *mdev = mvdev->mdev;
-       struct mic_device_desc *dd = NULL;
-       struct mic_vqconfig *vqconfig;
-       int vr_size, i, j, ret;
-       u8 type = 0;
-       s8 db;
-       char irqname[10];
-       struct mic_bootparam *bootparam = mdev->dp;
-       u16 num;
-       dma_addr_t vr_addr;
-
-       mutex_lock(&mdev->mic_mutex);
-
-       ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
-       if (ret) {
-               mutex_unlock(&mdev->mic_mutex);
-               return ret;
-       }
-
-       mic_init_device_ctrl(mvdev, dd);
-
-       mvdev->dd = dd;
-       mvdev->virtio_id = type;
-       vqconfig = mic_vq_config(dd);
-       INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
-
-       for (i = 0; i < dd->num_vq; i++) {
-               struct mic_vringh *mvr = &mvdev->mvr[i];
-               struct mic_vring *vr = &mvdev->mvr[i].vring;
-               num = le16_to_cpu(vqconfig[i].num);
-               mutex_init(&mvr->vr_mutex);
-               vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
-                       sizeof(struct _mic_vring_info));
-               vr->va = (void *)
-                       __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                        get_order(vr_size));
-               if (!vr->va) {
-                       ret = -ENOMEM;
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       goto err;
-               }
-               vr->len = vr_size;
-               vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
-               vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
-               vr_addr = mic_map_single(mdev, vr->va, vr_size);
-               if (mic_map_error(vr_addr)) {
-                       free_pages((unsigned long)vr->va, get_order(vr_size));
-                       ret = -ENOMEM;
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       goto err;
-               }
-               vqconfig[i].address = cpu_to_le64(vr_addr);
-
-               vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
-               ret = vringh_init_kern(&mvr->vrh,
-                       *(u32 *)mic_vq_features(mvdev->dd), num, false,
-                       vr->vr.desc, vr->vr.avail, vr->vr.used);
-               if (ret) {
-                       dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                               __func__, __LINE__, ret);
-                       goto err;
-               }
-               vringh_kiov_init(&mvr->riov, NULL, 0);
-               vringh_kiov_init(&mvr->wiov, NULL, 0);
-               mvr->head = USHRT_MAX;
-               mvr->mvdev = mvdev;
-               mvr->vrh.notify = mic_notify;
-               dev_dbg(&mdev->pdev->dev,
-                       "%s %d index %d va %p info %p vr_size 0x%x\n",
-                       __func__, __LINE__, i, vr->va, vr->info, vr_size);
-               mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
-                                       get_order(MIC_INT_DMA_BUF_SIZE));
-               mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
-                                         MIC_INT_DMA_BUF_SIZE);
-       }
-
-       snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
-                mvdev->virtio_id);
-       mvdev->virtio_db = mic_next_db(mdev);
-       mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
-                                              mic_virtio_intr_handler,
-                                              NULL, irqname, mvdev,
-                                              mvdev->virtio_db, MIC_INTR_DB);
-       if (IS_ERR(mvdev->virtio_cookie)) {
-               ret = PTR_ERR(mvdev->virtio_cookie);
-               dev_dbg(&mdev->pdev->dev, "request irq failed\n");
-               goto err;
-       }
-
-       mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
-
-       list_add_tail(&mvdev->list, &mdev->vdev_list);
-       /*
-        * Order the type update with previous stores. This write barrier
-        * is paired with the corresponding read barrier before the uncached
-        * system memory read of the type, on the card while scanning the
-        * device page.
-        */
-       smp_wmb();
-       dd->type = type;
-
-       dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type);
-
-       db = bootparam->h2c_config_db;
-       if (db != -1)
-               mdev->ops->send_intr(mdev, db);
-       mutex_unlock(&mdev->mic_mutex);
-       return 0;
-err:
-       vqconfig = mic_vq_config(dd);
-       for (j = 0; j < i; j++) {
-               struct mic_vringh *mvr = &mvdev->mvr[j];
-               mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
-                                mvr->vring.len);
-               free_pages((unsigned long)mvr->vring.va,
-                          get_order(mvr->vring.len));
-       }
-       mutex_unlock(&mdev->mic_mutex);
-       return ret;
-}
-
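/*
 * The smp_wmb() above publishes the fully initialised descriptor before its
 * type becomes non-zero. Hypothetical sketch of the matching consumer-side
 * pattern (the card-side scan is not part of this file): read the type first,
 * then issue the paired read barrier before trusting the other fields.
 */
static int example_scan_entry(struct mic_device_desc *devp)
{
        int type = devp->type;

        if (!type || type == -1)
                return -ENODEV;
        smp_rmb();              /* pairs with the smp_wmb() above */
        /* ... remaining descriptor fields may now be read safely ... */
        return type;
}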
-void mic_virtio_del_device(struct mic_vdev *mvdev)
-{
-       struct list_head *pos, *tmp;
-       struct mic_vdev *tmp_mvdev;
-       struct mic_device *mdev = mvdev->mdev;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
-       int i, ret, retry;
-       struct mic_vqconfig *vqconfig;
-       struct mic_bootparam *bootparam = mdev->dp;
-       s8 db;
-
-       mutex_lock(&mdev->mic_mutex);
-       db = bootparam->h2c_config_db;
-       if (db == -1)
-               goto skip_hot_remove;
-       dev_dbg(&mdev->pdev->dev,
-               "Requesting hot remove id %d\n", mvdev->virtio_id);
-       mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
-       mdev->ops->send_intr(mdev, db);
-       for (retry = 100; retry--;) {
-               ret = wait_event_timeout(wake,
-                       mvdev->dc->guest_ack, msecs_to_jiffies(100));
-               if (ret)
-                       break;
-       }
-       dev_dbg(&mdev->pdev->dev,
-               "Device id %d config_change %d guest_ack %d retry %d\n",
-               mvdev->virtio_id, mvdev->dc->config_change,
-               mvdev->dc->guest_ack, retry);
-       mvdev->dc->config_change = 0;
-       mvdev->dc->guest_ack = 0;
-skip_hot_remove:
-       mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
-       flush_work(&mvdev->virtio_bh_work);
-       vqconfig = mic_vq_config(mvdev->dd);
-       for (i = 0; i < mvdev->dd->num_vq; i++) {
-               struct mic_vringh *mvr = &mvdev->mvr[i];
-
-               mic_unmap_single(mvdev->mdev, mvr->buf_da,
-                                MIC_INT_DMA_BUF_SIZE);
-               free_pages((unsigned long)mvr->buf,
-                          get_order(MIC_INT_DMA_BUF_SIZE));
-               vringh_kiov_cleanup(&mvr->riov);
-               vringh_kiov_cleanup(&mvr->wiov);
-               mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
-                                mvr->vring.len);
-               free_pages((unsigned long)mvr->vring.va,
-                          get_order(mvr->vring.len));
-       }
-
-       list_for_each_safe(pos, tmp, &mdev->vdev_list) {
-               tmp_mvdev = list_entry(pos, struct mic_vdev, list);
-               if (tmp_mvdev == mvdev) {
-                       list_del(pos);
-                       dev_dbg(&mdev->pdev->dev,
-                               "Removing virtio device id %d\n",
-                               mvdev->virtio_id);
-                       break;
-               }
-       }
-       /*
-        * Order the type update with previous stores. This write barrier
-        * is paired with the corresponding read barrier before the uncached
-        * system memory read of the type, on the card while scanning the
-        * device page.
-        */
-       smp_wmb();
-       mvdev->dd->type = -1;
-       mutex_unlock(&mdev->mic_mutex);
-}
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h
deleted file mode 100644 (file)
index a80631f..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef MIC_VIRTIO_H
-#define MIC_VIRTIO_H
-
-#include <linux/virtio_config.h>
-#include <linux/mic_ioctl.h>
-
-/*
- * Note on endianness.
- * 1. Host can be both BE or LE
- * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail
- *    rings and ioreadXX/iowriteXX to access used ring.
- * 3. Device page exposed by host to guest contains LE values. Guest
- *    accesses these using ioreadXX/iowriteXX etc. This way in general we
- *    obey the virtio spec according to which guest works with native
- *    endianness and host is aware of guest endianness and does all
- *    required endianness conversion.
- * 4. Data provided from user space to guest (in ADD_DEVICE and
- *    CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be
- *    in guest endianness.
- */
-
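/*
 * Minimal illustration (hypothetical helpers, not from this header) of the
 * access rule above: descriptor/avail data in system memory is read with
 * le*_to_cpu(), while the used ring, which lives in card memory mapped
 * through the aperture, is accessed with ioreadXX()/iowriteXX().
 */
static inline u16 example_read_avail_idx(__le16 *avail_idx)
{
        return le16_to_cpu(*avail_idx);         /* host system memory, LE */
}

static inline u16 example_read_used_idx(__le16 __iomem *used_idx)
{
        return ioread16(used_idx);              /* card memory via the aperture */
}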
-/**
- * struct mic_vringh - Virtio ring host information.
- *
- * @vring: The MIC vring used for setting up user space mappings.
- * @vrh: The host VRINGH used for accessing the card vrings.
- * @riov: The VRINGH read kernel IOV.
- * @wiov: The VRINGH write kernel IOV.
- * @vr_mutex: Mutex for synchronizing access to the VRING.
- * @buf: Temporary kernel buffer used to copy in/out data
- * from/to the card via DMA.
- * @buf_da: dma address of buf.
- * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
- */
-struct mic_vringh {
-       struct mic_vring vring;
-       struct vringh vrh;
-       struct vringh_kiov riov;
-       struct vringh_kiov wiov;
-       struct mutex vr_mutex;
-       void *buf;
-       dma_addr_t buf_da;
-       struct mic_vdev *mvdev;
-       u16 head;
-};
-
-/**
- * struct mic_vdev - Host information for a card Virtio device.
- *
- * @virtio_id - Virtio device id.
- * @waitq - Waitqueue to allow ring3 apps to poll.
- * @mdev - Back pointer to host MIC device.
- * @poll_wake - Used for waking up threads blocked in poll.
- * @out_bytes - Debug stats for number of bytes copied from host to card.
- * @in_bytes - Debug stats for number of bytes copied from card to host.
- * @out_bytes_dma - Debug stats for number of bytes copied from host to card
- * using DMA.
- * @in_bytes_dma - Debug stats for number of bytes copied from card to host
- * using DMA.
- * @tx_len_unaligned - Debug stats for number of bytes copied to the card where
- * the transfer length did not have the required DMA alignment.
- * @tx_dst_unaligned - Debug stats for number of bytes copied where the
- * destination address on the card did not have the required DMA alignment.
- * @mvr - Store per VRING data structures.
- * @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
- * @dd - Virtio device descriptor.
- * @dc - Virtio device control fields.
- * @list - List of Virtio devices.
- * @virtio_db - The doorbell used by the card to interrupt the host.
- * @virtio_cookie - The cookie returned while requesting interrupts.
- */
-struct mic_vdev {
-       int virtio_id;
-       wait_queue_head_t waitq;
-       struct mic_device *mdev;
-       int poll_wake;
-       unsigned long out_bytes;
-       unsigned long in_bytes;
-       unsigned long out_bytes_dma;
-       unsigned long in_bytes_dma;
-       unsigned long tx_len_unaligned;
-       unsigned long tx_dst_unaligned;
-       struct mic_vringh mvr[MIC_MAX_VRINGS];
-       struct work_struct virtio_bh_work;
-       struct mic_device_desc *dd;
-       struct mic_device_ctrl *dc;
-       struct list_head list;
-       int virtio_db;
-       struct mic_irq *virtio_cookie;
-};
-
-void mic_virtio_uninit(struct mic_device *mdev);
-int mic_virtio_add_device(struct mic_vdev *mvdev,
-                       void __user *argp);
-void mic_virtio_del_device(struct mic_vdev *mvdev);
-int mic_virtio_config_change(struct mic_vdev *mvdev,
-                       void __user *argp);
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
-       struct mic_copy_desc *request);
-void mic_virtio_reset_devices(struct mic_device *mdev);
-void mic_bh_handler(struct work_struct *work);
-
-/* Helper API to obtain the MIC PCIe device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
-       return &mvdev->mdev->pdev->dev;
-}
-
-/* Helper API to check if a virtio device is initialized */
-static inline int mic_vdev_inited(struct mic_vdev *mvdev)
-{
-       /* Device has not been created yet */
-       if (!mvdev->dd || !mvdev->dd->type) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -EINVAL);
-               return -EINVAL;
-       }
-
-       /* Device has been removed/deleted */
-       if (mvdev->dd->type == -1) {
-               dev_err(mic_dev(mvdev), "%s %d err %d\n",
-                       __func__, __LINE__, -ENODEV);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/* Helper API to check if a virtio device is running */
-static inline bool mic_vdevup(struct mic_vdev *mvdev)
-{
-       return !!mvdev->dd->status;
-}
-#endif
index 8118ac48c76450bcacfe10ef32e3e48a8e79bdc0..82a973c85b5da729f3b3e1721014d014fd480e88 100644 (file)
@@ -450,26 +450,29 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
 
        rc = mic_x100_get_boot_addr(mdev);
        if (rc)
-               goto error;
+               return rc;
        /* load OS */
        rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
        if (rc < 0) {
                dev_err(&mdev->pdev->dev,
                        "ramdisk request_firmware failed: %d %s\n",
                        rc, mdev->cosm_dev->firmware);
-               goto error;
+               return rc;
        }
        if (mdev->bootaddr > mdev->aper.len - fw->size) {
                rc = -EINVAL;
                dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
                        __func__, __LINE__, rc, mdev->bootaddr);
-               release_firmware(fw);
                goto error;
        }
        memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
        mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
-       if (!strcmp(mdev->cosm_dev->bootmode, "flash"))
-               goto done;
+       if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
+               rc = -EINVAL;
+               dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
+                       __func__, __LINE__, rc);
+               goto error;
+       }
        /* load command line */
        rc = mic_x100_load_command_line(mdev, fw);
        if (rc) {
@@ -481,9 +484,11 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
        /* load ramdisk */
        if (mdev->cosm_dev->ramdisk)
                rc = mic_x100_load_ramdisk(mdev);
+
+       return rc;
+
 error:
-       dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc);
-done:
+       release_firmware(fw);
        return rc;
 }
 
index 95a13c629a8e976477235081b199a77a79391fb1..cd01a0efda6b9a4f8710ab3ad94a24f060a9fba1 100644 (file)
@@ -74,11 +74,6 @@ struct scif_copy_work {
        bool ordered;
 };
 
-#ifndef list_entry_next
-#define list_entry_next(pos, member) \
-       list_entry(pos->member.next, typeof(*pos), member)
-#endif
-
 /**
  * scif_reserve_dma_chan:
  * @ep: Endpoint Descriptor.
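/*
 * list_next_entry() from <linux/list.h> is the standard replacement for the
 * local list_entry_next() macro removed above; it is defined as:
 *
 *      #define list_next_entry(pos, member) \
 *              list_entry((pos)->member.next, typeof(*(pos)), member)
 *
 * so every conversion in this patch is a straight drop-in.
 */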
@@ -276,13 +271,10 @@ static struct scif_mmu_notif *
 scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
 {
        struct scif_mmu_notif *mmn;
-       struct list_head *item;
 
-       list_for_each(item, &rma->mmn_list) {
-               mmn = list_entry(item, struct scif_mmu_notif, list);
+       list_for_each_entry(mmn, &rma->mmn_list, list)
                if (mmn->mm == mm)
                        return mmn;
-       }
        return NULL;
 }
 
@@ -293,13 +285,12 @@ scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
                 = kzalloc(sizeof(*mmn), GFP_KERNEL);
 
        if (!mmn)
-               return ERR_PTR(ENOMEM);
+               return ERR_PTR(-ENOMEM);
 
        scif_init_mmu_notifier(mmn, current->mm, ep);
-       if (mmu_notifier_register(&mmn->ep_mmu_notifier,
-                                 current->mm)) {
+       if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
                kfree(mmn);
-               return ERR_PTR(EBUSY);
+               return ERR_PTR(-EBUSY);
        }
        list_add(&mmn->list, &ep->rma_info.mmn_list);
        return mmn;
@@ -851,7 +842,7 @@ static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
                (window->nr_pages << PAGE_SHIFT);
        while (rem_len) {
                if (offset == end_offset) {
-                       window = list_entry_next(window, list);
+                       window = list_next_entry(window, list);
                        end_offset = window->offset +
                                (window->nr_pages << PAGE_SHIFT);
                }
@@ -957,7 +948,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
        remaining_len -= tail_len;
        while (remaining_len) {
                if (offset == end_offset) {
-                       window = list_entry_next(window, list);
+                       window = list_next_entry(window, list);
                        end_offset = window->offset +
                                (window->nr_pages << PAGE_SHIFT);
                }
@@ -1064,7 +1055,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
        }
        if (tail_len) {
                if (offset == end_offset) {
-                       window = list_entry_next(window, list);
+                       window = list_next_entry(window, list);
                        end_offset = window->offset +
                                (window->nr_pages << PAGE_SHIFT);
                }
@@ -1147,13 +1138,13 @@ static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
                (dst_window->nr_pages << PAGE_SHIFT);
        while (remaining_len) {
                if (src_offset == end_src_offset) {
-                       src_window = list_entry_next(src_window, list);
+                       src_window = list_next_entry(src_window, list);
                        end_src_offset = src_window->offset +
                                (src_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(src_window, &src_win_iter);
                }
                if (dst_offset == end_dst_offset) {
-                       dst_window = list_entry_next(dst_window, list);
+                       dst_window = list_next_entry(dst_window, list);
                        end_dst_offset = dst_window->offset +
                                (dst_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1314,13 +1305,13 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
        remaining_len -= tail_len;
        while (remaining_len) {
                if (src_offset == end_src_offset) {
-                       src_window = list_entry_next(src_window, list);
+                       src_window = list_next_entry(src_window, list);
                        end_src_offset = src_window->offset +
                                (src_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(src_window, &src_win_iter);
                }
                if (dst_offset == end_dst_offset) {
-                       dst_window = list_entry_next(dst_window, list);
+                       dst_window = list_next_entry(dst_window, list);
                        end_dst_offset = dst_window->offset +
                                (dst_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1405,9 +1396,9 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
        if (remaining_len) {
                loop_len = remaining_len;
                if (src_offset == end_src_offset)
-                       src_window = list_entry_next(src_window, list);
+                       src_window = list_next_entry(src_window, list);
                if (dst_offset == end_dst_offset)
-                       dst_window = list_entry_next(dst_window, list);
+                       dst_window = list_next_entry(dst_window, list);
 
                src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
                dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
@@ -1550,12 +1541,12 @@ static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
                        end_dst_offset = dst_window->offset +
                                (dst_window->nr_pages << PAGE_SHIFT);
                        if (src_offset == end_src_offset) {
-                               src_window = list_entry_next(src_window, list);
+                               src_window = list_next_entry(src_window, list);
                                scif_init_window_iter(src_window,
                                                      &src_win_iter);
                        }
                        if (dst_offset == end_dst_offset) {
-                               dst_window = list_entry_next(dst_window, list);
+                               dst_window = list_next_entry(dst_window, list);
                                scif_init_window_iter(dst_window,
                                                      &dst_win_iter);
                        }
@@ -1730,7 +1721,7 @@ static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
                mutex_lock(&ep->rma_info.mmn_lock);
                mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
                if (!mmn)
-                       scif_add_mmu_notifier(current->mm, ep);
+                       mmn = scif_add_mmu_notifier(current->mm, ep);
                mutex_unlock(&ep->rma_info.mmn_lock);
                if (IS_ERR(mmn)) {
                        scif_put_peer_dev(spdev);
index 8310b4dbff0602ea5f4bd50f3ce87a9e58e854a7..6a451bd65bf3f8914586e3916f39c847730f15a8 100644 (file)
@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
        if ((map_flags & SCIF_MAP_FIXED) &&
            ((ALIGN(offset, PAGE_SIZE) != offset) ||
            (offset < 0) ||
-           (offset + (off_t)len < offset)))
+           (len > LONG_MAX - offset)))
                return -EINVAL;
 
        might_sleep();
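/*
 * "offset + (off_t)len < offset" relies on signed integer overflow, which is
 * undefined behaviour in C, so the wrap-around test is rewritten in a form
 * that cannot overflow. Hypothetical helper expressing the new rule, valid
 * once offset has been checked to be non-negative (as all three call sites
 * in this patch do):
 */
static inline bool scif_offset_len_wraps(off_t offset, size_t len)
{
        return len > (size_t)(LONG_MAX - offset);
}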
@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
        if ((map_flags & SCIF_MAP_FIXED) &&
            ((ALIGN(offset, PAGE_SIZE) != offset) ||
            (offset < 0) ||
-           (offset + (off_t)len < offset)))
+           (len > LONG_MAX - offset)))
                return -EINVAL;
 
        /* Unsupported protection requested */
@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
 
        /* Offset is not page aligned or offset+len wraps around */
        if ((ALIGN(offset, PAGE_SIZE) != offset) ||
-           (offset + (off_t)len < offset))
+           (offset < 0) ||
+           (len > LONG_MAX - offset))
                return -EINVAL;
 
        err = scif_verify_epd(ep);
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile
new file mode 100644 (file)
index 0000000..78819c8
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2016, Intel Corporation.
+#
+obj-m := vop.o
+
+vop-objs += vop_main.o
+vop-objs += vop_debugfs.o
+vop-objs += vop_vringh.o
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
new file mode 100644 (file)
index 0000000..ab43884
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "vop_main.h"
+
+static int vop_dp_show(struct seq_file *s, void *pos)
+{
+       struct mic_device_desc *d;
+       struct mic_device_ctrl *dc;
+       struct mic_vqconfig *vqconfig;
+       __u32 *features;
+       __u8 *config;
+       struct vop_info *vi = s->private;
+       struct vop_device *vpdev = vi->vpdev;
+       struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+       int j, k;
+
+       seq_printf(s, "Bootparam: magic 0x%x\n",
+                  bootparam->magic);
+       seq_printf(s, "Bootparam: h2c_config_db %d\n",
+                  bootparam->h2c_config_db);
+       seq_printf(s, "Bootparam: node_id %d\n",
+                  bootparam->node_id);
+       seq_printf(s, "Bootparam: c2h_scif_db %d\n",
+                  bootparam->c2h_scif_db);
+       seq_printf(s, "Bootparam: h2c_scif_db %d\n",
+                  bootparam->h2c_scif_db);
+       seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
+                  bootparam->scif_host_dma_addr);
+       seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
+                  bootparam->scif_card_dma_addr);
+
+       for (j = sizeof(*bootparam);
+               j < MIC_DP_SIZE; j += mic_total_desc_size(d)) {
+               d = (void *)bootparam + j;
+               dc = (void *)d + mic_aligned_desc_size(d);
+
+               /* end of list */
+               if (d->type == 0)
+                       break;
+
+               if (d->type == -1)
+                       continue;
+
+               seq_printf(s, "Type %d ", d->type);
+               seq_printf(s, "Num VQ %d ", d->num_vq);
+               seq_printf(s, "Feature Len %d\n", d->feature_len);
+               seq_printf(s, "Config Len %d ", d->config_len);
+               seq_printf(s, "Shutdown Status %d\n", d->status);
+
+               for (k = 0; k < d->num_vq; k++) {
+                       vqconfig = mic_vq_config(d) + k;
+                       seq_printf(s, "vqconfig[%d]: ", k);
+                       seq_printf(s, "address 0x%llx ",
+                                  vqconfig->address);
+                       seq_printf(s, "num %d ", vqconfig->num);
+                       seq_printf(s, "used address 0x%llx\n",
+                                  vqconfig->used_address);
+               }
+
+               features = (__u32 *)mic_vq_features(d);
+               seq_printf(s, "Features: Host 0x%x ", features[0]);
+               seq_printf(s, "Guest 0x%x\n", features[1]);
+
+               config = mic_vq_configspace(d);
+               for (k = 0; k < d->config_len; k++)
+                       seq_printf(s, "config[%d]=%d\n", k, config[k]);
+
+               seq_puts(s, "Device control:\n");
+               seq_printf(s, "Config Change %d ", dc->config_change);
+               seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
+               seq_printf(s, "Guest Ack %d ", dc->guest_ack);
+               seq_printf(s, "Host ack %d\n", dc->host_ack);
+               seq_printf(s, "Used address updated %d ",
+                          dc->used_address_updated);
+               seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
+               seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
+               seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
+       }
+       schedule_work(&vi->hotplug_work);
+       return 0;
+}
+
+static int vop_dp_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, vop_dp_show, inode->i_private);
+}
+
+static int vop_dp_debug_release(struct inode *inode, struct file *file)
+{
+       return single_release(inode, file);
+}
+
+static const struct file_operations dp_ops = {
+       .owner   = THIS_MODULE,
+       .open    = vop_dp_debug_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = vop_dp_debug_release
+};
+
+static int vop_vdev_info_show(struct seq_file *s, void *unused)
+{
+       struct vop_info *vi = s->private;
+       struct list_head *pos, *tmp;
+       struct vop_vdev *vdev;
+       int i, j;
+
+       mutex_lock(&vi->vop_mutex);
+       list_for_each_safe(pos, tmp, &vi->vdev_list) {
+               vdev = list_entry(pos, struct vop_vdev, list);
+               seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n",
+                          vdev->virtio_id,
+                          vop_vdevup(vdev) ? "UP" : "DOWN",
+                          vdev->in_bytes,
+                          vdev->out_bytes,
+                          vdev->in_bytes_dma,
+                          vdev->out_bytes_dma);
+               for (i = 0; i < MIC_MAX_VRINGS; i++) {
+                       struct vring_desc *desc;
+                       struct vring_avail *avail;
+                       struct vring_used *used;
+                       struct vop_vringh *vvr = &vdev->vvr[i];
+                       struct vringh *vrh = &vvr->vrh;
+                       int num = vrh->vring.num;
+
+                       if (!num)
+                               continue;
+                       desc = vrh->vring.desc;
+                       seq_printf(s, "vring i %d avail_idx %d",
+                                  i, vvr->vring.info->avail_idx & (num - 1));
+                       seq_printf(s, " vring i %d avail_idx %d\n",
+                                  i, vvr->vring.info->avail_idx);
+                       seq_printf(s, "vrh i %d weak_barriers %d",
+                                  i, vrh->weak_barriers);
+                       seq_printf(s, " last_avail_idx %d last_used_idx %d",
+                                  vrh->last_avail_idx, vrh->last_used_idx);
+                       seq_printf(s, " completed %d\n", vrh->completed);
+                       for (j = 0; j < num; j++) {
+                               seq_printf(s, "desc[%d] addr 0x%llx len %d",
+                                          j, desc->addr, desc->len);
+                               seq_printf(s, " flags 0x%x next %d\n",
+                                          desc->flags, desc->next);
+                               desc++;
+                       }
+                       avail = vrh->vring.avail;
+                       seq_printf(s, "avail flags 0x%x idx %d\n",
+                                  vringh16_to_cpu(vrh, avail->flags),
+                                  vringh16_to_cpu(vrh,
+                                                  avail->idx) & (num - 1));
+                       seq_printf(s, "avail flags 0x%x idx %d\n",
+                                  vringh16_to_cpu(vrh, avail->flags),
+                                  vringh16_to_cpu(vrh, avail->idx));
+                       for (j = 0; j < num; j++)
+                               seq_printf(s, "avail ring[%d] %d\n",
+                                          j, avail->ring[j]);
+                       used = vrh->vring.used;
+                       seq_printf(s, "used flags 0x%x idx %d\n",
+                                  vringh16_to_cpu(vrh, used->flags),
+                                  vringh16_to_cpu(vrh, used->idx) & (num - 1));
+                       seq_printf(s, "used flags 0x%x idx %d\n",
+                                  vringh16_to_cpu(vrh, used->flags),
+                                  vringh16_to_cpu(vrh, used->idx));
+                       for (j = 0; j < num; j++)
+                               seq_printf(s, "used ring[%d] id %d len %d\n",
+                                          j, vringh32_to_cpu(vrh,
+                                                             used->ring[j].id),
+                                          vringh32_to_cpu(vrh,
+                                                          used->ring[j].len));
+               }
+       }
+       mutex_unlock(&vi->vop_mutex);
+
+       return 0;
+}
+
+static int vop_vdev_info_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, vop_vdev_info_show, inode->i_private);
+}
+
+static int vop_vdev_info_debug_release(struct inode *inode, struct file *file)
+{
+       return single_release(inode, file);
+}
+
+static const struct file_operations vdev_info_ops = {
+       .owner   = THIS_MODULE,
+       .open    = vop_vdev_info_debug_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = vop_vdev_info_debug_release
+};
+
+void vop_init_debugfs(struct vop_info *vi)
+{
+       char name[16];
+
+       snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode);
+       vi->dbg = debugfs_create_dir(name, NULL);
+       if (!vi->dbg) {
+               pr_err("can't create debugfs dir vop\n");
+               return;
+       }
+       debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops);
+       debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops);
+}
+
+void vop_exit_debugfs(struct vop_info *vi)
+{
+       debugfs_remove_recursive(vi->dbg);
+}
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
new file mode 100644 (file)
index 0000000..1a2b67f
--- /dev/null
@@ -0,0 +1,755 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Adapted from:
+ *
+ * virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_main.h"
+
+#define VOP_MAX_VRINGS 4
+
+/*
+ * _vop_vdev - Allocated per virtio device instance injected by the peer.
+ *
+ * @vdev: Virtio device
+ * @desc: Virtio device page descriptor
+ * @dc: Virtio device control
+ * @vpdev: VOP device which is the parent for this virtio device
+ * @vr: Buffer for accessing the VRING
+ * @used: Buffer for used
+ * @used_size: Size of the used buffer
+ * @reset_done: Track whether VOP reset is complete
+ * @virtio_cookie: Cookie returned upon requesting an interrupt
+ * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
+ * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
+ * @dnode: The destination node
+ */
+struct _vop_vdev {
+       struct virtio_device vdev;
+       struct mic_device_desc __iomem *desc;
+       struct mic_device_ctrl __iomem *dc;
+       struct vop_device *vpdev;
+       void __iomem *vr[VOP_MAX_VRINGS];
+       dma_addr_t used[VOP_MAX_VRINGS];
+       int used_size[VOP_MAX_VRINGS];
+       struct completion reset_done;
+       struct mic_irq *virtio_cookie;
+       int c2h_vdev_db;
+       int h2c_vdev_db;
+       int dnode;
+};
+
+#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)
+
+#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
+
+/* Helper API to obtain the parent of the virtio device */
+static inline struct device *_vop_dev(struct _vop_vdev *vdev)
+{
+       return vdev->vdev.dev.parent;
+}
+
+static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
+{
+       return sizeof(*desc)
+               + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
+               + ioread8(&desc->feature_len) * 2
+               + ioread8(&desc->config_len);
+}
+
+static inline struct mic_vqconfig __iomem *
+_vop_vq_config(struct mic_device_desc __iomem *desc)
+{
+       return (struct mic_vqconfig __iomem *)(desc + 1);
+}
+
+static inline u8 __iomem *
+_vop_vq_features(struct mic_device_desc __iomem *desc)
+{
+       return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
+}
+
+static inline u8 __iomem *
+_vop_vq_configspace(struct mic_device_desc __iomem *desc)
+{
+       return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
+}
+
+static inline unsigned
+_vop_total_desc_size(struct mic_device_desc __iomem *desc)
+{
+       return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
+}
+
+/* This gets the device's feature bits. */
+static u64 vop_get_features(struct virtio_device *vdev)
+{
+       unsigned int i, bits;
+       u32 features = 0;
+       struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+       u8 __iomem *in_features = _vop_vq_features(desc);
+       int feature_len = ioread8(&desc->feature_len);
+
+       bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
+       for (i = 0; i < bits; i++)
+               if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
+                       features |= BIT(i);
+
+       return features;
+}
+
+static int vop_finalize_features(struct virtio_device *vdev)
+{
+       unsigned int i, bits;
+       struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+       u8 feature_len = ioread8(&desc->feature_len);
+       /* Second half of bitmap is features we accept. */
+       u8 __iomem *out_features =
+               _vop_vq_features(desc) + feature_len;
+
+       /* Give virtio_ring a chance to accept features. */
+       vring_transport_features(vdev);
+
+       memset_io(out_features, 0, feature_len);
+       bits = min_t(unsigned, feature_len,
+                    sizeof(vdev->features)) * 8;
+       for (i = 0; i < bits; i++) {
+               if (__virtio_test_bit(vdev, i))
+                       iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
+                                &out_features[i / 8]);
+       }
+       return 0;
+}
+
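/*
 * Device page layout assumed by the two routines above, as implied by
 * _vop_desc_size(): the feature area holds two bitmaps of feature_len bytes
 * each, device-offered bits first, then the driver-accepted bits that
 * vop_finalize_features() writes. Hypothetical helper for locating the
 * second half:
 */
static inline u8 __iomem *
example_driver_features(struct mic_device_desc __iomem *desc)
{
        return _vop_vq_features(desc) + ioread8(&desc->feature_len);
}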
+/*
+ * Reading and writing elements in config space
+ */
+static void vop_get(struct virtio_device *vdev, unsigned int offset,
+                   void *buf, unsigned len)
+{
+       struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+       if (offset + len > ioread8(&desc->config_len))
+               return;
+       memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
+}
+
+static void vop_set(struct virtio_device *vdev, unsigned int offset,
+                   const void *buf, unsigned len)
+{
+       struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+       if (offset + len > ioread8(&desc->config_len))
+               return;
+       memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access the status
+ * field of the device descriptor. set_status also interrupts the host
+ * to tell about status changes.
+ */
+static u8 vop_get_status(struct virtio_device *vdev)
+{
+       return ioread8(&to_vopvdev(vdev)->desc->status);
+}
+
+static void vop_set_status(struct virtio_device *dev, u8 status)
+{
+       struct _vop_vdev *vdev = to_vopvdev(dev);
+       struct vop_device *vpdev = vdev->vpdev;
+
+       if (!status)
+               return;
+       iowrite8(status, &vdev->desc->status);
+       vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+}
+
+/* Inform host on a virtio device reset and wait for ack from host */
+static void vop_reset_inform_host(struct virtio_device *dev)
+{
+       struct _vop_vdev *vdev = to_vopvdev(dev);
+       struct mic_device_ctrl __iomem *dc = vdev->dc;
+       struct vop_device *vpdev = vdev->vpdev;
+       int retry;
+
+       iowrite8(0, &dc->host_ack);
+       iowrite8(1, &dc->vdev_reset);
+       vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+
+       /* Wait till host completes all card accesses and acks the reset */
+       for (retry = 100; retry--;) {
+               if (ioread8(&dc->host_ack))
+                       break;
+               msleep(100);
+       };
+
+       dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+
+       /* Reset status to 0 in case we timed out */
+       iowrite8(0, &vdev->desc->status);
+}
+
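/*
 * Hypothetical sketch of the peer's half of the reset handshake above
 * (handled by the host-side VOP/virtio code, not by this file): on observing
 * vdev_reset it quiesces the rings and acknowledges, which releases the
 * polling loop in vop_reset_inform_host().
 */
        if (dc->vdev_reset) {
                /* stop servicing the vrings for this device ... */
                dc->vdev_reset = 0;
                dc->host_ack = 1;
        }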
+static void vop_reset(struct virtio_device *dev)
+{
+       struct _vop_vdev *vdev = to_vopvdev(dev);
+
+       dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
+               __func__, dev->id.device);
+
+       vop_reset_inform_host(dev);
+       complete_all(&vdev->reset_done);
+}
+
+/*
+ * The virtio_ring code calls this API when it wants to notify the Host.
+ */
+static bool vop_notify(struct virtqueue *vq)
+{
+       struct _vop_vdev *vdev = vq->priv;
+       struct vop_device *vpdev = vdev->vpdev;
+
+       vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+       return true;
+}
+
+static void vop_del_vq(struct virtqueue *vq, int n)
+{
+       struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
+       struct vring *vr = (struct vring *)(vq + 1);
+       struct vop_device *vpdev = vdev->vpdev;
+
+       dma_unmap_single(&vpdev->dev, vdev->used[n],
+                        vdev->used_size[n], DMA_BIDIRECTIONAL);
+       free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
+       vring_del_virtqueue(vq);
+       vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
+       vdev->vr[n] = NULL;
+}
+
+static void vop_del_vqs(struct virtio_device *dev)
+{
+       struct _vop_vdev *vdev = to_vopvdev(dev);
+       struct virtqueue *vq, *n;
+       int idx = 0;
+
+       dev_dbg(_vop_dev(vdev), "%s\n", __func__);
+
+       list_for_each_entry_safe(vq, n, &dev->vqs, list)
+               vop_del_vq(vq, idx++);
+}
+
+/*
+ * This routine assigns vrings allocated in host/io memory. Code in
+ * virtio_ring.c, however, continues to access this io memory as if it were
+ * local memory, without io accessors.
+ */
+static struct virtqueue *vop_find_vq(struct virtio_device *dev,
+                                    unsigned index,
+                                    void (*callback)(struct virtqueue *vq),
+                                    const char *name)
+{
+       struct _vop_vdev *vdev = to_vopvdev(dev);
+       struct vop_device *vpdev = vdev->vpdev;
+       struct mic_vqconfig __iomem *vqconfig;
+       struct mic_vqconfig config;
+       struct virtqueue *vq;
+       void __iomem *va;
+       struct _mic_vring_info __iomem *info;
+       void *used;
+       int vr_size, _vr_size, err, magic;
+       struct vring *vr;
+       u8 type = ioread8(&vdev->desc->type);
+
+       if (index >= ioread8(&vdev->desc->num_vq))
+               return ERR_PTR(-ENOENT);
+
+       if (!name)
+               return ERR_PTR(-ENOENT);
+
+       /* First assign the vrings allocated in host memory */
+       vqconfig = _vop_vq_config(vdev->desc) + index;
+       memcpy_fromio(&config, vqconfig, sizeof(config));
+       _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
+       vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+       va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
+                       vr_size);
+       if (!va)
+               return ERR_PTR(-ENOMEM);
+       vdev->vr[index] = va;
+       memset_io(va, 0x0, _vr_size);
+       vq = vring_new_virtqueue(
+                               index,
+                               le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
+                               dev,
+                               false,
+                               (void __force *)va, vop_notify, callback, name);
+       if (!vq) {
+               err = -ENOMEM;
+               goto unmap;
+       }
+       info = va + _vr_size;
+       magic = ioread32(&info->magic);
+
+       if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
+               err = -EIO;
+               goto unmap;
+       }
+
+       /* Allocate and reassign used ring now */
+       vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
+                                            sizeof(struct vring_used_elem) *
+                                            le16_to_cpu(config.num));
+       used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                       get_order(vdev->used_size[index]));
+       if (!used) {
+               err = -ENOMEM;
+               dev_err(_vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, err);
+               goto del_vq;
+       }
+       vdev->used[index] = dma_map_single(&vpdev->dev, used,
+                                           vdev->used_size[index],
+                                           DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
+               err = -ENOMEM;
+               dev_err(_vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, err);
+               goto free_used;
+       }
+       writeq(vdev->used[index], &vqconfig->used_address);
+       /*
+        * To reassign the used ring here we are directly accessing
+        * struct vring_virtqueue which is a private data structure
+        * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
+        * vring_new_virtqueue() would ensure that
+        *  (&vq->vring == (struct vring *) (&vq->vq + 1));
+        */
+       vr = (struct vring *)(vq + 1);
+       vr->used = used;
+
+       vq->priv = vdev;
+       return vq;
+free_used:
+       free_pages((unsigned long)used,
+                  get_order(vdev->used_size[index]));
+del_vq:
+       vring_del_virtqueue(vq);
+unmap:
+       vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
+       return ERR_PTR(err);
+}
+
+static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
+                       struct virtqueue *vqs[],
+                       vq_callback_t *callbacks[],
+                       const char * const names[])
+{
+       struct _vop_vdev *vdev = to_vopvdev(dev);
+       struct vop_device *vpdev = vdev->vpdev;
+       struct mic_device_ctrl __iomem *dc = vdev->dc;
+       int i, err, retry;
+
+       /* We must have this many virtqueues. */
+       if (nvqs > ioread8(&vdev->desc->num_vq))
+               return -ENOENT;
+
+       for (i = 0; i < nvqs; ++i) {
+               dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
+                       __func__, i, names[i]);
+               vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]);
+               if (IS_ERR(vqs[i])) {
+                       err = PTR_ERR(vqs[i]);
+                       goto error;
+               }
+       }
+
+       iowrite8(1, &dc->used_address_updated);
+       /*
+        * Send an interrupt to the host to inform it that used
+        * rings have been re-assigned.
+        */
+       vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+       for (retry = 100; --retry;) {
+               if (!ioread8(&dc->used_address_updated))
+                       break;
+               msleep(100);
+       }
+
+       dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+       if (!retry) {
+               err = -ENODEV;
+               goto error;
+       }
+
+       return 0;
+error:
+       vop_del_vqs(dev);
+       return err;
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops vop_vq_config_ops = {
+       .get_features = vop_get_features,
+       .finalize_features = vop_finalize_features,
+       .get = vop_get,
+       .set = vop_set,
+       .get_status = vop_get_status,
+       .set_status = vop_set_status,
+       .reset = vop_reset,
+       .find_vqs = vop_find_vqs,
+       .del_vqs = vop_del_vqs,
+};
+
+static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
+{
+       struct _vop_vdev *vdev = data;
+       struct vop_device *vpdev = vdev->vpdev;
+       struct virtqueue *vq;
+
+       vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
+       list_for_each_entry(vq, &vdev->vdev.vqs, list)
+               vring_interrupt(0, vq);
+
+       return IRQ_HANDLED;
+}
+
+static void vop_virtio_release_dev(struct device *_d)
+{
+       /*
+        * No need for a release method similar to virtio PCI.
+        * Provide an empty one to avoid getting a warning from core.
+        */
+}
+
+/*
+ * Adds a new device and registers it with virtio; the appropriate drivers
+ * are then loaded by the device model.
+ */
+static int _vop_add_device(struct mic_device_desc __iomem *d,
+                          unsigned int offset, struct vop_device *vpdev,
+                          int dnode)
+{
+       struct _vop_vdev *vdev;
+       int ret;
+       u8 type = ioread8(&d->type);
+
+       vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+       if (!vdev)
+               return -ENOMEM;
+
+       vdev->vpdev = vpdev;
+       vdev->vdev.dev.parent = &vpdev->dev;
+       vdev->vdev.dev.release = vop_virtio_release_dev;
+       vdev->vdev.id.device = type;
+       vdev->vdev.config = &vop_vq_config_ops;
+       vdev->desc = d;
+       vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+       vdev->dnode = dnode;
+       vdev->vdev.priv = (void *)(u64)dnode;
+       init_completion(&vdev->reset_done);
+
+       vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
+       vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+                       vop_virtio_intr_handler, "virtio intr",
+                       vdev, vdev->h2c_vdev_db);
+       if (IS_ERR(vdev->virtio_cookie)) {
+               ret = PTR_ERR(vdev->virtio_cookie);
+               goto kfree;
+       }
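+       /*
+        * Publish the doorbell the host rings to interrupt us and read
+        * back the doorbell we ring to interrupt the host.
+        */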
+       iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
+       vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
+
+       ret = register_virtio_device(&vdev->vdev);
+       if (ret) {
+               dev_err(_vop_dev(vdev),
+                       "Failed to register vop device %u type %u\n",
+                       offset, type);
+               goto free_irq;
+       }
+       writeq((u64)vdev, &vdev->dc->vdev);
+       dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
+               __func__, offset, type, vdev);
+
+       return 0;
+
+free_irq:
+       vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+kfree:
+       kfree(vdev);
+       return ret;
+}
+
+/*
+ * Match a vop virtio device against a specific desc pointer.
+ */
+static int vop_match_desc(struct device *dev, void *data)
+{
+       struct virtio_device *_dev = dev_to_virtio(dev);
+       struct _vop_vdev *vdev = to_vopvdev(_dev);
+
+       return vdev->desc == (void __iomem *)data;
+}
+
+static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
+                                     unsigned int offset,
+                                     struct vop_device *vpdev)
+{
+       struct mic_device_ctrl __iomem *dc
+               = (void __iomem *)d + _vop_aligned_desc_size(d);
+       struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+
+       if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
+               return;
+
+       dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
+       virtio_config_changed(&vdev->vdev);
+       iowrite8(1, &dc->guest_ack);
+}
+
+/*
+ * Removes a virtio device if a hot-remove event has been
+ * requested by the host.
+ */
+static int _vop_remove_device(struct mic_device_desc __iomem *d,
+                             unsigned int offset, struct vop_device *vpdev)
+{
+       struct mic_device_ctrl __iomem *dc
+               = (void __iomem *)d + _vop_aligned_desc_size(d);
+       struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+       u8 status;
+       int ret = -1;
+
+       if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+               dev_dbg(&vpdev->dev,
+                       "%s %d config_change %d type %d vdev %p\n",
+                       __func__, __LINE__,
+                       ioread8(&dc->config_change), ioread8(&d->type), vdev);
+               status = ioread8(&d->status);
+               reinit_completion(&vdev->reset_done);
+               unregister_virtio_device(&vdev->vdev);
+               vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+               iowrite8(-1, &dc->h2c_vdev_db);
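+               /* Only wait for the reset ack if a driver had attached */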
+               if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+                       wait_for_completion(&vdev->reset_done);
+               kfree(vdev);
+               iowrite8(1, &dc->guest_ack);
+               dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
+                       __func__, __LINE__, ioread8(&dc->guest_ack));
+               iowrite8(-1, &d->type);
+               ret = 0;
+       }
+       return ret;
+}
+
+#define REMOVE_DEVICES true
+
+static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
+                             bool remove, int dnode)
+{
+       s8 type;
+       unsigned int i;
+       struct mic_device_desc __iomem *d;
+       struct mic_device_ctrl __iomem *dc;
+       struct device *dev;
+       int ret;
+
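+       /* Walk the device descriptors that follow the bootparam header */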
+       for (i = sizeof(struct mic_bootparam);
+                       i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
+               d = dp + i;
+               dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+               /*
+                * This read barrier is paired with the corresponding write
+                * barrier on the host which is inserted before adding or
+                * removing a virtio device descriptor, by updating the type.
+                */
+               rmb();
+               type = ioread8(&d->type);
+
+               /* end of list */
+               if (type == 0)
+                       break;
+
+               if (type == -1)
+                       continue;
+
+               /* device already exists */
+               dev = device_find_child(&vpdev->dev, (void __force *)d,
+                                       vop_match_desc);
+               if (dev) {
+                       if (remove)
+                               iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
+                                        &dc->config_change);
+                       put_device(dev);
+                       _vop_handle_config_change(d, i, vpdev);
+                       ret = _vop_remove_device(d, i, vpdev);
+                       if (remove) {
+                               iowrite8(0, &dc->config_change);
+                               iowrite8(0, &dc->guest_ack);
+                       }
+                       continue;
+               }
+
+               /* new device */
+               dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
+                       __func__, __LINE__, d);
+               if (!remove)
+                       _vop_add_device(d, i, vpdev, dnode);
+       }
+}
+
+static void vop_scan_devices(struct vop_info *vi,
+                            struct vop_device *vpdev, bool remove)
+{
+       void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
+
+       if (!dp)
+               return;
+       mutex_lock(&vi->vop_mutex);
+       _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
+       mutex_unlock(&vi->vop_mutex);
+}
+
+/*
+ * vop_hotplug_devices looks for changes (device addition, removal or
+ * configuration change) in the device page.
+ */
+static void vop_hotplug_devices(struct work_struct *work)
+{
+       struct vop_info *vi = container_of(work, struct vop_info,
+                                            hotplug_work);
+
+       vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
+}
+
+/*
+ * Interrupt handler for hot plug/config changes etc.
+ */
+static irqreturn_t vop_extint_handler(int irq, void *data)
+{
+       struct vop_info *vi = data;
+       struct mic_bootparam __iomem *bp;
+       struct vop_device *vpdev = vi->vpdev;
+
+       bp = vpdev->hw_ops->get_remote_dp(vpdev);
+       dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
+               __func__, __LINE__);
+       vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
+       schedule_work(&vi->hotplug_work);
+       return IRQ_HANDLED;
+}
+
+static int vop_driver_probe(struct vop_device *vpdev)
+{
+       struct vop_info *vi;
+       int rc;
+
+       vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+       if (!vi) {
+               rc = -ENOMEM;
+               goto exit;
+       }
+       dev_set_drvdata(&vpdev->dev, vi);
+       vi->vpdev = vpdev;
+
+       mutex_init(&vi->vop_mutex);
+       INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
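+       /*
+        * A non-zero dnode means we are the host side of the link; dnode 0
+        * means we run on the card and consume the host's device page.
+        */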
+       if (vpdev->dnode) {
+               rc = vop_host_init(vi);
+               if (rc < 0)
+                       goto free;
+       } else {
+               struct mic_bootparam __iomem *bootparam;
+
+               vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);
+
+               vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
+               vi->cookie = vpdev->hw_ops->request_irq(vpdev,
+                                                       vop_extint_handler,
+                                                       "virtio_config_intr",
+                                                       vi, vi->h2c_config_db);
+               if (IS_ERR(vi->cookie)) {
+                       rc = PTR_ERR(vi->cookie);
+                       goto free;
+               }
+               bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
+               iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
+       }
+       vop_init_debugfs(vi);
+       return 0;
+free:
+       kfree(vi);
+exit:
+       return rc;
+}
+
+static void vop_driver_remove(struct vop_device *vpdev)
+{
+       struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+
+       if (vpdev->dnode) {
+               vop_host_uninit(vi);
+       } else {
+               struct mic_bootparam __iomem *bootparam =
+                       vpdev->hw_ops->get_remote_dp(vpdev);
+               if (bootparam)
+                       iowrite8(-1, &bootparam->h2c_config_db);
+               vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
+               flush_work(&vi->hotplug_work);
+               vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
+       }
+       vop_exit_debugfs(vi);
+       kfree(vi);
+}
+
+static struct vop_device_id id_table[] = {
+       { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
+       { 0 },
+};
+
+static struct vop_driver vop_driver = {
+       .driver.name =  KBUILD_MODNAME,
+       .driver.owner = THIS_MODULE,
+       .id_table = id_table,
+       .probe = vop_driver_probe,
+       .remove = vop_driver_remove,
+};
+
+module_vop_driver(vop_driver);
+
+MODULE_DEVICE_TABLE(mbus, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/vop/vop_main.h b/drivers/misc/mic/vop/vop_main.h
new file mode 100644 (file)
index 0000000..ba47ec7
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#ifndef _VOP_MAIN_H_
+#define _VOP_MAIN_H_
+
+#include <linux/vringh.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include "../bus/vop_bus.h"
+
+/*
+ * Note on endianness.
+ * 1. The host can be either BE or LE.
+ * 2. The guest/card is LE. The host uses le_to_cpu to access the desc/avail
+ *    rings and ioreadXX/iowriteXX to access the used ring.
+ * 3. The device page exposed by the host to the guest contains LE values.
+ *    The guest accesses these using ioreadXX/iowriteXX etc. This way we
+ *    generally obey the virtio spec, according to which the guest works in
+ *    its native endianness while the host is aware of the guest endianness
+ *    and performs all required endianness conversions.
+ * 4. Data provided from user space to the guest (in the ADD_DEVICE and
+ *    CONFIG_CHANGE ioctls) is not interpreted by the driver and should be
+ *    in guest endianness.
+ */
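+
+/*
+ * Illustrative example (not part of the ABI): the guest reads a device
+ * page field through io accessors, e.g. ioread8(&desc->type), while the
+ * host reads the same field through its local mapping, e.g. devp->type.
+ */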
+
+/*
+ * vop_info - Allocated per invocation of VOP probe
+ *
+ * @vpdev: VOP device
+ * @hotplug_work: Handle virtio device creation, deletion and configuration
+ * @cookie: Cookie received upon requesting a virtio configuration interrupt
+ * @h2c_config_db: The doorbell used by the peer to indicate a config change
+ * @vdev_list: List of "active" virtio devices injected in the peer node
+ * @vop_mutex: Synchronize access to the device page as well as serialize
+ *             creation/deletion of virtio devices on the peer node
+ * @dp: Peer device page information
+ * @dbg: Debugfs entry
+ * @dma_ch: The DMA channel used by this transport for data transfers.
+ * @name: Name for this transport used in misc device creation.
+ * @miscdev: The misc device registered.
+ */
+struct vop_info {
+       struct vop_device *vpdev;
+       struct work_struct hotplug_work;
+       struct mic_irq *cookie;
+       int h2c_config_db;
+       struct list_head vdev_list;
+       struct mutex vop_mutex;
+       void __iomem *dp;
+       struct dentry *dbg;
+       struct dma_chan *dma_ch;
+       char name[16];
+       struct miscdevice miscdev;
+};
+
+/**
+ * struct vop_vringh - Virtio ring host information.
+ *
+ * @vring: The VOP vring used for setting up user space mappings.
+ * @vrh: The host VRINGH used for accessing the card vrings.
+ * @riov: The VRINGH read kernel IOV.
+ * @wiov: The VRINGH write kernel IOV.
+ * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
+ * @vr_mutex: Mutex for synchronizing access to the VRING.
+ * @buf: Temporary kernel buffer used to copy in/out data
+ * from/to the card via DMA.
+ * @buf_da: dma address of buf.
+ * @vdev: Back pointer to VOP virtio device for vringh_notify(..).
+ */
+struct vop_vringh {
+       struct mic_vring vring;
+       struct vringh vrh;
+       struct vringh_kiov riov;
+       struct vringh_kiov wiov;
+       u16 head;
+       struct mutex vr_mutex;
+       void *buf;
+       dma_addr_t buf_da;
+       struct vop_vdev *vdev;
+};
+
+/**
+ * struct vop_vdev - Host information for a card Virtio device.
+ *
+ * @virtio_id: Virtio device id.
+ * @waitq: Waitqueue to allow ring3 apps to poll.
+ * @vpdev: Pointer to the VOP bus device.
+ * @poll_wake: Used for waking up threads blocked in poll.
+ * @out_bytes: Debug stats for number of bytes copied from host to card.
+ * @in_bytes: Debug stats for number of bytes copied from card to host.
+ * @out_bytes_dma: Debug stats for number of bytes copied from host to card
+ * using DMA.
+ * @in_bytes_dma: Debug stats for number of bytes copied from card to host
+ * using DMA.
+ * @tx_len_unaligned: Debug stats for number of bytes copied to the card where
+ * the transfer length did not have the required DMA alignment.
+ * @tx_dst_unaligned: Debug stats for number of bytes copied where the
+ * destination address on the card did not have the required DMA alignment.
+ * @rx_dst_unaligned: Debug stats for number of bytes copied from the card
+ * where the destination address did not have the required DMA alignment.
+ * @vvr: Store per VRING data structures.
+ * @virtio_bh_work: Work struct used to schedule virtio bottom half handling.
+ * @dd: Virtio device descriptor.
+ * @dc: Virtio device control fields.
+ * @list: List of Virtio devices.
+ * @virtio_db: The doorbell used by the card to interrupt the host.
+ * @virtio_cookie: The cookie returned while requesting interrupts.
+ * @vi: Transport information.
+ * @vdev_mutex: Mutex synchronizing virtio device injection,
+ *              removal and data transfers.
+ * @destroy: Track if a virtio device is being destroyed.
+ * @deleted: The virtio device has been deleted.
+ */
+struct vop_vdev {
+       int virtio_id;
+       wait_queue_head_t waitq;
+       struct vop_device *vpdev;
+       int poll_wake;
+       unsigned long out_bytes;
+       unsigned long in_bytes;
+       unsigned long out_bytes_dma;
+       unsigned long in_bytes_dma;
+       unsigned long tx_len_unaligned;
+       unsigned long tx_dst_unaligned;
+       unsigned long rx_dst_unaligned;
+       struct vop_vringh vvr[MIC_MAX_VRINGS];
+       struct work_struct virtio_bh_work;
+       struct mic_device_desc *dd;
+       struct mic_device_ctrl *dc;
+       struct list_head list;
+       int virtio_db;
+       struct mic_irq *virtio_cookie;
+       struct vop_info *vi;
+       struct mutex vdev_mutex;
+       struct completion destroy;
+       bool deleted;
+};
+
+/* Helper API to check if a virtio device is running */
+static inline bool vop_vdevup(struct vop_vdev *vdev)
+{
+       return !!vdev->dd->status;
+}
+
+void vop_init_debugfs(struct vop_info *vi);
+void vop_exit_debugfs(struct vop_info *vi);
+int vop_host_init(struct vop_info *vi);
+void vop_host_uninit(struct vop_info *vi);
+#endif
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
new file mode 100644 (file)
index 0000000..e94c7fb
--- /dev/null
@@ -0,0 +1,1165 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include <linux/mic_ioctl.h>
+#include "vop_main.h"
+
+/* Helper API to obtain the VOP PCIe device */
+static inline struct device *vop_dev(struct vop_vdev *vdev)
+{
+       return vdev->vpdev->dev.parent;
+}
+
+/* Helper API to check if a virtio device is initialized */
+static inline int vop_vdev_inited(struct vop_vdev *vdev)
+{
+       if (!vdev)
+               return -EINVAL;
+       /* Device has not been created yet */
+       if (!vdev->dd || !vdev->dd->type) {
+               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, -EINVAL);
+               return -EINVAL;
+       }
+       /* Device has been removed/deleted */
+       if (vdev->dd->type == -1) {
+               dev_dbg(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, -ENODEV);
+               return -ENODEV;
+       }
+       return 0;
+}
+
+static void _vop_notify(struct vringh *vrh)
+{
+       struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh);
+       struct vop_vdev *vdev = vvrh->vdev;
+       struct vop_device *vpdev = vdev->vpdev;
+       s8 db = vdev->dc->h2c_vdev_db;
+
+       if (db != -1)
+               vpdev->hw_ops->send_intr(vpdev, db);
+}
+
+static void vop_virtio_init_post(struct vop_vdev *vdev)
+{
+       struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
+       struct vop_device *vpdev = vdev->vpdev;
+       int i, used_size;
+
+       for (i = 0; i < vdev->dd->num_vq; i++) {
+               used_size = PAGE_ALIGN(sizeof(u16) * 3 +
+                               sizeof(struct vring_used_elem) *
+                               le16_to_cpu(vqconfig[i].num));
+               if (!le64_to_cpu(vqconfig[i].used_address)) {
+                       dev_warn(vop_dev(vdev), "used_address zero??\n");
+                       continue;
+               }
+               vdev->vvr[i].vrh.vring.used =
+                       (void __force *)vpdev->hw_ops->ioremap(
+                       vpdev,
+                       le64_to_cpu(vqconfig[i].used_address),
+                       used_size);
+       }
+
+       vdev->dc->used_address_updated = 0;
+
+       dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
+                __func__, vdev->virtio_id);
+}
+
+static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
+{
+       int i;
+
+       dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
+               __func__, vdev->dd->status, vdev->virtio_id);
+
+       for (i = 0; i < vdev->dd->num_vq; i++)
+               /*
+                * Avoid lockdep false positive. The + 1 is for the vop
+                * mutex which is held in the reset devices code path.
+                */
+               mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+       /* 0 status means "reset" */
+       vdev->dd->status = 0;
+       vdev->dc->vdev_reset = 0;
+       vdev->dc->host_ack = 1;
+
+       for (i = 0; i < vdev->dd->num_vq; i++) {
+               struct vringh *vrh = &vdev->vvr[i].vrh;
+
+               vdev->vvr[i].vring.info->avail_idx = 0;
+               vrh->completed = 0;
+               vrh->last_avail_idx = 0;
+               vrh->last_used_idx = 0;
+       }
+
+       for (i = 0; i < vdev->dd->num_vq; i++)
+               mutex_unlock(&vdev->vvr[i].vr_mutex);
+}
+
+static void vop_virtio_reset_devices(struct vop_info *vi)
+{
+       struct list_head *pos, *tmp;
+       struct vop_vdev *vdev;
+
+       list_for_each_safe(pos, tmp, &vi->vdev_list) {
+               vdev = list_entry(pos, struct vop_vdev, list);
+               vop_virtio_device_reset(vdev);
+               vdev->poll_wake = 1;
+               wake_up(&vdev->waitq);
+       }
+}
+
+static void vop_bh_handler(struct work_struct *work)
+{
+       struct vop_vdev *vdev = container_of(work, struct vop_vdev,
+                       virtio_bh_work);
+
+       if (vdev->dc->used_address_updated)
+               vop_virtio_init_post(vdev);
+
+       if (vdev->dc->vdev_reset)
+               vop_virtio_device_reset(vdev);
+
+       vdev->poll_wake = 1;
+       wake_up(&vdev->waitq);
+}
+
+static irqreturn_t _vop_virtio_intr_handler(int irq, void *data)
+{
+       struct vop_vdev *vdev = data;
+       struct vop_device *vpdev = vdev->vpdev;
+
+       vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db);
+       schedule_work(&vdev->virtio_bh_work);
+       return IRQ_HANDLED;
+}
+
+static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
+{
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+       int ret = 0, retry, i;
+       struct vop_device *vpdev = vdev->vpdev;
+       struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+       struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+       s8 db = bootparam->h2c_config_db;
+
+       mutex_lock(&vi->vop_mutex);
+       for (i = 0; i < vdev->dd->num_vq; i++)
+               mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+       if (db == -1 || vdev->dd->type == -1) {
+               ret = -EIO;
+               goto exit;
+       }
+
+       memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
+       vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
+       vpdev->hw_ops->send_intr(vpdev, db);
+
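+       /* Poll for the guest ack of the config change, up to ~10 seconds */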
+       for (retry = 100; retry--;) {
+               ret = wait_event_timeout(wake, vdev->dc->guest_ack,
+                                        msecs_to_jiffies(100));
+               if (ret)
+                       break;
+       }
+
+       dev_dbg(vop_dev(vdev),
+               "%s %d retry: %d\n", __func__, __LINE__, retry);
+       vdev->dc->config_change = 0;
+       vdev->dc->guest_ack = 0;
+exit:
+       for (i = 0; i < vdev->dd->num_vq; i++)
+               mutex_unlock(&vdev->vvr[i].vr_mutex);
+       mutex_unlock(&vi->vop_mutex);
+       return ret;
+}
+
+static int vop_copy_dp_entry(struct vop_vdev *vdev,
+                            struct mic_device_desc *argp, __u8 *type,
+                            struct mic_device_desc **devpage)
+{
+       struct vop_device *vpdev = vdev->vpdev;
+       struct mic_device_desc *devp;
+       struct mic_vqconfig *vqconfig;
+       int ret = 0, i;
+       bool slot_found = false;
+
+       vqconfig = mic_vq_config(argp);
+       for (i = 0; i < argp->num_vq; i++) {
+               if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
+                       ret =  -EINVAL;
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       goto exit;
+               }
+       }
+
+       /* Find the first free device page entry */
+       for (i = sizeof(struct mic_bootparam);
+               i < MIC_DP_SIZE - mic_total_desc_size(argp);
+               i += mic_total_desc_size(devp)) {
+               devp = vpdev->hw_ops->get_dp(vpdev) + i;
+               if (devp->type == 0 || devp->type == -1) {
+                       slot_found = true;
+                       break;
+               }
+       }
+       if (!slot_found) {
+               ret =  -EINVAL;
+               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, ret);
+               goto exit;
+       }
+       /*
+        * Save off the type before doing the memcpy. Type will be set in the
+        * end after completing all initialization for the new device.
+        */
+       *type = argp->type;
+       argp->type = 0;
+       memcpy(devp, argp, mic_desc_size(argp));
+
+       *devpage = devp;
+exit:
+       return ret;
+}
+
+static void vop_init_device_ctrl(struct vop_vdev *vdev,
+                                struct mic_device_desc *devpage)
+{
+       struct mic_device_ctrl *dc;
+
+       dc = (void *)devpage + mic_aligned_desc_size(devpage);
+
+       dc->config_change = 0;
+       dc->guest_ack = 0;
+       dc->vdev_reset = 0;
+       dc->host_ack = 0;
+       dc->used_address_updated = 0;
+       dc->c2h_vdev_db = -1;
+       dc->h2c_vdev_db = -1;
+       vdev->dc = dc;
+}
+
+static int vop_virtio_add_device(struct vop_vdev *vdev,
+                                struct mic_device_desc *argp)
+{
+       struct vop_info *vi = vdev->vi;
+       struct vop_device *vpdev = vi->vpdev;
+       struct mic_device_desc *dd = NULL;
+       struct mic_vqconfig *vqconfig;
+       int vr_size, i, j, ret;
+       u8 type = 0;
+       s8 db = -1;
+       char irqname[16];
+       struct mic_bootparam *bootparam;
+       u16 num;
+       dma_addr_t vr_addr;
+
+       bootparam = vpdev->hw_ops->get_dp(vpdev);
+       init_waitqueue_head(&vdev->waitq);
+       INIT_LIST_HEAD(&vdev->list);
+       vdev->vpdev = vpdev;
+
+       ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
+       if (ret) {
+               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, ret);
+               kfree(vdev);
+               return ret;
+       }
+
+       vop_init_device_ctrl(vdev, dd);
+
+       vdev->dd = dd;
+       vdev->virtio_id = type;
+       vqconfig = mic_vq_config(dd);
+       INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
+
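+       /*
+        * For each virtqueue: allocate and DMA-map a vring, set up the host
+        * vringh and allocate an intermediate DMA bounce buffer.
+        */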
+       for (i = 0; i < dd->num_vq; i++) {
+               struct vop_vringh *vvr = &vdev->vvr[i];
+               struct mic_vring *vr = &vdev->vvr[i].vring;
+
+               num = le16_to_cpu(vqconfig[i].num);
+               mutex_init(&vvr->vr_mutex);
+               vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
+                       sizeof(struct _mic_vring_info));
+               vr->va = (void *)
+                       __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                        get_order(vr_size));
+               if (!vr->va) {
+                       ret = -ENOMEM;
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       goto err;
+               }
+               vr->len = vr_size;
+               vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
+               vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
+               vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
+                                        DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(&vpdev->dev, vr_addr)) {
+                       free_pages((unsigned long)vr->va, get_order(vr_size));
+                       ret = -ENOMEM;
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       goto err;
+               }
+               vqconfig[i].address = cpu_to_le64(vr_addr);
+
+               vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
+               ret = vringh_init_kern(&vvr->vrh,
+                                      *(u32 *)mic_vq_features(vdev->dd),
+                                      num, false, vr->vr.desc, vr->vr.avail,
+                                      vr->vr.used);
+               if (ret) {
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       goto err;
+               }
+               vringh_kiov_init(&vvr->riov, NULL, 0);
+               vringh_kiov_init(&vvr->wiov, NULL, 0);
+               vvr->head = USHRT_MAX;
+               vvr->vdev = vdev;
+               vvr->vrh.notify = _vop_notify;
+               dev_dbg(&vpdev->dev,
+                       "%s %d index %d va %p info %p vr_size 0x%x\n",
+                       __func__, __LINE__, i, vr->va, vr->info, vr_size);
+               vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
+                                       get_order(VOP_INT_DMA_BUF_SIZE));
+               vvr->buf_da = dma_map_single(&vpdev->dev,
+                                         vvr->buf, VOP_INT_DMA_BUF_SIZE,
+                                         DMA_BIDIRECTIONAL);
+       }
+
+       snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
+                vdev->virtio_id);
+       vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
+       vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+                       _vop_virtio_intr_handler, irqname, vdev,
+                       vdev->virtio_db);
+       if (IS_ERR(vdev->virtio_cookie)) {
+               ret = PTR_ERR(vdev->virtio_cookie);
+               dev_dbg(&vpdev->dev, "request irq failed\n");
+               goto err;
+       }
+
+       vdev->dc->c2h_vdev_db = vdev->virtio_db;
+
+       /*
+        * Order the type update with previous stores. This write barrier
+        * is paired with the corresponding read barrier before the uncached
+        * system memory read of the type, on the card while scanning the
+        * device page.
+        */
+       smp_wmb();
+       dd->type = type;
+       argp->type = type;
+
+       if (bootparam) {
+               db = bootparam->h2c_config_db;
+               if (db != -1)
+                       vpdev->hw_ops->send_intr(vpdev, db);
+       }
+       dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
+       return 0;
+err:
+       vqconfig = mic_vq_config(dd);
+       for (j = 0; j < i; j++) {
+               struct vop_vringh *vvr = &vdev->vvr[j];
+
+               dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
+                                vvr->vring.len, DMA_BIDIRECTIONAL);
+               free_pages((unsigned long)vvr->vring.va,
+                          get_order(vvr->vring.len));
+       }
+       return ret;
+}
+
+static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
+                          struct vop_device *vpdev)
+{
+       struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+       s8 db;
+       int ret, retry;
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+
+       devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
+       db = bootparam->h2c_config_db;
+       if (db != -1)
+               vpdev->hw_ops->send_intr(vpdev, db);
+       else
+               goto done;
+       for (retry = 15; retry--;) {
+               ret = wait_event_timeout(wake, devp->guest_ack,
+                                        msecs_to_jiffies(1000));
+               if (ret)
+                       break;
+       }
+done:
+       devp->config_change = 0;
+       devp->guest_ack = 0;
+}
+
+static void vop_virtio_del_device(struct vop_vdev *vdev)
+{
+       struct vop_info *vi = vdev->vi;
+       struct vop_device *vpdev = vdev->vpdev;
+       int i;
+       struct mic_vqconfig *vqconfig;
+       struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+
+       if (!bootparam)
+               goto skip_hot_remove;
+       vop_dev_remove(vi, vdev->dc, vpdev);
+skip_hot_remove:
+       vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+       flush_work(&vdev->virtio_bh_work);
+       vqconfig = mic_vq_config(vdev->dd);
+       for (i = 0; i < vdev->dd->num_vq; i++) {
+               struct vop_vringh *vvr = &vdev->vvr[i];
+
+               dma_unmap_single(&vpdev->dev,
+                                vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
+                                DMA_BIDIRECTIONAL);
+               free_pages((unsigned long)vvr->buf,
+                          get_order(VOP_INT_DMA_BUF_SIZE));
+               vringh_kiov_cleanup(&vvr->riov);
+               vringh_kiov_cleanup(&vvr->wiov);
+               dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
+                                vvr->vring.len, DMA_BIDIRECTIONAL);
+               free_pages((unsigned long)vvr->vring.va,
+                          get_order(vvr->vring.len));
+       }
+       /*
+        * Order the type update with previous stores. This write barrier
+        * is paired with the corresponding read barrier before the uncached
+        * system memory read of the type, on the card while scanning the
+        * device page.
+        */
+       smp_wmb();
+       vdev->dd->type = -1;
+}
+
+/*
+ * vop_sync_dma - Wrapper for synchronous DMAs.
+ *
+ * @vdev: The VOP virtio device performing the transfer.
+ * @dst: Destination DMA address.
+ * @src: Source DMA address.
+ * @len: Size of the transfer.
+ *
+ * Return 0 on success, nonzero on failure.
+ */
+static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
+                       size_t len)
+{
+       int err = 0;
+       struct dma_device *ddev;
+       struct dma_async_tx_descriptor *tx;
+       struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+       struct dma_chan *vop_ch = vi->dma_ch;
+
+       if (!vop_ch) {
+               err = -EBUSY;
+               goto error;
+       }
+       ddev = vop_ch->device;
+       tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
+               DMA_PREP_FENCE);
+       if (!tx) {
+               err = -ENOMEM;
+               goto error;
+       } else {
+               dma_cookie_t cookie;
+
+               cookie = tx->tx_submit(tx);
+               if (dma_submit_error(cookie)) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+               dma_async_issue_pending(vop_ch);
+               err = dma_sync_wait(vop_ch, cookie);
+       }
+error:
+       if (err)
+               dev_err(&vi->vpdev->dev, "%s %d err %d\n",
+                       __func__, __LINE__, err);
+       return err;
+}
+
+#define VOP_USE_DMA true
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to a user
+ * space buffer. When transfers are done using DMA, source/destination
+ * addresses and transfer length must follow the alignment requirements of
+ * the MIC DMA engine.
+ */
+static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
+                                  size_t len, u64 daddr, size_t dlen,
+                                  int vr_idx)
+{
+       struct vop_device *vpdev = vdev->vpdev;
+       void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+       struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+       struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+       size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+       bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+       size_t dma_offset, partlen;
+       int err;
+
+       if (!VOP_USE_DMA) {
+               if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
+                       err = -EFAULT;
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, err);
+                       goto err;
+               }
+               vdev->in_bytes += len;
+               err = 0;
+               goto err;
+       }
+
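+       /*
+        * Align the source address down for DMA; the skipped bytes at the
+        * start of the bounce buffer are not copied to user space.
+        */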
+       dma_offset = daddr - round_down(daddr, dma_alignment);
+       daddr -= dma_offset;
+       len += dma_offset;
+       /*
+        * X100 uses DMA addresses as seen by the card so adding
+        * the aperture base is not required for DMA. However x200
+        * requires DMA addresses to be an offset into the bar so
+        * add the aperture base for x200.
+        */
+       if (x200)
+               daddr += vpdev->aper->pa;
+       while (len) {
+               partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+               err = vop_sync_dma(vdev, vvr->buf_da, daddr,
+                                  ALIGN(partlen, dma_alignment));
+               if (err) {
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, err);
+                       goto err;
+               }
+               if (copy_to_user(ubuf, vvr->buf + dma_offset,
+                                partlen - dma_offset)) {
+                       err = -EFAULT;
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, err);
+                       goto err;
+               }
+               daddr += partlen;
+               ubuf += partlen;
+               dbuf += partlen;
+               vdev->in_bytes_dma += partlen;
+               vdev->in_bytes += partlen;
+               len -= partlen;
+               dma_offset = 0;
+       }
+       err = 0;
+err:
+       vpdev->hw_ops->iounmap(vpdev, dbuf);
+       dev_dbg(vop_dev(vdev),
+               "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+               __func__, ubuf, dbuf, len, vr_idx);
+       return err;
+}
+
+/*
+ * Initiates copies across the PCIe bus from a user space buffer to card
+ * memory. When transfers are done using DMA, source/destination addresses
+ * and transfer length must follow the alignment requirements of the MIC
+ * DMA engine.
+ */
+static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
+                                    size_t len, u64 daddr, size_t dlen,
+                                    int vr_idx)
+{
+       struct vop_device *vpdev = vdev->vpdev;
+       void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+       struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+       struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+       size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+       bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+       size_t partlen;
+       bool dma = VOP_USE_DMA;
+       int err = 0;
+
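+       /* Fall back to a CPU copy if the destination or length is unaligned */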
+       if (daddr & (dma_alignment - 1)) {
+               vdev->tx_dst_unaligned += len;
+               dma = false;
+       } else if (ALIGN(len, dma_alignment) > dlen) {
+               vdev->tx_len_unaligned += len;
+               dma = false;
+       }
+
+       if (!dma)
+               goto memcpy;
+
+       /*
+        * X100 uses DMA addresses as seen by the card so adding
+        * the aperture base is not required for DMA. However x200
+        * requires DMA addresses to be an offset into the bar so
+        * add the aperture base for x200.
+        */
+       if (x200)
+               daddr += vpdev->aper->pa;
+       while (len) {
+               partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+
+               if (copy_from_user(vvr->buf, ubuf, partlen)) {
+                       err = -EFAULT;
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, err);
+                       goto err;
+               }
+               err = vop_sync_dma(vdev, daddr, vvr->buf_da,
+                                  ALIGN(partlen, dma_alignment));
+               if (err) {
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, err);
+                       goto err;
+               }
+               daddr += partlen;
+               ubuf += partlen;
+               dbuf += partlen;
+               vdev->out_bytes_dma += partlen;
+               vdev->out_bytes += partlen;
+               len -= partlen;
+       }
+memcpy:
+       /*
+        * We are copying to IO below and should ideally use something
+        * like copy_from_user_toio(..) if it existed.
+        */
+       if (copy_from_user((void __force *)dbuf, ubuf, len)) {
+               err = -EFAULT;
+               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, err);
+               goto err;
+       }
+       vdev->out_bytes += len;
+       err = 0;
+err:
+       vpdev->hw_ops->iounmap(vpdev, dbuf);
+       dev_dbg(vop_dev(vdev),
+               "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+               __func__, ubuf, dbuf, len, vr_idx);
+       return err;
+}
+
+#define MIC_VRINGH_READ true
+
+/* Determine the total number of bytes consumed in a VRINGH KIOV */
+static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov)
+{
+       int i;
+       u32 total = iov->consumed;
+
+       for (i = 0; i < iov->i; i++)
+               total += iov->iov[i].iov_len;
+       return total;
+}
+
+/*
+ * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
+ * This API is heavily based on the vringh_iov_xfer(..) implementation
+ * in vringh.c. We cannot reuse vringh_iov_pull_kern(..) and
+ * vringh_iov_push_kern(..) directly because there is no way to
+ * override the VRINGH xfer(..) routines as of v3.10.
+ */
+static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
+                          void __user *ubuf, size_t len, bool read, int vr_idx,
+                          size_t *out_len)
+{
+       int ret = 0;
+       size_t partlen, tot_len = 0;
+
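+       /* Walk the KIOV, copying at most len bytes to/from the user buffer */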
+       while (len && iov->i < iov->used) {
+               struct kvec *kiov = &iov->iov[iov->i];
+
+               partlen = min(kiov->iov_len, len);
+               if (read)
+                       ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
+                                                     (u64)kiov->iov_base,
+                                                     kiov->iov_len,
+                                                     vr_idx);
+               else
+                       ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
+                                                       (u64)kiov->iov_base,
+                                                       kiov->iov_len,
+                                                       vr_idx);
+               if (ret) {
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       break;
+               }
+               len -= partlen;
+               ubuf += partlen;
+               tot_len += partlen;
+               iov->consumed += partlen;
+               kiov->iov_len -= partlen;
+               kiov->iov_base += partlen;
+               if (!kiov->iov_len) {
+                       /* Fix up old iov element then increment. */
+                       kiov->iov_len = iov->consumed;
+                       kiov->iov_base -= iov->consumed;
+
+                       iov->consumed = 0;
+                       iov->i++;
+               }
+       }
+       *out_len = tot_len;
+       return ret;
+}
+
+/*
+ * Use the standard VRINGH infrastructure in the kernel to fetch new
+ * descriptors, initiate the copies and update the used ring.
+ */
+static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
+{
+       int ret = 0;
+       u32 iovcnt = copy->iovcnt;
+       struct iovec iov;
+       struct iovec __user *u_iov = copy->iov;
+       void __user *ubuf = NULL;
+       struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
+       struct vringh_kiov *riov = &vvr->riov;
+       struct vringh_kiov *wiov = &vvr->wiov;
+       struct vringh *vrh = &vvr->vrh;
+       u16 *head = &vvr->head;
+       struct mic_vring *vr = &vvr->vring;
+       size_t len = 0, out_len;
+
+       copy->out_len = 0;
+       /* Fetch a new IOVEC if all previous elements have been processed */
+       if (riov->i == riov->used && wiov->i == wiov->used) {
+               ret = vringh_getdesc_kern(vrh, riov, wiov,
+                                         head, GFP_KERNEL);
+               /* Check if there are available descriptors */
+               if (ret <= 0)
+                       return ret;
+       }
+       while (iovcnt) {
+               if (!len) {
+                       /* Copy over a new iovec from user space. */
+                       ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
+                       if (ret) {
+                               ret = -EINVAL;
+                               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                                       __func__, __LINE__, ret);
+                               break;
+                       }
+                       len = iov.iov_len;
+                       ubuf = iov.iov_base;
+               }
+               /* Issue all the read descriptors first */
+               ret = vop_vringh_copy(vdev, riov, ubuf, len,
+                                     MIC_VRINGH_READ, copy->vr_idx, &out_len);
+               if (ret) {
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       break;
+               }
+               len -= out_len;
+               ubuf += out_len;
+               copy->out_len += out_len;
+               /* Issue the write descriptors next */
+               ret = vop_vringh_copy(vdev, wiov, ubuf, len,
+                                     !MIC_VRINGH_READ, copy->vr_idx, &out_len);
+               if (ret) {
+                       dev_err(vop_dev(vdev), "%s %d err %d\n",
+                               __func__, __LINE__, ret);
+                       break;
+               }
+               len -= out_len;
+               ubuf += out_len;
+               copy->out_len += out_len;
+               if (!len) {
+                       /* One user space iovec is now completed */
+                       iovcnt--;
+                       u_iov++;
+               }
+               /* Exit loop if all elements in KIOVs have been processed. */
+               if (riov->i == riov->used && wiov->i == wiov->used)
+                       break;
+       }
+       /*
+        * Update the used ring if a descriptor was available and some data was
+        * copied in/out and the user asked for a used ring update.
+        */
+       if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
+               u32 total = 0;
+
+               /* Determine the total data consumed */
+               total += vop_vringh_iov_consumed(riov);
+               total += vop_vringh_iov_consumed(wiov);
+               vringh_complete_kern(vrh, *head, total);
+               *head = USHRT_MAX;
+               if (vringh_need_notify_kern(vrh) > 0)
+                       vringh_notify(vrh);
+               vringh_kiov_cleanup(riov);
+               vringh_kiov_cleanup(wiov);
+               /* Update avail idx for user space */
+               vr->info->avail_idx = vrh->last_avail_idx;
+       }
+       return ret;
+}
+
+static inline int vop_verify_copy_args(struct vop_vdev *vdev,
+                                      struct mic_copy_desc *copy)
+{
+       if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
+               return -EINVAL;
+       return 0;
+}
+
+/* Copy a specified number of virtio descriptors in a chain */
+static int vop_virtio_copy_desc(struct vop_vdev *vdev,
+                               struct mic_copy_desc *copy)
+{
+       int err;
+       struct vop_vringh *vvr;
+
+       err = vop_verify_copy_args(vdev, copy);
+       if (err)
+               return err;
+
+       vvr = &vdev->vvr[copy->vr_idx];
+       mutex_lock(&vvr->vr_mutex);
+       if (!vop_vdevup(vdev)) {
+               err = -ENODEV;
+               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, err);
+               goto err;
+       }
+       err = _vop_virtio_copy(vdev, copy);
+       if (err) {
+               dev_err(vop_dev(vdev), "%s %d err %d\n",
+                       __func__, __LINE__, err);
+       }
+err:
+       mutex_unlock(&vvr->vr_mutex);
+       return err;
+}
+
+static int vop_open(struct inode *inode, struct file *f)
+{
+       struct vop_vdev *vdev;
+       struct vop_info *vi = container_of(f->private_data,
+               struct vop_info, miscdev);
+
+       vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+       if (!vdev)
+               return -ENOMEM;
+       vdev->vi = vi;
+       mutex_init(&vdev->vdev_mutex);
+       f->private_data = vdev;
+       init_completion(&vdev->destroy);
+       complete(&vdev->destroy);
+       return 0;
+}
+
+static int vop_release(struct inode *inode, struct file *f)
+{
+       struct vop_vdev *vdev = f->private_data, *vdev_tmp;
+       struct vop_info *vi = vdev->vi;
+       struct list_head *pos, *tmp;
+       bool found = false;
+
+       mutex_lock(&vdev->vdev_mutex);
+       if (vdev->deleted)
+               goto unlock;
+       mutex_lock(&vi->vop_mutex);
+       list_for_each_safe(pos, tmp, &vi->vdev_list) {
+               vdev_tmp = list_entry(pos, struct vop_vdev, list);
+               if (vdev == vdev_tmp) {
+                       vop_virtio_del_device(vdev);
+                       list_del(pos);
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&vi->vop_mutex);
+unlock:
+       mutex_unlock(&vdev->vdev_mutex);
+       if (!found)
+               wait_for_completion(&vdev->destroy);
+       f->private_data = NULL;
+       kfree(vdev);
+       return 0;
+}
+
+static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+       struct vop_vdev *vdev = f->private_data;
+       struct vop_info *vi = vdev->vi;
+       void __user *argp = (void __user *)arg;
+       int ret;
+
+       switch (cmd) {
+       case MIC_VIRTIO_ADD_DEVICE:
+       {
+               struct mic_device_desc dd, *dd_config;
+
+               if (copy_from_user(&dd, argp, sizeof(dd)))
+                       return -EFAULT;
+
+               if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
+                   dd.num_vq > MIC_MAX_VRINGS)
+                       return -EINVAL;
+
+               dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
+               if (!dd_config)
+                       return -ENOMEM;
+               if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
+                       ret = -EFAULT;
+                       goto free_ret;
+               }
+               mutex_lock(&vdev->vdev_mutex);
+               mutex_lock(&vi->vop_mutex);
+               ret = vop_virtio_add_device(vdev, dd_config);
+               if (ret)
+                       goto unlock_ret;
+               list_add_tail(&vdev->list, &vi->vdev_list);
+unlock_ret:
+               mutex_unlock(&vi->vop_mutex);
+               mutex_unlock(&vdev->vdev_mutex);
+free_ret:
+               kfree(dd_config);
+               return ret;
+       }
+       case MIC_VIRTIO_COPY_DESC:
+       {
+               struct mic_copy_desc copy;
+
+               mutex_lock(&vdev->vdev_mutex);
+               ret = vop_vdev_inited(vdev);
+               if (ret)
+                       goto _unlock_ret;
+
+               if (copy_from_user(&copy, argp, sizeof(copy))) {
+                       ret = -EFAULT;
+                       goto _unlock_ret;
+               }
+
+               ret = vop_virtio_copy_desc(vdev, &copy);
+               if (ret < 0)
+                       goto _unlock_ret;
+               if (copy_to_user(
+                       &((struct mic_copy_desc __user *)argp)->out_len,
+                       &copy.out_len, sizeof(copy.out_len)))
+                       ret = -EFAULT;
+_unlock_ret:
+               mutex_unlock(&vdev->vdev_mutex);
+               return ret;
+       }
+       case MIC_VIRTIO_CONFIG_CHANGE:
+       {
+               void *buf;
+
+               mutex_lock(&vdev->vdev_mutex);
+               ret = vop_vdev_inited(vdev);
+               if (ret)
+                       goto __unlock_ret;
+               buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto __unlock_ret;
+               }
+               if (copy_from_user(buf, argp, vdev->dd->config_len)) {
+                       ret = -EFAULT;
+                       goto done;
+               }
+               ret = vop_virtio_config_change(vdev, buf);
+done:
+               kfree(buf);
+__unlock_ret:
+               mutex_unlock(&vdev->vdev_mutex);
+               return ret;
+       }
+       default:
+               return -ENOIOCTLCMD;
+       };
+       return 0;
+}
+
+/*
+ * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * not when previously enqueued buffers may be available. This means that
+ * in the card->host (TX) path, when userspace is unblocked by poll it
+ * must drain all available descriptors or it can stall.
+ */
+static unsigned int vop_poll(struct file *f, poll_table *wait)
+{
+       struct vop_vdev *vdev = f->private_data;
+       int mask = 0;
+
+       mutex_lock(&vdev->vdev_mutex);
+       if (vop_vdev_inited(vdev)) {
+               mask = POLLERR;
+               goto done;
+       }
+       poll_wait(f, &vdev->waitq, wait);
+       if (vop_vdev_inited(vdev)) {
+               mask = POLLERR;
+       } else if (vdev->poll_wake) {
+               vdev->poll_wake = 0;
+               mask = POLLIN | POLLOUT;
+       }
+done:
+       mutex_unlock(&vdev->vdev_mutex);
+       return mask;
+}
+
+static inline int
+vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
+                unsigned long *size, unsigned long *pa)
+{
+       struct vop_device *vpdev = vdev->vpdev;
+       unsigned long start = MIC_DP_SIZE;
+       int i;
+
+       /*
+        * MMAP interface is as follows:
+        * offset                               region
+        * 0x0                                  virtio device_page
+        * 0x1000                               first vring
+        * 0x1000 + size of 1st vring           second vring
+        * ....
+        */
+       if (!offset) {
+               *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
+               *size = MIC_DP_SIZE;
+               return 0;
+       }
+
+       for (i = 0; i < vdev->dd->num_vq; i++) {
+               struct vop_vringh *vvr = &vdev->vvr[i];
+
+               if (offset == start) {
+                       *pa = virt_to_phys(vvr->vring.va);
+                       *size = vvr->vring.len;
+                       return 0;
+               }
+               start += vvr->vring.len;
+       }
+       return -1;
+}
+
+/*
+ * Maps the device page and virtio rings to user space for readonly access.
+ */
+static int vop_mmap(struct file *f, struct vm_area_struct *vma)
+{
+       struct vop_vdev *vdev = f->private_data;
+       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
+       int i, err;
+
+       err = vop_vdev_inited(vdev);
+       if (err)
+               goto ret;
+       if (vma->vm_flags & VM_WRITE) {
+               err = -EACCES;
+               goto ret;
+       }
+       while (size_rem) {
+               i = vop_query_offset(vdev, offset, &size, &pa);
+               if (i < 0) {
+                       err = -EINVAL;
+                       goto ret;
+               }
+               err = remap_pfn_range(vma, vma->vm_start + offset,
+                                     pa >> PAGE_SHIFT, size,
+                                     vma->vm_page_prot);
+               if (err)
+                       goto ret;
+               size_rem -= size;
+               offset += size;
+       }
+ret:
+       return err;
+}
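
As an aside, the layout described in the mmap comment above lends itself to a small user-space sketch; a minimal example, assuming MIC_DP_SIZE is a single 4 KiB page and that the misc device registered below shows up as /dev/vop_virtio0 (both assumptions, not guaranteed by this patch):

        #include <fcntl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                void *dp;
                int fd = open("/dev/vop_virtio0", O_RDWR);

                if (fd < 0)
                        return 1;
                /* offset 0 maps the read-only device page; the vrings follow it */
                dp = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
                if (dp == MAP_FAILED) {
                        close(fd);
                        return 1;
                }
                /* ... parse the mic_device_desc found at dp ... */
                munmap(dp, 4096);
                close(fd);
                return 0;
        }

Note that the mapping must be read-only, since vop_mmap() rejects VM_WRITE with -EACCES.
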
+
+static const struct file_operations vop_fops = {
+       .open = vop_open,
+       .release = vop_release,
+       .unlocked_ioctl = vop_ioctl,
+       .poll = vop_poll,
+       .mmap = vop_mmap,
+       .owner = THIS_MODULE,
+};
+
+int vop_host_init(struct vop_info *vi)
+{
+       int rc;
+       struct miscdevice *mdev;
+       struct vop_device *vpdev = vi->vpdev;
+
+       INIT_LIST_HEAD(&vi->vdev_list);
+       vi->dma_ch = vpdev->dma_ch;
+       mdev = &vi->miscdev;
+       mdev->minor = MISC_DYNAMIC_MINOR;
+       snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index);
+       mdev->name = vi->name;
+       mdev->fops = &vop_fops;
+       mdev->parent = &vpdev->dev;
+
+       rc = misc_register(mdev);
+       if (rc)
+               dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc);
+       return rc;
+}
+
+void vop_host_uninit(struct vop_info *vi)
+{
+       struct list_head *pos, *tmp;
+       struct vop_vdev *vdev;
+
+       mutex_lock(&vi->vop_mutex);
+       vop_virtio_reset_devices(vi);
+       list_for_each_safe(pos, tmp, &vi->vdev_list) {
+               vdev = list_entry(pos, struct vop_vdev, list);
+               list_del(pos);
+               reinit_completion(&vdev->destroy);
+               mutex_unlock(&vi->vop_mutex);
+               mutex_lock(&vdev->vdev_mutex);
+               vop_virtio_del_device(vdev);
+               vdev->deleted = true;
+               mutex_unlock(&vdev->vdev_mutex);
+               complete(&vdev->destroy);
+               mutex_lock(&vi->vop_mutex);
+       }
+       mutex_unlock(&vi->vop_mutex);
+       misc_deregister(&vi->miscdev);
+}
index 9a17a9bab8d6c980b2e23787c2fc0dbf6807e3c7..4810e039bbeced8bd7bb337ddd6404c95b3bfdd6 100644 (file)
@@ -503,8 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
        int err;
        ssize_t rom_size;
 
-       struct pch_phub_reg *chip =
-               dev_get_drvdata(container_of(kobj, struct device, kobj));
+       struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
 
        ret = mutex_lock_interruptible(&pch_phub_mutex);
        if (ret) {
@@ -514,8 +513,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
 
        /* Get Rom signature */
        chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
-       if (!chip->pch_phub_extrom_base_address)
+       if (!chip->pch_phub_extrom_base_address) {
+               err = -ENODATA;
                goto exrom_map_err;
+       }
 
        pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
                                (unsigned char *)&rom_signature);
@@ -567,8 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
        unsigned int addr_offset;
        int ret;
        ssize_t rom_size;
-       struct pch_phub_reg *chip =
-               dev_get_drvdata(container_of(kobj, struct device, kobj));
+       struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
 
        ret = mutex_lock_interruptible(&pch_phub_mutex);
        if (ret)
index 6e3af8b42cdd0f37ca509941343a2b963bdb44d7..dcdbd58672ccc6d2f7d1cecf1e04049bd26c6a58 100644 (file)
@@ -632,7 +632,6 @@ long st_register(struct st_proto_s *new_proto)
                spin_unlock_irqrestore(&st_gdata->lock, flags);
                return err;
        }
-       pr_debug("done %s(%d) ", __func__, new_proto->chnl_id);
 }
 EXPORT_SYMBOL_GPL(st_register);
 
index b823f9a6e4641c69af5cc9f7e5c9c438433ffa01..896be150e28fa5e0802f85e47cf882fe2ea4104d 100644 (file)
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.3.0-k");
+MODULE_VERSION("1.1.4.0-k");
 MODULE_LICENSE("GPL v2");
index bc4ea585b42e72c469f6512bf2f1057b2ab723c8..ca52952d850f6b62ebd83cb572fe0078fc9f6218 100644 (file)
@@ -25,9 +25,19 @@ config NVMEM_IMX_OCOTP
          This driver can also be built as a module. If so, the module
          will be called nvmem-imx-ocotp.
 
+config NVMEM_LPC18XX_EEPROM
+       tristate "NXP LPC18XX EEPROM Memory Support"
+       depends on ARCH_LPC18XX || COMPILE_TEST
+       help
+         Say Y here to include support for NXP LPC18xx EEPROM memory found in
+         NXP LPC185x/3x and LPC435x/3x/2x/1x devices.
+         To compile this driver as a module, choose M here: the module
+         will be called nvmem_lpc18xx_eeprom.
+
 config NVMEM_MXS_OCOTP
        tristate "Freescale MXS On-Chip OTP Memory Support"
        depends on ARCH_MXS || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          If you say Y here, you will get readonly access to the
          One Time Programmable memory pages that are stored
@@ -36,9 +46,21 @@ config NVMEM_MXS_OCOTP
          This driver can also be built as a module. If so, the module
          will be called nvmem-mxs-ocotp.
 
+config MTK_EFUSE
+       tristate "Mediatek SoCs EFUSE support"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select REGMAP_MMIO
+       help
+         This is a driver to access hardware-related data like sensor
+         calibration, HDMI impedance, etc.
+
+         This driver can also be built as a module. If so, the module
+         will be called nvmem_mtk-efuse.
+
 config QCOM_QFPROM
        tristate "QCOM QFPROM Support"
        depends on ARCH_QCOM || COMPILE_TEST
+       depends on HAS_IOMEM
        select REGMAP_MMIO
        help
          Say y here to enable QFPROM support. The QFPROM provides access
@@ -50,6 +72,7 @@ config QCOM_QFPROM
 config ROCKCHIP_EFUSE
        tristate "Rockchip eFuse Support"
        depends on ARCH_ROCKCHIP || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This is a simple driver to dump specified values of Rockchip SoC
          from eFuse, such as cpu-leakage.
@@ -71,6 +94,7 @@ config NVMEM_SUNXI_SID
 config NVMEM_VF610_OCOTP
        tristate "VF610 SoC OCOTP support"
        depends on SOC_VF610 || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This is a driver for the 'OCOTP' peripheral available on Vybrid
          devices like VF5xx and VF6xx.
index 95dde3f8f08504b5cd6ea1f30981f5861bb8a83b..45ab1ae08fa9890ad2ccdef092b9031c2fc1e904 100644 (file)
@@ -8,8 +8,12 @@ nvmem_core-y                   := core.o
 # Devices
 obj-$(CONFIG_NVMEM_IMX_OCOTP)  += nvmem-imx-ocotp.o
 nvmem-imx-ocotp-y              := imx-ocotp.o
+obj-$(CONFIG_NVMEM_LPC18XX_EEPROM)     += nvmem_lpc18xx_eeprom.o
+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
 obj-$(CONFIG_NVMEM_MXS_OCOTP)  += nvmem-mxs-ocotp.o
 nvmem-mxs-ocotp-y              := mxs-ocotp.o
+obj-$(CONFIG_MTK_EFUSE)                += nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y              := mtk-efuse.o
 obj-$(CONFIG_QCOM_QFPROM)      += nvmem_qfprom.o
 nvmem_qfprom-y                 := qfprom.o
 obj-$(CONFIG_ROCKCHIP_EFUSE)   += nvmem_rockchip_efuse.o
index 9d11d98373128fef3de3406975d8bcc2ca286b9a..0de3d878c4393b4738d510808e5e667f8d8264c5 100644 (file)
@@ -38,8 +38,13 @@ struct nvmem_device {
        int                     users;
        size_t                  size;
        bool                    read_only;
+       int                     flags;
+       struct bin_attribute    eeprom;
+       struct device           *base_dev;
 };
 
+#define FLAG_COMPAT            BIT(0)
+
 struct nvmem_cell {
        const char              *name;
        int                     offset;
@@ -56,16 +61,26 @@ static DEFINE_IDA(nvmem_ida);
 static LIST_HEAD(nvmem_cells);
 static DEFINE_MUTEX(nvmem_cells_mutex);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
 
 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t pos, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct nvmem_device *nvmem = to_nvmem_device(dev);
+       struct device *dev;
+       struct nvmem_device *nvmem;
        int rc;
 
+       if (attr->private)
+               dev = attr->private;
+       else
+               dev = container_of(kobj, struct device, kobj);
+       nvmem = to_nvmem_device(dev);
+
        /* Stop the user from reading */
        if (pos >= nvmem->size)
                return 0;
@@ -90,10 +105,16 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
                                     struct bin_attribute *attr,
                                     char *buf, loff_t pos, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct nvmem_device *nvmem = to_nvmem_device(dev);
+       struct device *dev;
+       struct nvmem_device *nvmem;
        int rc;
 
+       if (attr->private)
+               dev = attr->private;
+       else
+               dev = container_of(kobj, struct device, kobj);
+       nvmem = to_nvmem_device(dev);
+
        /* Stop the user from writing */
        if (pos >= nvmem->size)
                return 0;
@@ -161,6 +182,53 @@ static const struct attribute_group *nvmem_ro_dev_groups[] = {
        NULL,
 };
 
+/* default read/write permissions, root only */
+static struct bin_attribute bin_attr_rw_root_nvmem = {
+       .attr   = {
+               .name   = "nvmem",
+               .mode   = S_IWUSR | S_IRUSR,
+       },
+       .read   = bin_attr_nvmem_read,
+       .write  = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
+       &bin_attr_rw_root_nvmem,
+       NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_root_group = {
+       .bin_attrs      = nvmem_bin_rw_root_attributes,
+};
+
+static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
+       &nvmem_bin_rw_root_group,
+       NULL,
+};
+
+/* read only permission, root only */
+static struct bin_attribute bin_attr_ro_root_nvmem = {
+       .attr   = {
+               .name   = "nvmem",
+               .mode   = S_IRUSR,
+       },
+       .read   = bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
+       &bin_attr_ro_root_nvmem,
+       NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_root_group = {
+       .bin_attrs      = nvmem_bin_ro_root_attributes,
+};
+
+static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
+       &nvmem_bin_ro_root_group,
+       NULL,
+};
+
 static void nvmem_release(struct device *dev)
 {
        struct nvmem_device *nvmem = to_nvmem_device(dev);
@@ -294,12 +362,51 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
 
        return 0;
 err:
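+       /* unwind in reverse order, including cells[0] */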
-       while (--i)
+       while (i--)
                nvmem_cell_drop(cells[i]);
 
+       kfree(cells);
+
        return rval;
 }
 
+/*
+ * nvmem_setup_compat() - Create an additional binary entry in the
+ * driver's sysfs directory, to be backwards compatible with the older
+ * drivers/misc/eeprom drivers.
+ */
+static int nvmem_setup_compat(struct nvmem_device *nvmem,
+                             const struct nvmem_config *config)
+{
+       int rval;
+
+       if (!config->base_dev)
+               return -EINVAL;
+
+       if (nvmem->read_only)
+               nvmem->eeprom = bin_attr_ro_root_nvmem;
+       else
+               nvmem->eeprom = bin_attr_rw_root_nvmem;
+       nvmem->eeprom.attr.name = "eeprom";
+       nvmem->eeprom.size = nvmem->size;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       nvmem->eeprom.attr.key = &eeprom_lock_key;
+#endif
+       nvmem->eeprom.private = &nvmem->dev;
+       nvmem->base_dev = config->base_dev;
+
+       rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
+       if (rval) {
+               dev_err(&nvmem->dev,
+                       "Failed to create eeprom binary file %d\n", rval);
+               return rval;
+       }
+
+       nvmem->flags |= FLAG_COMPAT;
+
+       return 0;
+}
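
As an aside, a minimal provider-side sketch of how a driver opts into this compat path through its nvmem_config; the structure name example_config and the "example-eeprom" string are hypothetical, and .base_dev must be assigned to the parent device at probe time:

        static struct nvmem_config example_config = {
                .name      = "example-eeprom",
                .read_only = true,
                .root_only = true,      /* select the root-only sysfs groups */
                .compat    = true,      /* also create the legacy "eeprom" file */
                /* .base_dev = <parent struct device *>, set in probe */
        };

With .compat set, nvmem_register() calls nvmem_setup_compat(), and nvmem_unregister() later removes the legacy file via the FLAG_COMPAT check.
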
+
 /**
  * nvmem_register() - Register an nvmem device for the given nvmem_config.
  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
@@ -353,24 +460,37 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
        nvmem->read_only = of_property_read_bool(np, "read-only") |
                           config->read_only;
 
-       nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
-                                              nvmem_rw_dev_groups;
+       if (config->root_only)
+               nvmem->dev.groups = nvmem->read_only ?
+                       nvmem_ro_root_dev_groups :
+                       nvmem_rw_root_dev_groups;
+       else
+               nvmem->dev.groups = nvmem->read_only ?
+                       nvmem_ro_dev_groups :
+                       nvmem_rw_dev_groups;
 
        device_initialize(&nvmem->dev);
 
        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
 
        rval = device_add(&nvmem->dev);
-       if (rval) {
-               ida_simple_remove(&nvmem_ida, nvmem->id);
-               kfree(nvmem);
-               return ERR_PTR(rval);
+       if (rval)
+               goto out;
+
+       if (config->compat) {
+               rval = nvmem_setup_compat(nvmem, config);
+               if (rval)
+                       goto out;
        }
 
        if (config->cells)
                nvmem_add_cells(nvmem, config);
 
        return nvmem;
+out:
+       ida_simple_remove(&nvmem_ida, nvmem->id);
+       kfree(nvmem);
+       return ERR_PTR(rval);
 }
 EXPORT_SYMBOL_GPL(nvmem_register);
 
@@ -390,6 +510,9 @@ int nvmem_unregister(struct nvmem_device *nvmem)
        }
        mutex_unlock(&nvmem_mutex);
 
+       if (nvmem->flags & FLAG_COMPAT)
+               device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+
        nvmem_device_remove_all_cells(nvmem);
        device_del(&nvmem->dev);
 
index b7971d410b60ef5f203018490ffefe38b56cfa2d..d7796eb5421ff0d4c01a3dbc87f76a065bf5d5ee 100644 (file)
@@ -51,7 +51,7 @@ static int imx_ocotp_read(void *context, const void *reg, size_t reg_size,
                val += 4;
        }
 
-       return (i - index) * 4;
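+       /* regmap bus read callbacks return 0 on success, not a byte count */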
+       return 0;
 }
 
 static int imx_ocotp_write(void *context, const void *data, size_t count)
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
new file mode 100644 (file)
index 0000000..878fce7
--- /dev/null
@@ -0,0 +1,330 @@
+/*
+ * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* Registers */
+#define LPC18XX_EEPROM_AUTOPROG                        0x00c
+#define LPC18XX_EEPROM_AUTOPROG_WORD           0x1
+
+#define LPC18XX_EEPROM_CLKDIV                  0x014
+
+#define LPC18XX_EEPROM_PWRDWN                  0x018
+#define LPC18XX_EEPROM_PWRDWN_NO               0x0
+#define LPC18XX_EEPROM_PWRDWN_YES              0x1
+
+#define LPC18XX_EEPROM_INTSTAT                 0xfe0
+#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG     BIT(2)
+
+#define LPC18XX_EEPROM_INTSTATCLR              0xfe8
+#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST  BIT(2)
+
+/* Fixed page size (bytes) */
+#define LPC18XX_EEPROM_PAGE_SIZE               0x80
+
+/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */
+#define LPC18XX_EEPROM_CLOCK_HZ                        1500000
+
+/* EEPROM requires 3 ms of erase/program time between consecutive writes */
+#define LPC18XX_EEPROM_PROGRAM_TIME            3
+
+struct lpc18xx_eeprom_dev {
+       struct clk *clk;
+       void __iomem *reg_base;
+       void __iomem *mem_base;
+       struct nvmem_device *nvmem;
+       unsigned reg_bytes;
+       unsigned val_bytes;
+};
+
+static struct regmap_config lpc18xx_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+};
+
+static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
+                                        u32 reg, u32 val)
+{
+       writel(val, eeprom->reg_base + reg);
+}
+
+static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom,
+                                      u32 reg)
+{
+       return readl(eeprom->reg_base + reg);
+}
+
+static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
+{
+       unsigned long end;
+       u32 val;
+
+       /* Wait until EEPROM program operation has finished */
+       end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10);
+
+       while (time_is_after_jiffies(end)) {
+               val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT);
+
+               if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) {
+                       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR,
+                                       LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST);
+                       return 0;
+               }
+
+               usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC,
+                            (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC);
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
+                                      size_t reg_size, const void *val,
+                                      size_t val_size)
+{
+       struct lpc18xx_eeprom_dev *eeprom = context;
+       unsigned int offset = *(u32 *)reg;
+       int ret;
+
+       if (offset % lpc18xx_regmap_config.reg_stride)
+               return -EINVAL;
+
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+                             LPC18XX_EEPROM_PWRDWN_NO);
+
+       /* Wait 100 us while the EEPROM wakes up */
+       usleep_range(100, 200);
+
+       while (val_size) {
+               writel(*(u32 *)val, eeprom->mem_base + offset);
+               ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
+               if (ret < 0)
+                       return ret;
+
+               val_size -= eeprom->val_bytes;
+               val += eeprom->val_bytes;
+               offset += eeprom->val_bytes;
+       }
+
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+                             LPC18XX_EEPROM_PWRDWN_YES);
+
+       return 0;
+}
+
+static int lpc18xx_eeprom_write(void *context, const void *data, size_t count)
+{
+       struct lpc18xx_eeprom_dev *eeprom = context;
+       unsigned int offset = eeprom->reg_bytes;
+
+       if (count <= offset)
+               return -EINVAL;
+
+       return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes,
+                                          data + offset, count - offset);
+}
+
+static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
+                              void *val, size_t val_size)
+{
+       struct lpc18xx_eeprom_dev *eeprom = context;
+       unsigned int offset = *(u32 *)reg;
+
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+                             LPC18XX_EEPROM_PWRDWN_NO);
+
+       /* Wait 100 us while the EEPROM wakes up */
+       usleep_range(100, 200);
+
+       while (val_size) {
+               *(u32 *)val = readl(eeprom->mem_base + offset);
+               val_size -= eeprom->val_bytes;
+               val += eeprom->val_bytes;
+               offset += eeprom->val_bytes;
+       }
+
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+                             LPC18XX_EEPROM_PWRDWN_YES);
+
+       return 0;
+}
+
+static struct regmap_bus lpc18xx_eeprom_bus = {
+       .write = lpc18xx_eeprom_write,
+       .gather_write = lpc18xx_eeprom_gather_write,
+       .read = lpc18xx_eeprom_read,
+       .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+       .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg)
+{
+       /*
+        * The last page contains the EEPROM initialization data and is not
+        * writable.
+        */
+       return reg <= lpc18xx_regmap_config.max_register -
+                                               LPC18XX_EEPROM_PAGE_SIZE;
+}
+
+static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg)
+{
+       return reg <= lpc18xx_regmap_config.max_register;
+}
+
+static struct nvmem_config lpc18xx_nvmem_config = {
+       .name = "lpc18xx-eeprom",
+       .owner = THIS_MODULE,
+};
+
+static int lpc18xx_eeprom_probe(struct platform_device *pdev)
+{
+       struct lpc18xx_eeprom_dev *eeprom;
+       struct device *dev = &pdev->dev;
+       struct reset_control *rst;
+       unsigned long clk_rate;
+       struct regmap *regmap;
+       struct resource *res;
+       int ret;
+
+       eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
+       if (!eeprom)
+               return -ENOMEM;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
+       eeprom->reg_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(eeprom->reg_base))
+               return PTR_ERR(eeprom->reg_base);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+       eeprom->mem_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(eeprom->mem_base))
+               return PTR_ERR(eeprom->mem_base);
+
+       eeprom->clk = devm_clk_get(&pdev->dev, "eeprom");
+       if (IS_ERR(eeprom->clk)) {
+               dev_err(&pdev->dev, "failed to get eeprom clock\n");
+               return PTR_ERR(eeprom->clk);
+       }
+
+       ret = clk_prepare_enable(eeprom->clk);
+       if (ret < 0) {
+               dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret);
+               return ret;
+       }
+
+       rst = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(rst)) {
+               dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
+               ret = PTR_ERR(rst);
+               goto err_clk;
+       }
+
+       ret = reset_control_assert(rst);
+       if (ret < 0) {
+               dev_err(dev, "failed to assert reset: %d\n", ret);
+               goto err_clk;
+       }
+
+       eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE;
+       eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE;
+
+       /*
+        * The EEPROM clock is generated by dividing the system bus clock by
+        * the division factor held in the divider register (the register
+        * stores the divider value minus 1).
+        */
+       clk_rate = clk_get_rate(eeprom->clk);
+       clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
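+       /* e.g. a hypothetical 180 MHz bus clock gives a divider of 120 - 1 = 119 */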
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
+
+       /*
+        * Writing a single word to the page will start the erase/program cycle
+        * automatically
+        */
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
+                             LPC18XX_EEPROM_AUTOPROG_WORD);
+
+       lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+                             LPC18XX_EEPROM_PWRDWN_YES);
+
+       lpc18xx_regmap_config.max_register = resource_size(res) - 1;
+       lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg;
+       lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg;
+
+       regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom,
+                                 &lpc18xx_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap));
+               ret = PTR_ERR(regmap);
+               goto err_clk;
+       }
+
+       lpc18xx_nvmem_config.dev = dev;
+
+       eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
+       if (IS_ERR(eeprom->nvmem)) {
+               ret = PTR_ERR(eeprom->nvmem);
+               goto err_clk;
+       }
+
+       platform_set_drvdata(pdev, eeprom);
+
+       return 0;
+
+err_clk:
+       clk_disable_unprepare(eeprom->clk);
+
+       return ret;
+}
+
+static int lpc18xx_eeprom_remove(struct platform_device *pdev)
+{
+       struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = nvmem_unregister(eeprom->nvmem);
+       if (ret < 0)
+               return ret;
+
+       clk_disable_unprepare(eeprom->clk);
+
+       return 0;
+}
+
+static const struct of_device_id lpc18xx_eeprom_of_match[] = {
+       { .compatible = "nxp,lpc1857-eeprom" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
+
+static struct platform_driver lpc18xx_eeprom_driver = {
+       .probe = lpc18xx_eeprom_probe,
+       .remove = lpc18xx_eeprom_remove,
+       .driver = {
+               .name = "lpc18xx-eeprom",
+               .of_match_table = lpc18xx_eeprom_of_match,
+       },
+};
+
+module_platform_driver(lpc18xx_eeprom_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
new file mode 100644 (file)
index 0000000..9c49369
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config mtk_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+};
+
+static int mtk_efuse_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       struct nvmem_device *nvmem;
+       struct nvmem_config *econfig;
+       struct regmap *regmap;
+       void __iomem *base;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
+       if (!econfig)
+               return -ENOMEM;
+
+       mtk_regmap_config.max_register = resource_size(res) - 1;
+
+       regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "regmap init failed\n");
+               return PTR_ERR(regmap);
+       }
+
+       econfig->dev = dev;
+       econfig->owner = THIS_MODULE;
+       nvmem = nvmem_register(econfig);
+       if (IS_ERR(nvmem))
+               return PTR_ERR(nvmem);
+
+       platform_set_drvdata(pdev, nvmem);
+
+       return 0;
+}
+
+static int mtk_efuse_remove(struct platform_device *pdev)
+{
+       struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+       return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id mtk_efuse_of_match[] = {
+       { .compatible = "mediatek,mt8173-efuse",},
+       { .compatible = "mediatek,efuse",},
+       {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
+
+static struct platform_driver mtk_efuse_driver = {
+       .probe = mtk_efuse_probe,
+       .remove = mtk_efuse_remove,
+       .driver = {
+               .name = "mediatek,efuse",
+               .of_match_table = mtk_efuse_of_match,
+       },
+};
+
+static int __init mtk_efuse_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&mtk_efuse_driver);
+       if (ret) {
+               pr_err("Failed to register efuse driver\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static void __exit mtk_efuse_exit(void)
+{
+       return platform_driver_unregister(&mtk_efuse_driver);
+}
+
+subsys_initcall(mtk_efuse_init);
+module_exit(mtk_efuse_exit);
+
+MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek EFUSE driver");
+MODULE_LICENSE("GPL v2");
index f552134242223959301a419a78fc08b0c65ec207..a009795111e984de6fd05b17d4a3be086719b031 100644 (file)
  * more details.
  */
 
-#include <linux/platform_device.h>
-#include <linux/nvmem-provider.h>
-#include <linux/slab.h>
-#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/delay.h>
+#include <linux/nvmem-provider.h>
+#include <linux/slab.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
 
 #define EFUSE_A_SHIFT                  6
 #define EFUSE_A_MASK                   0x3ff
 #define REG_EFUSE_CTRL                 0x0000
 #define REG_EFUSE_DOUT                 0x0004
 
-struct rockchip_efuse_context {
+struct rockchip_efuse_chip {
        struct device *dev;
        void __iomem *base;
-       struct clk *efuse_clk;
+       struct clk *clk;
 };
 
 static int rockchip_efuse_write(void *context, const void *data, size_t count)
@@ -52,34 +52,32 @@ static int rockchip_efuse_read(void *context,
                               void *val, size_t val_size)
 {
        unsigned int offset = *(u32 *)reg;
-       struct rockchip_efuse_context *_context = context;
-       void __iomem *base = _context->base;
-       struct clk *clk = _context->efuse_clk;
+       struct rockchip_efuse_chip *efuse = context;
        u8 *buf = val;
        int ret;
 
-       ret = clk_prepare_enable(clk);
+       ret = clk_prepare_enable(efuse->clk);
        if (ret < 0) {
-               dev_err(_context->dev, "failed to prepare/enable efuse clk\n");
+               dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
                return ret;
        }
 
-       writel(EFUSE_LOAD | EFUSE_PGENB, base + REG_EFUSE_CTRL);
+       writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL);
        udelay(1);
        while (val_size) {
-               writel(readl(base + REG_EFUSE_CTRL) &
+               writel(readl(efuse->base + REG_EFUSE_CTRL) &
                             (~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
-                            base + REG_EFUSE_CTRL);
-               writel(readl(base + REG_EFUSE_CTRL) |
+                            efuse->base + REG_EFUSE_CTRL);
+               writel(readl(efuse->base + REG_EFUSE_CTRL) |
                             ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT),
-                            base + REG_EFUSE_CTRL);
+                            efuse->base + REG_EFUSE_CTRL);
                udelay(1);
-               writel(readl(base + REG_EFUSE_CTRL) |
-                            EFUSE_STROBE, base + REG_EFUSE_CTRL);
+               writel(readl(efuse->base + REG_EFUSE_CTRL) |
+                            EFUSE_STROBE, efuse->base + REG_EFUSE_CTRL);
                udelay(1);
-               *buf++ = readb(base + REG_EFUSE_DOUT);
-               writel(readl(base + REG_EFUSE_CTRL) &
-                    (~EFUSE_STROBE), base + REG_EFUSE_CTRL);
+               *buf++ = readb(efuse->base + REG_EFUSE_DOUT);
+               writel(readl(efuse->base + REG_EFUSE_CTRL) &
+                    (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL);
                udelay(1);
 
                val_size -= 1;
@@ -87,9 +85,9 @@ static int rockchip_efuse_read(void *context,
        }
 
        /* Switch to standby mode */
-       writel(EFUSE_PGENB | EFUSE_CSB, base + REG_EFUSE_CTRL);
+       writel(EFUSE_PGENB | EFUSE_CSB, efuse->base + REG_EFUSE_CTRL);
 
-       clk_disable_unprepare(clk);
+       clk_disable_unprepare(efuse->clk);
 
        return 0;
 }
@@ -114,48 +112,44 @@ static struct nvmem_config econfig = {
 };
 
 static const struct of_device_id rockchip_efuse_match[] = {
-       { .compatible = "rockchip,rockchip-efuse",},
+       { .compatible = "rockchip,rockchip-efuse", },
        { /* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
 
 static int rockchip_efuse_probe(struct platform_device *pdev)
 {
-       struct device *dev = &pdev->dev;
        struct resource *res;
        struct nvmem_device *nvmem;
        struct regmap *regmap;
-       void __iomem *base;
-       struct clk *clk;
-       struct rockchip_efuse_context *context;
+       struct rockchip_efuse_chip *efuse;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+       efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip),
+                            GFP_KERNEL);
+       if (!efuse)
+               return -ENOMEM;
 
-       context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context),
-                              GFP_KERNEL);
-       if (IS_ERR(context))
-               return PTR_ERR(context);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       efuse->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(efuse->base))
+               return PTR_ERR(efuse->base);
 
-       clk = devm_clk_get(dev, "pclk_efuse");
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
+       efuse->clk = devm_clk_get(&pdev->dev, "pclk_efuse");
+       if (IS_ERR(efuse->clk))
+               return PTR_ERR(efuse->clk);
 
-       context->dev = dev;
-       context->base = base;
-       context->efuse_clk = clk;
+       efuse->dev = &pdev->dev;
 
        rockchip_efuse_regmap_config.max_register = resource_size(res) - 1;
 
-       regmap = devm_regmap_init(dev, &rockchip_efuse_bus,
-                                 context, &rockchip_efuse_regmap_config);
+       regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus,
+                                 efuse, &rockchip_efuse_regmap_config);
        if (IS_ERR(regmap)) {
-               dev_err(dev, "regmap init failed\n");
+               dev_err(efuse->dev, "regmap init failed\n");
                return PTR_ERR(regmap);
        }
-       econfig.dev = dev;
+
+       econfig.dev = efuse->dev;
        nvmem = nvmem_register(&econfig);
        if (IS_ERR(nvmem))
                return PTR_ERR(nvmem);
index cfa3b85064dd233a463b1556742274d960e4f47b..bc88b40840552eb07b347fa9db41fc811ccf9c4b 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
  */
 
-
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -27,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-
 static struct nvmem_config econfig = {
        .name = "sunxi-sid",
        .read_only = true,
@@ -55,8 +52,8 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
 }
 
 static int sunxi_sid_read(void *context,
-                           const void *reg, size_t reg_size,
-                           void *val, size_t val_size)
+                         const void *reg, size_t reg_size,
+                         void *val, size_t val_size)
 {
        struct sunxi_sid *sid = context;
        unsigned int offset = *(u32 *)reg;
@@ -130,7 +127,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
        if (IS_ERR(nvmem))
                return PTR_ERR(nvmem);
 
-       randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+       randomness = kzalloc(sizeof(u8) * (size), GFP_KERNEL);
        if (!randomness) {
                ret = -EINVAL;
                goto err_unreg_nvmem;
index 0adccbf5c83f116bd698c392c47b75a9197f892f..c11db8bceea13e49cfd70704def1fb4e62b9313f 100644 (file)
@@ -4,8 +4,7 @@ endif
 if MIPS
 source "drivers/platform/mips/Kconfig"
 endif
-if GOLDFISH
+
 source "drivers/platform/goldfish/Kconfig"
-endif
 
 source "drivers/platform/chrome/Kconfig"
index 635ef25cc722a0c01ee13fd95ddfe78b6bbd6f39..fefbb8370da0a8ecfb93d6cc797108c962206cc3 100644 (file)
@@ -1,5 +1,24 @@
+menuconfig GOLDFISH
+       bool "Platform support for Goldfish virtual devices"
+       depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
+       depends on HAS_IOMEM
+       ---help---
+         Say Y here to see the options for the Goldfish virtual platform.
+         This option alone does not add any kernel code.
+
+         Unless you are building for the Android Goldfish emulator say N here.
+
+if GOLDFISH
+
+config GOLDFISH_BUS
+       bool "Goldfish platform bus"
+       ---help---
+         This is a virtual bus to host Goldfish Android Virtual Devices.
+
 config GOLDFISH_PIPE
        tristate "Goldfish virtual device for QEMU pipes"
        ---help---
          This is a virtual device to drive the QEMU pipe interface used by
          the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
index a0022395eee93f38c6a981cc9145cbe5ae8774f4..d3487125838cdce457c2f7acefdd9052b92cbde1 100644 (file)
@@ -1,5 +1,5 @@
 #
 # Makefile for Goldfish platform specific drivers
 #
-obj-$(CONFIG_GOLDFISH) +=      pdev_bus.o
+obj-$(CONFIG_GOLDFISH_BUS)     += pdev_bus.o
 obj-$(CONFIG_GOLDFISH_PIPE)    += goldfish_pipe.o
index e7a29e2750c6aeb274047b37a9d32eb3fe70961d..839df4aace764631ab9b0599ab08460e73f465cb 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2012 Intel, Inc.
  * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -57,6 +58,8 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/goldfish.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
 
 /*
  * IMPORTANT: The following constants must match the ones used and defined
@@ -75,6 +78,7 @@
 #define PIPE_REG_PARAMS_ADDR_LOW       0x18  /* read/write: batch data address */
 #define PIPE_REG_PARAMS_ADDR_HIGH      0x1c  /* read/write: batch data address */
 #define PIPE_REG_ACCESS_PARAMS         0x20  /* write: batch access */
+#define PIPE_REG_VERSION               0x24  /* read: device version */
 
 /* list of commands for PIPE_REG_COMMAND */
 #define CMD_OPEN                       1  /* open new channel */
 #define CMD_WRITE_BUFFER       4  /* send a user buffer to the emulator */
 #define CMD_WAKE_ON_WRITE      5  /* tell the emulator to wake us when writing
                                     is possible */
-
-/* The following commands are related to read operations, they must be
- * listed in the same order than the corresponding write ones, since we
- * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
- * in goldfish_pipe_read_write() below.
- */
 #define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
 #define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
                                   * is possible */
@@ -130,6 +128,7 @@ struct goldfish_pipe_dev {
        unsigned char __iomem *base;
        struct access_params *aps;
        int irq;
+       u32 version;
 };
 
 static struct goldfish_pipe_dev   pipe_dev[1];
@@ -263,19 +262,14 @@ static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
        return 0;
 }
 
-/* This function is used for both reading from and writing to a given
- * pipe.
- */
 static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
-                                   size_t bufflen, int is_write)
+                                      size_t bufflen, int is_write)
 {
        unsigned long irq_flags;
        struct goldfish_pipe *pipe = filp->private_data;
        struct goldfish_pipe_dev *dev = pipe->dev;
-       const int cmd_offset = is_write ? 0
-                                       : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
        unsigned long address, address_end;
-       int ret = 0;
+       int count = 0, ret = -EINVAL;
 
        /* If the emulator already closed the pipe, no need to go further */
        if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -298,79 +292,107 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
        address_end = address + bufflen;
 
        while (address < address_end) {
-               unsigned long  page_end = (address & PAGE_MASK) + PAGE_SIZE;
-               unsigned long  next     = page_end < address_end ? page_end
-                                                                : address_end;
-               unsigned long  avail    = next - address;
+               unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+               unsigned long next     = page_end < address_end ? page_end
+                                                               : address_end;
+               unsigned long avail    = next - address;
                int status, wakeBit;
+               struct page *page;
+
+               /* Either vaddr or paddr depending on the device version */
+               unsigned long xaddr;
+
+               /*
+                * We grab the pages on a page-by-page basis: user space may
+                * give us a potentially huge buffer, but if the read only
+                * returns a small amount there is no need to pin that much
+                * memory to the process.
+                */
+               down_read(&current->mm->mmap_sem);
+               ret = get_user_pages(current, current->mm, address, 1,
+                                    !is_write, 0, &page, NULL);
+               up_read(&current->mm->mmap_sem);
+               if (ret < 0)
+                       break;
 
-               /* Ensure that the corresponding page is properly mapped */
-               /* FIXME: this isn't safe or sufficient - use get_user_pages */
-               if (is_write) {
-                       char c;
-                       /* Ensure that the page is mapped and readable */
-                       if (__get_user(c, (char __user *)address)) {
-                               if (!ret)
-                                       ret = -EFAULT;
-                               break;
-                       }
+               if (dev->version) {
+                       /* Device version 1 or newer (qemu-android) expects the
+                        * physical address.
+                        */
+                       xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
                } else {
-                       /* Ensure that the page is mapped and writable */
-                       if (__put_user(0, (char __user *)address)) {
-                               if (!ret)
-                                       ret = -EFAULT;
-                               break;
-                       }
+                       /* Device version 0 (classic emulator) expects the
+                        * virtual address.
+                        */
+                       xaddr = address;
                }
 
                /* Now, try to transfer the bytes in the current page */
                spin_lock_irqsave(&dev->lock, irq_flags);
-               if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
-                               address, avail, pipe, &status)) {
+               if (access_with_param(dev,
+                               is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+                               xaddr, avail, pipe, &status)) {
                        gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
                                     dev->base + PIPE_REG_CHANNEL_HIGH);
                        writel(avail, dev->base + PIPE_REG_SIZE);
-                       gf_write_ptr((void *)address,
+                       gf_write_ptr((void *)xaddr,
                                     dev->base + PIPE_REG_ADDRESS,
                                     dev->base + PIPE_REG_ADDRESS_HIGH);
-                       writel(CMD_WRITE_BUFFER + cmd_offset,
+                       writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
                                        dev->base + PIPE_REG_COMMAND);
                        status = readl(dev->base + PIPE_REG_STATUS);
                }
                spin_unlock_irqrestore(&dev->lock, irq_flags);
 
+               if (status > 0 && !is_write)
+                       set_page_dirty(page);
+               put_page(page);
+
                if (status > 0) { /* Correct transfer */
-                       ret += status;
+                       count += status;
                        address += status;
                        continue;
-               }
-
-               if (status == 0)  /* EOF */
+               } else if (status == 0) { /* EOF */
+                       ret = 0;
                        break;
-
-               /* An error occured. If we already transfered stuff, just
-               * return with its count. We expect the next call to return
-               * an error code */
-               if (ret > 0)
+               } else if (status < 0 && count > 0) {
+                       /*
+                        * An error occurred and we already transferred
+                        * something on one of the previous pages.
+                        * Just return what we already copied and log this
+                        * error.
+                        *
+                        * Note: this seems like an incorrect approach, but we
+                        * cannot change it until we check whether any user
+                        * space ABI relies on this behavior.
+                        */
+                       if (status != PIPE_ERROR_AGAIN)
+                               pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+                                       status, is_write ? "write" : "read");
+                       ret = 0;
                        break;
+               }
 
-               /* If the error is not PIPE_ERROR_AGAIN, or if we are not in
-               * non-blocking mode, just return the error code.
-               */
+               /*
+                * If the error is not PIPE_ERROR_AGAIN, or if we are not in
+                * non-blocking mode, just return the error code.
+                */
                if (status != PIPE_ERROR_AGAIN ||
                        (filp->f_flags & O_NONBLOCK) != 0) {
                        ret = goldfish_pipe_error_convert(status);
                        break;
                }
 
-               /* We will have to wait until more data/space is available.
-               * First, mark the pipe as waiting for a specific wake signal.
-               */
+               /*
+                * The backend blocked the read/write; wait until the backend
+                * tells us it's ready to process more data.
+                */
                wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
                set_bit(wakeBit, &pipe->flags);
 
                /* Tell the emulator we're going to wait for a wake event */
-               goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
+               goldfish_cmd(pipe,
+                       is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
 
                /* Unlock the pipe, then wait for the wake signal */
                mutex_unlock(&pipe->lock);
@@ -388,12 +410,13 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
                /* Try to re-acquire the lock */
                if (mutex_lock_interruptible(&pipe->lock))
                        return -ERESTARTSYS;
-
-               /* Try the transfer again */
-               continue;
        }
        mutex_unlock(&pipe->lock);
-       return ret;
+
+       if (ret < 0)
+               return ret;
+       else
+               return count;
 }
 
 static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
@@ -446,10 +469,11 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
        unsigned long irq_flags;
        int count = 0;
 
-       /* We're going to read from the emulator a list of (channel,flags)
-       * pairs corresponding to the wake events that occured on each
-       * blocked pipe (i.e. channel).
-       */
+       /*
+        * We're going to read from the emulator a list of (channel,flags)
+        * pairs corresponding to the wake events that occurred on each
+        * blocked pipe (i.e. channel).
+        */
        spin_lock_irqsave(&dev->lock, irq_flags);
        for (;;) {
                /* First read the channel, 0 means the end of the list */
@@ -600,6 +624,12 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
                goto error;
        }
        setup_access_params_addr(pdev, dev);
+
+       /* Although the pipe device in the classic Android emulator does not
+        * recognize the 'version' register, it won't treat this as an error
+        * either and will simply return 0, which is fine.
+        */
+       dev->version = readl(dev->base + PIPE_REG_VERSION);
        return 0;
 
 error:
@@ -615,11 +645,26 @@ static int goldfish_pipe_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+       { "GFSH0003", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+       { .compatible = "google,android-pipe", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
 static struct platform_driver goldfish_pipe = {
        .probe = goldfish_pipe_probe,
        .remove = goldfish_pipe_remove,
        .driver = {
-               .name = "goldfish_pipe"
+               .name = "goldfish_pipe",
+               .owner = THIS_MODULE,
+               .of_match_table = goldfish_pipe_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
        }
 };
 
index be822f7a9ce6262442ce3bad9426ad6ff2fb04e9..aca282d454213addcad7169c2a8c1732a41e195e 100644 (file)
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -47,9 +48,9 @@
 #define SPMI_MAPPING_BIT_IS_1_FLAG(X)  (((X) >> 8) & 0x1)
 #define SPMI_MAPPING_BIT_IS_1_RESULT(X)        (((X) >> 0) & 0xFF)
 
-#define SPMI_MAPPING_TABLE_LEN         255
 #define SPMI_MAPPING_TABLE_TREE_DEPTH  16      /* Maximum of 16-bits */
-#define PPID_TO_CHAN_TABLE_SZ          BIT(12) /* PPID is 12bit chan is 1byte*/
+#define PMIC_ARB_MAX_PPID              BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID            BIT(15)
 
 /* Ownership Table */
 #define SPMI_OWNERSHIP_TABLE_REG(N)    (0x0700 + (4 * (N)))
@@ -85,9 +86,7 @@ enum pmic_arb_cmd_op_code {
 };
 
 /* Maximum number of support PMIC peripherals */
-#define PMIC_ARB_MAX_PERIPHS           256
-#define PMIC_ARB_MAX_CHNL              128
-#define PMIC_ARB_PERIPH_ID_VALID       (1 << 15)
+#define PMIC_ARB_MAX_PERIPHS           512
 #define PMIC_ARB_TIMEOUT_US            100
 #define PMIC_ARB_MAX_TRANS_BYTES       (8)
 
@@ -125,18 +124,22 @@ struct spmi_pmic_arb_dev {
        void __iomem            *wr_base;
        void __iomem            *intr;
        void __iomem            *cnfg;
+       void __iomem            *core;
+       resource_size_t         core_size;
        raw_spinlock_t          lock;
        u8                      channel;
        int                     irq;
        u8                      ee;
-       u8                      min_apid;
-       u8                      max_apid;
-       u32                     mapping_table[SPMI_MAPPING_TABLE_LEN];
+       u16                     min_apid;
+       u16                     max_apid;
+       u32                     *mapping_table;
+       DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
        struct irq_domain       *domain;
        struct spmi_controller  *spmic;
-       u16                     apid_to_ppid[256];
+       u16                     *apid_to_ppid;
        const struct pmic_arb_ver_ops *ver_ops;
-       u8                      *ppid_to_chan;
+       u16                     *ppid_to_chan;
+       u16                     last_channel;
 };
 
 /**
@@ -158,7 +161,8 @@ struct spmi_pmic_arb_dev {
  */
 struct pmic_arb_ver_ops {
        /* spmi commands (read_cmd, write_cmd, cmd) functionality */
-       u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+       int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+                     u32 *offset);
        u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
        int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
        /* Interrupts controller functionality (offset of PIC registers) */
@@ -212,7 +216,14 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
        struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
        u32 status = 0;
        u32 timeout = PMIC_ARB_TIMEOUT_US;
-       u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
+       u32 offset;
+       int rc;
+
+       rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+       if (rc)
+               return rc;
+
+       offset += PMIC_ARB_STATUS;
 
        while (timeout--) {
                status = readl_relaxed(base + offset);
@@ -257,7 +268,11 @@ pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
        unsigned long flags;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+       if (rc)
+               return rc;
 
        cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
@@ -297,7 +312,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        u8 bc = len - 1;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+       if (rc)
+               return rc;
 
        if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
                dev_err(&ctrl->dev,
@@ -344,7 +363,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        u8 bc = len - 1;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+       if (rc)
+               return rc;
 
        if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
                dev_err(&ctrl->dev,
@@ -614,6 +637,10 @@ static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
        u32 data;
 
        for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+               if (!test_and_set_bit(index, pa->mapping_table_valid))
+                       mapping_table[index] = readl_relaxed(pa->cnfg +
+                                               SPMI_MAPPING_TABLE_REG(index));
+
                data = mapping_table[index];
 
                if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
@@ -701,18 +728,61 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
 }
 
 /* v1 offset per ee */
-static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
-       return 0x800 + 0x80 * pa->channel;
+       *offset = 0x800 + 0x80 * pa->channel;
+       return 0;
 }
 
+static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+{
+       u32 regval, offset;
+       u16 chan;
+       u16 id;
+
+       /*
+        * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+        * ppid_to_chan is an in-memory invert of that table.
+        */
+       for (chan = pa->last_channel; ; chan++) {
+               offset = PMIC_ARB_REG_CHNL(chan);
+               if (offset >= pa->core_size)
+                       break;
+
+               regval = readl_relaxed(pa->core + offset);
+               if (!regval)
+                       continue;
+
+               id = (regval >> 8) & PMIC_ARB_PPID_MASK;
+               pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+               if (id == ppid) {
+                       chan |= PMIC_ARB_CHAN_VALID;
+                       break;
+               }
+       }
+       pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+
+       return chan;
+}
+
+
 /* v2 offset per ppid (chan) and per ee */
-static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
        u16 ppid = (sid << 8) | (addr >> 8);
-       u8  chan = pa->ppid_to_chan[ppid];
+       u16 chan;
 
-       return 0x1000 * pa->ee + 0x8000 * chan;
+       chan = pa->ppid_to_chan[ppid];
+       if (!(chan & PMIC_ARB_CHAN_VALID))
+               chan = pmic_arb_find_chan(pa, ppid);
+       if (!(chan & PMIC_ARB_CHAN_VALID))
+               return -ENODEV;
+       chan &= ~PMIC_ARB_CHAN_VALID;
+
+       *offset = 0x1000 * pa->ee + 0x8000 * chan;
+       return 0;
 }
 
 static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
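
To make the channel-table encoding above easier to follow: ppid_to_chan[] entries are now 16-bit values in which bit 15 (PMIC_ARB_CHAN_VALID) marks a cached entry and the low bits carry the channel number, so an uncached PPID reads back as 0 and triggers pmic_arb_find_chan(). A small stand-alone sketch of that encode/decode; the helper names are illustrative only, the constant matches the hunk above:

#include <stdint.h>
#include <stdio.h>

#define PMIC_ARB_CHAN_VALID	(1u << 15)	/* matches BIT(15) in the patch */

/* Illustrative only: cache a channel number for a PPID. */
static uint16_t chan_encode(uint16_t chan)
{
	return chan | PMIC_ARB_CHAN_VALID;
}

static int chan_decode(uint16_t entry, uint16_t *chan)
{
	if (!(entry & PMIC_ARB_CHAN_VALID))
		return -1;	/* not cached yet -> scan PMIC_ARB_REG_CHNL */
	*chan = entry & ~PMIC_ARB_CHAN_VALID;
	return 0;
}

int main(void)
{
	uint16_t table_entry = 0;	/* kcalloc'ed tables start out as zero */
	uint16_t chan;

	if (chan_decode(table_entry, &chan))
		printf("PPID not cached, would call pmic_arb_find_chan()\n");

	table_entry = chan_encode(5);
	if (!chan_decode(table_entry, &chan))
		printf("cached channel %u\n", chan);	/* prints 5 */
	return 0;
}
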
@@ -797,7 +867,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *core;
        u32 channel, ee, hw_ver;
-       int err, i;
+       int err;
        bool is_v1;
 
        ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
@@ -808,6 +878,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        pa->spmic = ctrl;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+       pa->core_size = resource_size(res);
        core = devm_ioremap_resource(&ctrl->dev, res);
        if (IS_ERR(core)) {
                err = PTR_ERR(core);
@@ -825,10 +896,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                pa->wr_base = core;
                pa->rd_base = core;
        } else {
-               u8  chan;
-               u16 ppid;
-               u32 regval;
-
+               pa->core = core;
                pa->ver_ops = &pmic_arb_v2;
 
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -847,24 +915,14 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                        goto err_put_ctrl;
                }
 
-               pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
-                                       PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
+               pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+                                               PMIC_ARB_MAX_PPID,
+                                               sizeof(*pa->ppid_to_chan),
+                                               GFP_KERNEL);
                if (!pa->ppid_to_chan) {
                        err = -ENOMEM;
                        goto err_put_ctrl;
                }
-               /*
-                * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-                * ppid_to_chan is an in-memory invert of that table.
-                */
-               for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
-                       regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
-                       if (!regval)
-                               continue;
-
-                       ppid = (regval >> 8) & 0xFFF;
-                       pa->ppid_to_chan[ppid] = chan;
-               }
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
@@ -915,9 +973,20 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 
        pa->ee = ee;
 
-       for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
-               pa->mapping_table[i] = readl_relaxed(
-                               pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+       pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
+                                           sizeof(*pa->apid_to_ppid),
+                                           GFP_KERNEL);
+       if (!pa->apid_to_ppid) {
+               err = -ENOMEM;
+               goto err_put_ctrl;
+       }
+
+       pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+                                       sizeof(*pa->mapping_table), GFP_KERNEL);
+       if (!pa->mapping_table) {
+               err = -ENOMEM;
+               goto err_put_ctrl;
+       }
 
        /* Initialize max_apid/min_apid to the opposite bounds, during
         * the irq domain translation, we are sure to update these */
index b0927e49d0a81f31e63895fa2621ed61bfc30a57..364fdcdd3a06ea101a4f5067e500c4e45ee0397e 100644 (file)
@@ -63,7 +63,7 @@ struct goldfish_audio {
 #define AUDIO_READ(data, addr)         (readl(data->reg_base + addr))
 #define AUDIO_WRITE(data, addr, x)     (writel(x, data->reg_base + addr))
 #define AUDIO_WRITE64(data, addr, addr2, x)    \
-       (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base+addr2))
+       (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base + addr2))
 
 /*
  *  temporary variable used between goldfish_audio_probe() and
@@ -280,12 +280,12 @@ static int goldfish_audio_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, data);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
+       if (!r) {
                dev_err(&pdev->dev, "platform_get_resource failed\n");
                return -ENODEV;
        }
        data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
-       if (data->reg_base == NULL)
+       if (!data->reg_base)
                return -ENOMEM;
 
        data->irq = platform_get_irq(pdev, 0);
@@ -295,7 +295,7 @@ static int goldfish_audio_probe(struct platform_device *pdev)
        }
        data->buffer_virt = dmam_alloc_coherent(&pdev->dev,
                                COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL);
-       if (data->buffer_virt == NULL) {
+       if (!data->buffer_virt) {
                dev_err(&pdev->dev, "allocate buffer failed\n");
                return -ENOMEM;
        }
index 623353db5a08ed019cd2f6d9d8bd8bd983d93067..76d60eed149004fd221aa200e14de6f9e9354cdc 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/mutex.h>
 #include <linux/goldfish.h>
 #include <asm/div64.h>
+#include <linux/dma-mapping.h>
 
 #include "goldfish_nand_reg.h"
 
@@ -99,11 +100,11 @@ static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
        loff_t ofs = instr->addr;
        u32 len = instr->len;
-       u32 rem;
+       s32 rem;
 
        if (ofs + len > mtd->size)
                goto invalid_arg;
-       rem = do_div(ofs, mtd->writesize);
+       ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= (mtd->writesize + mtd->oobsize);
@@ -132,7 +133,7 @@ invalid_arg:
 static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
                                  struct mtd_oob_ops *ops)
 {
-       u32 rem;
+       s32 rem;
 
        if (ofs + ops->len > mtd->size)
                goto invalid_arg;
@@ -141,7 +142,7 @@ static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
        if (ops->ooblen + ops->ooboffs > mtd->oobsize)
                goto invalid_arg;
 
-       rem = do_div(ofs, mtd->writesize);
+       ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= (mtd->writesize + mtd->oobsize);
@@ -164,7 +165,7 @@ invalid_arg:
 static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
                                   struct mtd_oob_ops *ops)
 {
-       u32 rem;
+       s32 rem;
 
        if (ofs + ops->len > mtd->size)
                goto invalid_arg;
@@ -173,7 +174,7 @@ static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
        if (ops->ooblen + ops->ooboffs > mtd->oobsize)
                goto invalid_arg;
 
-       rem = do_div(ofs, mtd->writesize);
+       ofs = div_s64_rem(ofs, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= (mtd->writesize + mtd->oobsize);
@@ -196,12 +197,12 @@ invalid_arg:
 static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
                              size_t *retlen, u_char *buf)
 {
-       u32 rem;
+       s32 rem;
 
        if (from + len > mtd->size)
                goto invalid_arg;
 
-       rem = do_div(from, mtd->writesize);
+       from = div_s64_rem(from, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        from *= (mtd->writesize + mtd->oobsize);
@@ -218,12 +219,12 @@ invalid_arg:
 static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
                               size_t *retlen, const u_char *buf)
 {
-       u32 rem;
+       s32 rem;
 
        if (to + len > mtd->size)
                goto invalid_arg;
 
-       rem = do_div(to, mtd->writesize);
+       to = div_s64_rem(to, mtd->writesize, &rem);
        if (rem)
                goto invalid_arg;
        to *= (mtd->writesize + mtd->oobsize);
@@ -239,12 +240,12 @@ invalid_arg:
 
 static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
-       u32 rem;
+       s32 rem;
 
        if (ofs >= mtd->size)
                goto invalid_arg;
 
-       rem = do_div(ofs, mtd->erasesize);
+       ofs = div_s64_rem(ofs, mtd->erasesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= mtd->erasesize / mtd->writesize;
@@ -260,12 +261,12 @@ invalid_arg:
 
 static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
-       u32 rem;
+       s32 rem;
 
        if (ofs >= mtd->size)
                goto invalid_arg;
 
-       rem = do_div(ofs, mtd->erasesize);
+       ofs = div_s64_rem(ofs, mtd->erasesize, &rem);
        if (rem)
                goto invalid_arg;
        ofs *= mtd->erasesize / mtd->writesize;
@@ -284,17 +285,18 @@ invalid_arg:
 static int nand_setup_cmd_params(struct platform_device *pdev,
                                 struct goldfish_nand *nand)
 {
-       u64 paddr;
+       dma_addr_t dma_handle;
        unsigned char __iomem  *base = nand->base;
 
-       nand->cmd_params = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct cmd_params), GFP_KERNEL);
-       if (!nand->cmd_params)
-               return -1;
-
-       paddr = __pa(nand->cmd_params);
-       writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
-       writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
+       nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
+                                              sizeof(struct cmd_params),
+                                              &dma_handle, GFP_KERNEL);
+       if (!nand->cmd_params) {
+               dev_err(&pdev->dev, "allocate buffer failed\n");
+               return -ENOMEM;
+       }
+       writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
+       writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
        return 0;
 }
 
@@ -319,7 +321,7 @@ static int goldfish_nand_init_device(struct platform_device *pdev,
        mtd->oobavail = mtd->oobsize;
        mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
                        (mtd->writesize + mtd->oobsize) * mtd->writesize;
-       do_div(mtd->size, mtd->writesize + mtd->oobsize);
+       mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
        mtd->size *= mtd->writesize;
        dev_dbg(&pdev->dev,
                "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
index b79a74a98a23573763ed6023d068eab184ef9611..5fbeab38889ebd29843c3c0e32e7d7a36f937d76 100644 (file)
@@ -202,7 +202,7 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
        bridge = ca91cx42_bridge->driver_priv;
 
        /* Need pdev */
-       pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+       pdev = to_pci_dev(ca91cx42_bridge->parent);
 
        INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers);
 
@@ -293,8 +293,7 @@ static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
        iowrite32(tmp, bridge->base + LINT_EN);
 
        if ((state == 0) && (sync != 0)) {
-               pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
-                       dev);
+               pdev = to_pci_dev(ca91cx42_bridge->parent);
 
                synchronize_irq(pdev->irq);
        }
@@ -518,7 +517,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
                dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
                return -EINVAL;
        }
-       pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+       pdev = to_pci_dev(ca91cx42_bridge->parent);
 
        existing_size = (unsigned long long)(image->bus_resource.end -
                image->bus_resource.start);
@@ -1519,7 +1518,7 @@ static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
        struct pci_dev *pdev;
 
        /* Find pci_dev container of dev */
-       pdev = container_of(parent, struct pci_dev, dev);
+       pdev = to_pci_dev(parent);
 
        return pci_alloc_consistent(pdev, size, dma);
 }
@@ -1530,7 +1529,7 @@ static void ca91cx42_free_consistent(struct device *parent, size_t size,
        struct pci_dev *pdev;
 
        /* Find pci_dev container of dev */
-       pdev = container_of(parent, struct pci_dev, dev);
+       pdev = to_pci_dev(parent);
 
        pci_free_consistent(pdev, size, vaddr, dma);
 }
index 0e2f43bccf1ffba52d1a7fe60a089ea4da8e3b60..a2eec97d506496e71885130269ba0af6df05319d 100644 (file)
@@ -618,7 +618,6 @@ static u8 omap_w1_read_byte(void *_hdq)
 
        hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
                              ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
-       hdq_data->hdq_usecount = 0;
 
        /* Write followed by a read, release the module */
        if (hdq_data->init_trans) {
index c9a7ff67d395244a1b4cc4bd628979838a6b8d05..89a78475173810944411f1a61fa8236d56cb75e1 100644 (file)
@@ -1147,7 +1147,6 @@ int w1_process(void *data)
                        jremain = 1;
                }
 
-               try_to_freeze();
                __set_current_state(TASK_INTERRUPTIBLE);
 
                /* hold list_mutex until after interruptible to prevent losing
index 80825a7e8e48e1ebd06af14a1bcf208acb733daf..9289da313d985f434f7c350a88db97321437372e 100644 (file)
@@ -1214,6 +1214,21 @@ config SBC_EPX_C3_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called sbc_epx_c3.
 
+config INTEL_MEI_WDT
+       tristate "Intel MEI iAMT Watchdog"
+       depends on INTEL_MEI && X86
+       select WATCHDOG_CORE
+       ---help---
+         A device driver for the Intel MEI iAMT watchdog.
+
+         The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
+         Whenever the OS hangs or crashes, iAMT will send an event
+         to any subscriber to this event. The watchdog doesn't reset the
+         platform.
+
+         To compile this driver as a module, choose M here:
+         the module will be called mei_wdt.
+
 # M32R Architecture
 
 # M68K Architecture
index f6a6a387c6c71f7a5e9e2cda4cc91ae3f39edf2c..14bd772d3e66673cfcb2a495a9fe41863d0faf00 100644 (file)
@@ -126,6 +126,7 @@ obj-$(CONFIG_MACHZ_WDT) += machzwd.o
 obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
 obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
 obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
+obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
 
 # M32R Architecture
 
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
new file mode 100644 (file)
index 0000000..630bd18
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/watchdog.h>
+
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+
+/*
+ * iAMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "iamt_wdt"
+
+#define MEI_WDT_DEFAULT_TIMEOUT   120  /* seconds */
+#define MEI_WDT_MIN_TIMEOUT       120  /* seconds */
+#define MEI_WDT_MAX_TIMEOUT     65535  /* seconds */
+
+/* Commands */
+#define MEI_MANAGEMENT_CONTROL 0x02
+
+/* MEI Management Control version number */
+#define MEI_MC_VERSION_NUMBER  0x10
+
+/* Sub Commands */
+#define MEI_MC_START_WD_TIMER_REQ  0x13
+#define MEI_MC_START_WD_TIMER_RES  0x83
+#define   MEI_WDT_STATUS_SUCCESS 0
+#define   MEI_WDT_WDSTATE_NOT_REQUIRED 0x1
+#define MEI_MC_STOP_WD_TIMER_REQ   0x14
+
+/**
+ * enum mei_wdt_state - internal watchdog state
+ *
+ * @MEI_WDT_PROBE: wd in probing stage
+ * @MEI_WDT_IDLE: wd is idle and not opened
+ * @MEI_WDT_START: wd was opened, start was called
+ * @MEI_WDT_RUNNING: wd is expecting keep alive pings
+ * @MEI_WDT_STOPPING: wd is stopping and will move to IDLE
+ * @MEI_WDT_NOT_REQUIRED: wd device is not required
+ */
+enum mei_wdt_state {
+       MEI_WDT_PROBE,
+       MEI_WDT_IDLE,
+       MEI_WDT_START,
+       MEI_WDT_RUNNING,
+       MEI_WDT_STOPPING,
+       MEI_WDT_NOT_REQUIRED,
+};
+
+static const char *mei_wdt_state_str(enum mei_wdt_state state)
+{
+       switch (state) {
+       case MEI_WDT_PROBE:
+               return "PROBE";
+       case MEI_WDT_IDLE:
+               return "IDLE";
+       case MEI_WDT_START:
+               return "START";
+       case MEI_WDT_RUNNING:
+               return "RUNNING";
+       case MEI_WDT_STOPPING:
+               return "STOPPING";
+       case MEI_WDT_NOT_REQUIRED:
+               return "NOT_REQUIRED";
+       default:
+               return "unknown";
+       }
+}
+
+/**
+ * struct mei_wdt - mei watchdog driver
+ * @wdd: watchdog device
+ *
+ * @cldev: mei watchdog client device
+ * @state: watchdog internal state
+ * @resp_required: ping required response
+ * @response: ping response completion
+ * @unregister: unregister worker
+ * @reg_lock: watchdog device registration lock
+ * @timeout: watchdog current timeout
+ *
+ * @dbgfs_dir: debugfs dir entry
+ */
+struct mei_wdt {
+       struct watchdog_device wdd;
+
+       struct mei_cl_device *cldev;
+       enum mei_wdt_state state;
+       bool resp_required;
+       struct completion response;
+       struct work_struct unregister;
+       struct mutex reg_lock;
+       u16 timeout;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+       struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+/*
+ * struct mei_mc_hdr - Management Control Command Header
+ *
+ * @command: Management Control (0x2)
+ * @bytecount: Number of bytes in the message beyond this byte
+ * @subcommand: Management Control Subcommand
+ * @versionnumber: Management Control Version (0x10)
+ */
+struct mei_mc_hdr {
+       u8 command;
+       u8 bytecount;
+       u8 subcommand;
+       u8 versionnumber;
+};
+
+/**
+ * struct mei_wdt_start_request - watchdog start/ping
+ *
+ * @hdr: Management Control Command Header
+ * @timeout: timeout value
+ * @reserved: reserved (legacy)
+ */
+struct mei_wdt_start_request {
+       struct mei_mc_hdr hdr;
+       u16 timeout;
+       u8 reserved[17];
+} __packed;
+
+/**
+ * struct mei_wdt_start_response - watchdog start/ping response
+ *
+ * @hdr: Management Control Command Header
+ * @status: operation status
+ * @wdstate: watchdog status bit mask
+ */
+struct mei_wdt_start_response {
+       struct mei_mc_hdr hdr;
+       u8 status;
+       u8 wdstate;
+} __packed;
+
+/**
+ * struct mei_wdt_stop_request - watchdog stop
+ *
+ * @hdr: Management Control Command Header
+ */
+struct mei_wdt_stop_request {
+       struct mei_mc_hdr hdr;
+} __packed;
+
+/**
+ * mei_wdt_ping - send wd start/ping command
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success,
+ *         negative errno code on failure
+ */
+static int mei_wdt_ping(struct mei_wdt *wdt)
+{
+       struct mei_wdt_start_request req;
+       const size_t req_len = sizeof(req);
+       int ret;
+
+       memset(&req, 0, req_len);
+       req.hdr.command = MEI_MANAGEMENT_CONTROL;
+       req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
+       req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ;
+       req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
+       req.timeout = wdt->timeout;
+
+       ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/**
+ * mei_wdt_stop - send wd stop command
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success,
+ *         negative errno code on failure
+ */
+static int mei_wdt_stop(struct mei_wdt *wdt)
+{
+       struct mei_wdt_stop_request req;
+       const size_t req_len = sizeof(req);
+       int ret;
+
+       memset(&req, 0, req_len);
+       req.hdr.command = MEI_MANAGEMENT_CONTROL;
+       req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
+       req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ;
+       req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
+
+       ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/**
+ * mei_wdt_ops_start - wd start command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 on success or -ENODEV;
+ */
+static int mei_wdt_ops_start(struct watchdog_device *wdd)
+{
+       struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+
+       wdt->state = MEI_WDT_START;
+       wdd->timeout = wdt->timeout;
+       return 0;
+}
+
+/**
+ * mei_wdt_ops_stop - wd stop command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 if success, negative errno code for failure
+ */
+static int mei_wdt_ops_stop(struct watchdog_device *wdd)
+{
+       struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+       int ret;
+
+       if (wdt->state != MEI_WDT_RUNNING)
+               return 0;
+
+       wdt->state = MEI_WDT_STOPPING;
+
+       ret = mei_wdt_stop(wdt);
+       if (ret)
+               return ret;
+
+       wdt->state = MEI_WDT_IDLE;
+
+       return 0;
+}
+
+/**
+ * mei_wdt_ops_ping - wd ping command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 if success, negative errno code on failure
+ */
+static int mei_wdt_ops_ping(struct watchdog_device *wdd)
+{
+       struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+       int ret;
+
+       if (wdt->state != MEI_WDT_START && wdt->state != MEI_WDT_RUNNING)
+               return 0;
+
+       if (wdt->resp_required)
+               init_completion(&wdt->response);
+
+       wdt->state = MEI_WDT_RUNNING;
+       ret = mei_wdt_ping(wdt);
+       if (ret)
+               return ret;
+
+       if (wdt->resp_required)
+               ret = wait_for_completion_killable(&wdt->response);
+
+       return ret;
+}
+
+/**
+ * mei_wdt_ops_set_timeout - wd set timeout command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ * @timeout: timeout value to set
+ *
+ * Return: 0 if success, negative errno code for failure
+ */
+static int mei_wdt_ops_set_timeout(struct watchdog_device *wdd,
+                                  unsigned int timeout)
+{
+
+       struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+
+       /* valid value is already checked by the caller */
+       wdt->timeout = timeout;
+       wdd->timeout = timeout;
+
+       return 0;
+}
+
+static const struct watchdog_ops wd_ops = {
+       .owner       = THIS_MODULE,
+       .start       = mei_wdt_ops_start,
+       .stop        = mei_wdt_ops_stop,
+       .ping        = mei_wdt_ops_ping,
+       .set_timeout = mei_wdt_ops_set_timeout,
+};
+
+/* not const as the firmware_version field needs to be retrieved */
+static struct watchdog_info wd_info = {
+       .identity = INTEL_AMT_WATCHDOG_ID,
+       .options  = WDIOF_KEEPALIVEPING |
+                   WDIOF_SETTIMEOUT |
+                   WDIOF_ALARMONLY,
+};
+
+/**
+ * __mei_wdt_is_registered - check if wdt is registered
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: true if the wdt is registered with the watchdog subsystem
+ * Locking: should be called under wdt->reg_lock
+ */
+static inline bool __mei_wdt_is_registered(struct mei_wdt *wdt)
+{
+       return !!watchdog_get_drvdata(&wdt->wdd);
+}
+
+/**
+ * mei_wdt_unregister - unregister from the watchdog subsystem
+ *
+ * @wdt: mei watchdog device
+ */
+static void mei_wdt_unregister(struct mei_wdt *wdt)
+{
+       mutex_lock(&wdt->reg_lock);
+
+       if (__mei_wdt_is_registered(wdt)) {
+               watchdog_unregister_device(&wdt->wdd);
+               watchdog_set_drvdata(&wdt->wdd, NULL);
+               memset(&wdt->wdd, 0, sizeof(wdt->wdd));
+       }
+
+       mutex_unlock(&wdt->reg_lock);
+}
+
+/**
+ * mei_wdt_register - register with the watchdog subsystem
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 if success, negative errno code for failure
+ */
+static int mei_wdt_register(struct mei_wdt *wdt)
+{
+       struct device *dev;
+       int ret;
+
+       if (!wdt || !wdt->cldev)
+               return -EINVAL;
+
+       dev = &wdt->cldev->dev;
+
+       mutex_lock(&wdt->reg_lock);
+
+       if (__mei_wdt_is_registered(wdt)) {
+               ret = 0;
+               goto out;
+       }
+
+       wdt->wdd.info = &wd_info;
+       wdt->wdd.ops = &wd_ops;
+       wdt->wdd.parent = dev;
+       wdt->wdd.timeout = MEI_WDT_DEFAULT_TIMEOUT;
+       wdt->wdd.min_timeout = MEI_WDT_MIN_TIMEOUT;
+       wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;
+
+       watchdog_set_drvdata(&wdt->wdd, wdt);
+       ret = watchdog_register_device(&wdt->wdd);
+       if (ret) {
+               dev_err(dev, "unable to register watchdog device = %d.\n", ret);
+               watchdog_set_drvdata(&wdt->wdd, NULL);
+       }
+
+       wdt->state = MEI_WDT_IDLE;
+
+out:
+       mutex_unlock(&wdt->reg_lock);
+       return ret;
+}
+
+static void mei_wdt_unregister_work(struct work_struct *work)
+{
+       struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister);
+
+       mei_wdt_unregister(wdt);
+}
+
+/**
+ * mei_wdt_event_rx - callback for data receive
+ *
+ * @cldev: bus device
+ */
+static void mei_wdt_event_rx(struct mei_cl_device *cldev)
+{
+       struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+       struct mei_wdt_start_response res;
+       const size_t res_len = sizeof(res);
+       int ret;
+
+       ret = mei_cldev_recv(wdt->cldev, (u8 *)&res, res_len);
+       if (ret < 0) {
+               dev_err(&cldev->dev, "failure in recv %d\n", ret);
+               return;
+       }
+
+       /* Empty response can be sent on stop */
+       if (ret == 0)
+               return;
+
+       if (ret < sizeof(struct mei_mc_hdr)) {
+               dev_err(&cldev->dev, "recv small data %d\n", ret);
+               return;
+       }
+
+       if (res.hdr.command != MEI_MANAGEMENT_CONTROL ||
+           res.hdr.versionnumber != MEI_MC_VERSION_NUMBER) {
+               dev_err(&cldev->dev, "wrong command received\n");
+               return;
+       }
+
+       if (res.hdr.subcommand != MEI_MC_START_WD_TIMER_RES) {
+               dev_warn(&cldev->dev, "unsupported command %d :%s[%d]\n",
+                        res.hdr.subcommand,
+                        mei_wdt_state_str(wdt->state),
+                        wdt->state);
+               return;
+       }
+
+       /* Run the unregistration in a worker as this can be
+        * run only after ping completion, otherwise the flow will
+        * deadlock on watchdog core mutex.
+        */
+       if (wdt->state == MEI_WDT_RUNNING) {
+               if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
+                       wdt->state = MEI_WDT_NOT_REQUIRED;
+                       schedule_work(&wdt->unregister);
+               }
+               goto out;
+       }
+
+       if (wdt->state == MEI_WDT_PROBE) {
+               if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
+                       wdt->state = MEI_WDT_NOT_REQUIRED;
+               } else {
+                       /* stop the watchdog and register watchdog device */
+                       mei_wdt_stop(wdt);
+                       mei_wdt_register(wdt);
+               }
+               return;
+       }
+
+       dev_warn(&cldev->dev, "not in correct state %s[%d]\n",
+                        mei_wdt_state_str(wdt->state), wdt->state);
+
+out:
+       if (!completion_done(&wdt->response))
+               complete(&wdt->response);
+}
+
+/*
+ * mei_wdt_notify_event - callback for event notification
+ *
+ * @cldev: bus device
+ */
+static void mei_wdt_notify_event(struct mei_cl_device *cldev)
+{
+       struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+
+       if (wdt->state != MEI_WDT_NOT_REQUIRED)
+               return;
+
+       mei_wdt_register(wdt);
+}
+
+/**
+ * mei_wdt_event - callback for event receive
+ *
+ * @cldev: bus device
+ * @events: event mask
+ * @context: callback context
+ */
+static void mei_wdt_event(struct mei_cl_device *cldev,
+                         u32 events, void *context)
+{
+       if (events & BIT(MEI_CL_EVENT_RX))
+               mei_wdt_event_rx(cldev);
+
+       if (events & BIT(MEI_CL_EVENT_NOTIF))
+               mei_wdt_notify_event(cldev);
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
+                                       size_t cnt, loff_t *ppos)
+{
+       struct mei_wdt *wdt = file->private_data;
+       const size_t bufsz = 32;
+       char buf[32];
+       ssize_t pos;
+
+       mutex_lock(&wdt->reg_lock);
+       pos = scnprintf(buf, bufsz, "%s\n",
+               __mei_wdt_is_registered(wdt) ? "activated" : "deactivated");
+       mutex_unlock(&wdt->reg_lock);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+}
+
+static const struct file_operations dbgfs_fops_activation = {
+       .open    = simple_open,
+       .read    = mei_dbgfs_read_activation,
+       .llseek  = generic_file_llseek,
+};
+
+static ssize_t mei_dbgfs_read_state(struct file *file, char __user *ubuf,
+                                   size_t cnt, loff_t *ppos)
+{
+       struct mei_wdt *wdt = file->private_data;
+       const size_t bufsz = 32;
+       char buf[bufsz];
+       ssize_t pos;
+
+       pos = scnprintf(buf, bufsz, "state: %s\n",
+                        mei_wdt_state_str(wdt->state));
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+}
+
+static const struct file_operations dbgfs_fops_state = {
+       .open = simple_open,
+       .read = mei_dbgfs_read_state,
+       .llseek = generic_file_llseek,
+};
+
+static void dbgfs_unregister(struct mei_wdt *wdt)
+{
+       debugfs_remove_recursive(wdt->dbgfs_dir);
+       wdt->dbgfs_dir = NULL;
+}
+
+static int dbgfs_register(struct mei_wdt *wdt)
+{
+       struct dentry *dir, *f;
+
+       dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+       if (!dir)
+               return -ENOMEM;
+
+       wdt->dbgfs_dir = dir;
+       f = debugfs_create_file("state", S_IRUSR, dir, wdt, &dbgfs_fops_state);
+       if (!f)
+               goto err;
+
+       f = debugfs_create_file("activation",  S_IRUSR,
+                               dir, wdt, &dbgfs_fops_activation);
+       if (!f)
+               goto err;
+
+       return 0;
+err:
+       dbgfs_unregister(wdt);
+       return -ENODEV;
+}
+
+#else
+
+static inline void dbgfs_unregister(struct mei_wdt *wdt) {}
+
+static inline int dbgfs_register(struct mei_wdt *wdt)
+{
+       return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int mei_wdt_probe(struct mei_cl_device *cldev,
+                        const struct mei_cl_device_id *id)
+{
+       struct mei_wdt *wdt;
+       int ret;
+
+       wdt = kzalloc(sizeof(struct mei_wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+
+       wdt->timeout = MEI_WDT_DEFAULT_TIMEOUT;
+       wdt->state = MEI_WDT_PROBE;
+       wdt->cldev = cldev;
+       wdt->resp_required = mei_cldev_ver(cldev) > 0x1;
+       mutex_init(&wdt->reg_lock);
+       init_completion(&wdt->response);
+       INIT_WORK(&wdt->unregister, mei_wdt_unregister_work);
+
+       mei_cldev_set_drvdata(cldev, wdt);
+
+       ret = mei_cldev_enable(cldev);
+       if (ret < 0) {
+               dev_err(&cldev->dev, "Could not enable cl device\n");
+               goto err_out;
+       }
+
+       ret = mei_cldev_register_event_cb(wdt->cldev,
+                                         BIT(MEI_CL_EVENT_RX) |
+                                         BIT(MEI_CL_EVENT_NOTIF),
+                                         mei_wdt_event, NULL);
+
+       /* on legacy devices notification is not supported;
+        * this doesn't fail the registration for the RX event
+        */
+       if (ret && ret != -EOPNOTSUPP) {
+               dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
+               goto err_disable;
+       }
+
+       wd_info.firmware_version = mei_cldev_ver(cldev);
+
+       if (wdt->resp_required)
+               ret = mei_wdt_ping(wdt);
+       else
+               ret = mei_wdt_register(wdt);
+
+       if (ret)
+               goto err_disable;
+
+       if (dbgfs_register(wdt))
+               dev_warn(&cldev->dev, "cannot register debugfs\n");
+
+       return 0;
+
+err_disable:
+       mei_cldev_disable(cldev);
+
+err_out:
+       kfree(wdt);
+
+       return ret;
+}
+
+static int mei_wdt_remove(struct mei_cl_device *cldev)
+{
+       struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+
+       /* Unblock the caller in case of fw initiated or unexpected reset */
+       if (!completion_done(&wdt->response))
+               complete(&wdt->response);
+
+       cancel_work_sync(&wdt->unregister);
+
+       mei_wdt_unregister(wdt);
+
+       mei_cldev_disable(cldev);
+
+       dbgfs_unregister(wdt);
+
+       kfree(wdt);
+
+       return 0;
+}
+
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+                           0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
+static struct mei_cl_device_id mei_wdt_tbl[] = {
+       { .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY },
+       /* required last entry */
+       { }
+};
+MODULE_DEVICE_TABLE(mei, mei_wdt_tbl);
+
+static struct mei_cl_driver mei_wdt_driver = {
+       .id_table = mei_wdt_tbl,
+       .name = KBUILD_MODNAME,
+
+       .probe = mei_wdt_probe,
+       .remove = mei_wdt_remove,
+};
+
+static int __init mei_wdt_init(void)
+{
+       int ret;
+
+       ret = mei_cldev_driver_register(&mei_wdt_driver);
+       if (ret) {
+               pr_err(KBUILD_MODNAME ": module registration failed\n");
+               return ret;
+       }
+       return 0;
+}
+
+static void __exit mei_wdt_exit(void)
+{
+       mei_cldev_driver_unregister(&mei_wdt_driver);
+}
+
+module_init(mei_wdt_init);
+module_exit(mei_wdt_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog");
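
Since mei_wdt registers a standard watchdog_device, user space drives it through the generic watchdog chardev API rather than anything MEI-specific. A minimal sketch of a keep-alive loop follows; the /dev/watchdog0 node name is an assumption (which node the device gets depends on the system), and the 120 s timeout mirrors MEI_WDT_DEFAULT_TIMEOUT above:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog0", O_WRONLY);
	int timeout = 120;	/* matches MEI_WDT_DEFAULT_TIMEOUT */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Set the timeout, then ping periodically; mei_wdt is alarm-only. */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout))
		perror("WDIOC_SETTIMEOUT");

	for (int i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(timeout / 2);
	}

	/* Magic close so the core does not treat this as an unexpected stop. */
	write(fd, "V", 1);
	close(fd);
	return 0;
}
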
index 6402eaf8ab958f27cc3371d0773ad45046dc0b8e..bd01b92aad98eb7a8b212a802854bf1546f23c0b 100644 (file)
@@ -1040,28 +1040,6 @@ COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
 /* PPPOX */
 COMPATIBLE_IOCTL(PPPOEIOCSFWD)
 COMPATIBLE_IOCTL(PPPOEIOCDFWD)
-/* ppdev */
-COMPATIBLE_IOCTL(PPSETMODE)
-COMPATIBLE_IOCTL(PPRSTATUS)
-COMPATIBLE_IOCTL(PPRCONTROL)
-COMPATIBLE_IOCTL(PPWCONTROL)
-COMPATIBLE_IOCTL(PPFCONTROL)
-COMPATIBLE_IOCTL(PPRDATA)
-COMPATIBLE_IOCTL(PPWDATA)
-COMPATIBLE_IOCTL(PPCLAIM)
-COMPATIBLE_IOCTL(PPRELEASE)
-COMPATIBLE_IOCTL(PPYIELD)
-COMPATIBLE_IOCTL(PPEXCL)
-COMPATIBLE_IOCTL(PPDATADIR)
-COMPATIBLE_IOCTL(PPNEGOT)
-COMPATIBLE_IOCTL(PPWCTLONIRQ)
-COMPATIBLE_IOCTL(PPCLRIRQ)
-COMPATIBLE_IOCTL(PPSETPHASE)
-COMPATIBLE_IOCTL(PPGETMODES)
-COMPATIBLE_IOCTL(PPGETMODE)
-COMPATIBLE_IOCTL(PPGETPHASE)
-COMPATIBLE_IOCTL(PPGETFLAGS)
-COMPATIBLE_IOCTL(PPSETFLAGS)
 /* Big A */
 /* sparc only */
 /* Big Q for sound/OSS */
index 9006c4e75cf737a90335eadcd73e14d59b0f753e..3d8dcdd1aeae902ad2f587e2f2f875a77ef985f7 100644 (file)
@@ -163,4 +163,13 @@ struct amba_device name##_device = {                               \
 #define module_amba_driver(__amba_drv) \
        module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
 
+/*
+ * builtin_amba_driver() - Helper macro for drivers that don't do anything
+ * special in driver initcall.  This eliminates a lot of boilerplate.  Each
+ * driver may only use this macro once, and calling it replaces the instance of
+ * device_initcall().
+ */
+#define builtin_amba_driver(__amba_drv) \
+       builtin_driver(__amba_drv, amba_driver_register)
+
 #endif
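
As a usage note for the new builtin_amba_driver() helper above: a built-in AMBA driver that previously open-coded a device_initcall() wrapper can now register itself in one line. A hedged sketch with made-up driver and ID names (foo_driver and foo_ids are illustrative only):

#include <linux/amba/bus.h>

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* ... map registers, set drvdata ... */
	return 0;
}

static const struct amba_id foo_ids[] = {
	{ .id = 0x000bb000, .mask = 0x000fffff },	/* illustrative ID */
	{ 0, 0 },
};

static struct amba_driver foo_driver = {
	.drv = {
		.name = "foo",
	},
	.probe = foo_probe,
	.id_table = foo_ids,
};

/* Replaces a hand-rolled device_initcall(foo_driver_init) stub. */
builtin_amba_driver(foo_driver);
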
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
new file mode 100644 (file)
index 0000000..7d41026
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_CORESIGHT_PMU_H
+#define _LINUX_CORESIGHT_PMU_H
+
+#define CORESIGHT_ETM_PMU_NAME "cs_etm"
+#define CORESIGHT_ETM_PMU_SEED  0x10
+
+/* ETMv3.5/PTM's ETMCR config bit */
+#define ETM_OPT_CYCACC  12
+#define ETM_OPT_TS      28
+
+static inline int coresight_get_trace_id(int cpu)
+{
+       /*
+        * A trace ID of value 0 is invalid, so let's start at some
+        * random value that fits in 7 bits and go from there.  Since
+        * the common convention is to have data trace IDs be I(N) + 1,
+        * set instruction trace IDs as a function of the CPU number.
+        */
+       return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
+}
+
+#endif
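
To make the trace-ID convention in coresight_get_trace_id() concrete: with CORESIGHT_ETM_PMU_SEED of 0x10 and a stride of 2 per CPU, instruction trace IDs land on even values and the I(N) + 1 data-trace convention takes the odd value in between. A small worked check (user-space, purely arithmetic):

#include <stdio.h>

#define CORESIGHT_ETM_PMU_SEED	0x10

static int trace_id(int cpu)
{
	return CORESIGHT_ETM_PMU_SEED + cpu * 2;	/* same formula as the header */
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: instruction ID 0x%x, data ID 0x%x\n",
		       cpu, trace_id(cpu), trace_id(cpu) + 1);
	/* cpu0: 0x10/0x11, cpu1: 0x12/0x13, cpu2: 0x14/0x15, cpu3: 0x16/0x17 */
	return 0;
}
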
index a7cabfa23b55823773cb91e9bfd7996ae34f3f97..385d62e64abb00218d5f52f32bd1875dde5b7b43 100644 (file)
@@ -14,6 +14,7 @@
 #define _LINUX_CORESIGHT_H
 
 #include <linux/device.h>
+#include <linux/perf_event.h>
 #include <linux/sched.h>
 
 /* Peripheral id registers (0xFD0-0xFEC) */
@@ -152,7 +153,6 @@ struct coresight_connection {
                by @coresight_ops.
  * @dev:       The device entity associated to this component.
  * @refcnt:    keep track of what is in use.
- * @path_link: link of current component into the path being enabled.
  * @orphan:    true if the component has connections that haven't been linked.
  * @enable:    'true' if component is currently part of an active path.
  * @activated: 'true' only if a _sink_ has been activated.  A sink can be
@@ -168,7 +168,6 @@ struct coresight_device {
        const struct coresight_ops *ops;
        struct device dev;
        atomic_t *refcnt;
-       struct list_head path_link;
        bool orphan;
        bool enable;    /* true only if configured as part of a path */
        bool activated; /* true only if a sink is part of a path */
@@ -183,12 +182,29 @@ struct coresight_device {
 /**
  * struct coresight_ops_sink - basic operations for a sink
  * Operations available for sinks
- * @enable:    enables the sink.
- * @disable:   disables the sink.
+ * @enable:            enables the sink.
+ * @disable:           disables the sink.
+ * @alloc_buffer:      initialises perf's ring buffer for trace collection.
+ * @free_buffer:       release memory allocated in @alloc_buffer.
+ * @set_buffer:                initialises buffer mechanic before a trace session.
+ * @reset_buffer:      finalises buffer mechanic after a trace session.
+ * @update_buffer:     update buffer pointers after a trace session.
  */
 struct coresight_ops_sink {
-       int (*enable)(struct coresight_device *csdev);
+       int (*enable)(struct coresight_device *csdev, u32 mode);
        void (*disable)(struct coresight_device *csdev);
+       void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
+                             void **pages, int nr_pages, bool overwrite);
+       void (*free_buffer)(void *config);
+       int (*set_buffer)(struct coresight_device *csdev,
+                         struct perf_output_handle *handle,
+                         void *sink_config);
+       unsigned long (*reset_buffer)(struct coresight_device *csdev,
+                                     struct perf_output_handle *handle,
+                                     void *sink_config, bool *lost);
+       void (*update_buffer)(struct coresight_device *csdev,
+                             struct perf_output_handle *handle,
+                             void *sink_config);
 };
 
 /**
@@ -205,14 +221,18 @@ struct coresight_ops_link {
 /**
  * struct coresight_ops_source - basic operations for a source
  * Operations available for sources.
+ * @cpu_id:    returns the value of the CPU number this component
+ *             is associated to.
  * @trace_id:  returns the value of the component's trace ID as known
  *             to the HW.
  * @enable:    enables tracing for a source.
  * @disable:   disables tracing for a source.
  */
 struct coresight_ops_source {
+       int (*cpu_id)(struct coresight_device *csdev);
        int (*trace_id)(struct coresight_device *csdev);
-       int (*enable)(struct coresight_device *csdev);
+       int (*enable)(struct coresight_device *csdev,
+                     struct perf_event_attr *attr,  u32 mode);
        void (*disable)(struct coresight_device *csdev);
 };
 
index 5428885049941c86e46184deac5bbafa6e9df13a..05b97144d342512db6019230d3f0925e4ede47c1 100644 (file)
@@ -12,7 +12,7 @@
 #define _LINUX_DAVINCI_EMAC_H
 
 #include <linux/if_ether.h>
-#include <linux/memory.h>
+#include <linux/nvmem-consumer.h>
 
 struct mdio_platform_data {
        unsigned long           bus_freq;
@@ -46,5 +46,5 @@ enum {
        EMAC_VERSION_2, /* DM646x */
 };
 
-void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context);
+void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context);
 #endif
index 06791811e49de1a001f45e0b92e803dc2a554dd7..885f587a35550cb1e4318beaeced8633fe4321e0 100644 (file)
@@ -3,16 +3,25 @@
  * platform description for 93xx46 EEPROMs.
  */
 
+struct gpio_desc;
+
 struct eeprom_93xx46_platform_data {
        unsigned char   flags;
 #define EE_ADDR8       0x01            /*  8 bit addr. cfg */
 #define EE_ADDR16      0x02            /* 16 bit addr. cfg */
 #define EE_READONLY    0x08            /* forbid writing */
 
+       unsigned int    quirks;
+/* Single word read transfers only; no sequential read. */
+#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ           (1 << 0)
+/* Instructions such as EWEN are (addrlen + 2) in length. */
+#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH         (1 << 1)
+
        /*
         * optional hooks to control additional logic
         * before and after spi transfer.
         */
        void (*prepare)(void *);
        void (*finish)(void *);
+       struct gpio_desc *select;
 };
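
For board code, the new quirks field and select GPIO above are supplied through eeprom_93xx46_platform_data on the SPI board info. A hedged sketch of such a registration; the bus number, chip select and clock speed are placeholders, not values from this patch:

#include <linux/spi/spi.h>
#include <linux/eeprom_93xx46.h>

static struct eeprom_93xx46_platform_data eeprom_pdata = {
	.flags	= EE_ADDR16,
	/* Controller cannot do sequential reads of this part. */
	.quirks	= EEPROM_93XX46_QUIRK_SINGLE_WORD_READ,
	/* .select may point to a gpio_desc that gates the chip, if needed. */
};

static struct spi_board_info eeprom_board_info __initdata = {
	.modalias	= "93xx46",
	.max_speed_hz	= 1000000,	/* placeholder */
	.bus_num	= 0,		/* placeholder */
	.chip_select	= 0,		/* placeholder */
	.platform_data	= &eeprom_pdata,
};

/* Registered from board init code, e.g.:
 * spi_register_board_info(&eeprom_board_info, 1);
 */
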
index 753dbad0bf94302c09c6807fcb47027c2f230dbb..aa0fadce9308c3d03a090e84dced8fdde2afe5fa 100644 (file)
@@ -235,6 +235,7 @@ struct vmbus_channel_offer {
 #define VMBUS_CHANNEL_LOOPBACK_OFFER                   0x100
 #define VMBUS_CHANNEL_PARENT_OFFER                     0x200
 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION   0x400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER             0x2000
 
 struct vmpacket_descriptor {
        u16 type;
@@ -391,6 +392,10 @@ enum vmbus_channel_message_type {
        CHANNELMSG_VERSION_RESPONSE             = 15,
        CHANNELMSG_UNLOAD                       = 16,
        CHANNELMSG_UNLOAD_RESPONSE              = 17,
+       CHANNELMSG_18                           = 18,
+       CHANNELMSG_19                           = 19,
+       CHANNELMSG_20                           = 20,
+       CHANNELMSG_TL_CONNECT_REQUEST           = 21,
        CHANNELMSG_COUNT
 };
 
@@ -561,6 +566,13 @@ struct vmbus_channel_initiate_contact {
        u64 monitor_page2;
 } __packed;
 
+/* Hyper-V socket: guest's connect()-ing to host */
+struct vmbus_channel_tl_connect_request {
+       struct vmbus_channel_message_header header;
+       uuid_le guest_endpoint_id;
+       uuid_le host_service_id;
+} __packed;
+
 struct vmbus_channel_version_response {
        struct vmbus_channel_message_header header;
        u8 version_supported;
@@ -633,6 +645,32 @@ enum hv_signal_policy {
        HV_SIGNAL_POLICY_EXPLICIT,
 };
 
+enum vmbus_device_type {
+       HV_IDE = 0,
+       HV_SCSI,
+       HV_FC,
+       HV_NIC,
+       HV_ND,
+       HV_PCIE,
+       HV_FB,
+       HV_KBD,
+       HV_MOUSE,
+       HV_KVP,
+       HV_TS,
+       HV_HB,
+       HV_SHUTDOWN,
+       HV_FCOPY,
+       HV_BACKUP,
+       HV_DM,
+       HV_UNKOWN,
+};
+
+struct vmbus_device {
+       u16  dev_type;
+       uuid_le guid;
+       bool perf_device;
+};
+
 struct vmbus_channel {
        /* Unique channel id */
        int id;
@@ -727,6 +765,12 @@ struct vmbus_channel {
         */
        void (*sc_creation_callback)(struct vmbus_channel *new_sc);
 
+       /*
+        * Channel rescind callback. Some channels (the hvsock ones), need to
+        * register a callback which is invoked in vmbus_onoffer_rescind().
+        */
+       void (*chn_rescind_callback)(struct vmbus_channel *channel);
+
        /*
         * The spinlock to protect the structure. It is being used to protect
         * test-and-set access to various attributes of the structure as well
@@ -767,8 +811,30 @@ struct vmbus_channel {
         * signaling control.
         */
        enum hv_signal_policy  signal_policy;
+       /*
+        * On the channel send side, many of the VMBUS
+        * device drivers explicitly serialize access to the
+        * outgoing ring buffer. Give more control to the
+        * VMBUS device drivers in terms of how to serialize
+        * access to the outgoing ring buffer.
+        * The default behavior will be to acquire the
+        * ring lock to preserve the current behavior.
+        */
+       bool acquire_ring_lock;
+
 };
 
+static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
+{
+       c->acquire_ring_lock = state;
+}
+
+static inline bool is_hvsock_channel(const struct vmbus_channel *c)
+{
+       return !!(c->offermsg.offer.chn_flags &
+                 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
 static inline void set_channel_signal_state(struct vmbus_channel *c,
                                            enum hv_signal_policy policy)
 {
@@ -790,6 +856,12 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
        return c->per_channel_state;
 }
 
+static inline void set_channel_pending_send_size(struct vmbus_channel *c,
+                                                u32 size)
+{
+       c->outbound.ring_buffer->pending_send_sz = size;
+}
+
 void vmbus_onmessage(void *context);
 
 int vmbus_request_offers(void);
@@ -801,6 +873,9 @@ int vmbus_request_offers(void);
 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
                        void (*sc_cr_cb)(struct vmbus_channel *new_sc));
 
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+               void (*chn_rescind_cb)(struct vmbus_channel *));
+
 /*
  * Retrieve the (sub) channel on which to send an outgoing request.
  * When a primary channel has multiple sub-channels, we choose a
@@ -940,6 +1015,20 @@ extern void vmbus_ontimer(unsigned long data);
 struct hv_driver {
        const char *name;
 
+       /*
+        * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
+        * channel flag, actually doesn't mean a synthetic device because the
+        * offer's if_type/if_instance can change for every new hvsock
+        * connection.
+        *
+        * However, to facilitate the notification of new-offer/rescind-offer
+        * from vmbus driver to hvsock driver, we can handle hvsock offer as
+        * a special vmbus device, and hence we need the below flag to
+        * indicate if the driver is the hvsock driver or not: we need to
+        * specially treat the hvsock offer & driver in vmbus_match().
+        */
+       bool hvsock;
+
        /* the device type supported by this driver */
        uuid_le dev_type;
        const struct hv_vmbus_device_id *id_table;
@@ -959,6 +1048,8 @@ struct hv_device {
 
        /* the device instance id of this device */
        uuid_le dev_instance;
+       u16 vendor_id;
+       u16 device_id;
 
        struct device device;
 
@@ -994,6 +1085,8 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
                                         const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
+
 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        resource_size_t min, resource_size_t max,
                        resource_size_t size, resource_size_t align,
@@ -1158,6 +1251,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
 
 struct hv_util_service {
        u8 *recv_buffer;
+       void *channel;
        void (*util_cb)(void *);
        int (*util_init)(struct hv_util_service *);
        void (*util_deinit)(void);
@@ -1242,4 +1336,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
 extern __u32 vmbus_proto_version;
 
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+                                 const uuid_le *shv_host_servie_id);
 #endif /* _HYPERV_H */
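
The hyperv.h additions above only declare the new hvsock plumbing. As a rough
sketch of how the pieces are meant to fit together, a hypothetical guest-side
hvsock consumer could look like the code below; the driver name, the GUID
parameters and the 4 KiB threshold are illustrative assumptions, not part of
this patch.

#include <linux/errno.h>
#include <linux/hyperv.h>

/* Hypothetical hvsock consumer; only the APIs declared above are used. */
static void hvs_chan_rescind(struct vmbus_channel *chan)
{
	/* The host rescinded the offer: drop the per-connection device. */
	vmbus_hvsock_device_unregister(chan);
}

static int hvs_prepare_channel(struct vmbus_channel *chan)
{
	if (!is_hvsock_channel(chan))
		return -EINVAL;

	/* This driver serializes its own sends, so skip the ring lock. */
	set_channel_lock_state(chan, false);

	/* Ask for a host interrupt once 4 KiB of ring space frees up. */
	set_channel_pending_send_size(chan, 4096);

	vmbus_set_chn_rescind_callback(chan, hvs_chan_rescind);
	return 0;
}

static int hvs_connect(const uuid_le *guest_srv_id, const uuid_le *host_srv_id)
{
	/* Ask the host to offer a channel for this guest/host service pair. */
	return vmbus_send_tl_connect_request(guest_srv_id, host_srv_id);
}

/*
 * A driver handling hvsock offers would also set .hvsock so that
 * vmbus_match() can treat it specially.
 */
static struct hv_driver hvs_drv = {
	.name   = "hv_sock_sketch",	/* placeholder name */
	.hvsock = true,
	/* .probe / .remove / .id_table as for any other vmbus driver */
};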
index 82730adba950ed196bb7006fa953704393f6578e..093607f90b9116f091d631a5e7516cff8d816cbd 100644 (file)
@@ -139,17 +139,6 @@ extern struct memory_block *find_memory_block(struct mem_section *);
 #define unregister_hotmemory_notifier(nb)  ({ (void)(nb); })
 #endif
 
-/*
- * 'struct memory_accessor' is a generic interface to provide
- * in-kernel access to persistent memory such as i2c or SPI EEPROMs
- */
-struct memory_accessor {
-       ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
-                       size_t count);
-       ssize_t (*write)(struct memory_accessor *, const char *buf,
-                        off_t offset, size_t count);
-};
-
 /*
  * Kernel text modification mutex, used for code patching. Users of this lock
  * can sleep.
index c800dbc420795c37c78b2008b4cbe5991d90b881..5c9a1d44c125a98f679838b36e2be21fbd7fd3a2 100644 (file)
@@ -580,7 +580,9 @@ struct palmas_usb {
        int vbus_irq;
 
        int gpio_id_irq;
+       int gpio_vbus_irq;
        struct gpio_desc *id_gpiod;
+       struct gpio_desc *vbus_gpiod;
        unsigned long sw_debounce_jiffies;
        struct delayed_work wq_detectid;
 
@@ -589,6 +591,7 @@ struct palmas_usb {
        bool enable_vbus_detection;
        bool enable_id_detection;
        bool enable_gpio_id_detection;
+       bool enable_gpio_vbus_detection;
 };
 
 #define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
index 0b68caff1b3c55c16d194307ab1d0b070bf4c98d..a4fcc90b0f208550561470549d2e0a187dc5b8a4 100644 (file)
@@ -23,6 +23,10 @@ struct nvmem_config {
        const struct nvmem_cell_info    *cells;
        int                     ncells;
        bool                    read_only;
+       bool                    root_only;
+       /* To be only used by old driver/misc/eeprom drivers */
+       bool                    compat;
+       struct device           *base_dev;
 };
 
 #if IS_ENABLED(CONFIG_NVMEM)
@@ -43,5 +47,4 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem)
 }
 
 #endif /* CONFIG_NVMEM */
-
 #endif  /* ifndef _LINUX_NVMEM_PROVIDER_H */
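
The new root_only, compat and base_dev fields exist so that the legacy
drivers/misc/eeprom drivers can keep exposing their old sysfs "eeprom" file
while moving to the NVMEM framework. A minimal sketch of a provider using them
follows; the function name is made up, and it assumes the device already has a
regmap, which the NVMEM core of this release looks up internally.

#include <linux/device.h>
#include <linux/nvmem-provider.h>

/* Hypothetical legacy EEPROM provider registering with the NVMEM core. */
static struct nvmem_device *legacy_eeprom_register(struct device *dev)
{
	struct nvmem_config cfg = {
		.dev       = dev,	/* provider device (must have a regmap) */
		.read_only = true,
		.root_only = true,	/* restrict the sysfs file to root */
		.compat    = true,	/* keep the old "eeprom" attribute ... */
		.base_dev  = dev,	/* ... attached to this device */
	};

	return nvmem_register(&cfg);
}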
index c42aa89d34eeb46ff0ec5f4e68229ad1fa0fad2a..dc9a13e5acda164839dfd723b3f63562e7df21d5 100644 (file)
@@ -9,7 +9,7 @@
 #define _LINUX_AT24_H
 
 #include <linux/types.h>
-#include <linux/memory.h>
+#include <linux/nvmem-consumer.h>
 
 /**
  * struct at24_platform_data - data to set up at24 (generic eeprom) driver
@@ -17,7 +17,7 @@
  * @page_size: number of byte which can be written in one go
  * @flags: tunable options, check AT24_FLAG_* defines
  * @setup: an optional callback invoked after eeprom is probed; enables kernel
-       code to access eeprom via memory_accessor, see example
+       code to access eeprom via nvmem, see example
  * @context: optional parameter passed to setup()
  *
  * If you set up a custom eeprom type, please double-check the parameters.
  *
  * An example in pseudo code for a setup() callback:
  *
- * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
+ * void get_mac_addr(struct nvmem_device *nvmem, void *context)
  * {
  *     u8 *mac_addr = ethernet_pdata->mac_addr;
  *     off_t offset = context;
  *
  *     // Read MAC addr from EEPROM
- *     if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN)
+ *     if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
  *             pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
  * }
  *
@@ -48,7 +48,7 @@ struct at24_platform_data {
 #define AT24_FLAG_IRUGO                0x20    /* sysfs-entry will be world-readable */
 #define AT24_FLAG_TAKE8ADDR    0x10    /* take always 8 addresses (24c00) */
 
-       void            (*setup)(struct memory_accessor *, void *context);
+       void            (*setup)(struct nvmem_device *nvmem, void *context);
        void            *context;
 };
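
The kernel-doc example above is pseudo code; written against the new
prototype, a board file's setup() hook might look roughly like the sketch
below. The MAC-address use case mirrors the comment, while the context cast
and the etherdevice helpers are illustrative assumptions.

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>

/* Hypothetical setup() callback reading a MAC address out of the EEPROM. */
static void board_get_mac_addr(struct nvmem_device *nvmem, void *context)
{
	u8 mac[ETH_ALEN];
	off_t offset = (off_t)(uintptr_t)context;	/* offset passed via context */

	if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac) == ETH_ALEN &&
	    is_valid_ether_addr(mac))
		pr_info("Read MAC addr from EEPROM: %pM\n", mac);
}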
 
index 403e007aef6825e2bffc7df3f8c0b8f0fee9d937..e34e169f9dcb8f73801649dbdf0bab205a8162f9 100644 (file)
@@ -30,8 +30,6 @@ struct spi_eeprom {
         */
 #define EE_INSTR_BIT3_IS_ADDR  0x0010
 
-       /* for exporting this chip's data to other kernel code */
-       void (*setup)(struct memory_accessor *mem, void *context);
        void *context;
 };
 
index 9d0083d364e642b6ac68ae56cdae3bc197ffebde..1a79ed8e43dadcb8d5884d17d73b2cd15e3a11d4 100644 (file)
@@ -67,6 +67,16 @@ struct stm_device;
  * description. That is, the lowest master that can be allocated to software
  * writers is @sw_start and data from this writer will appear is @sw_start
  * master in the STP stream.
+ *
+ * The @packet callback should adhere to the following rules:
+ *   1) it must return the number of bytes it consumed from the payload;
+ *   2) therefore, if it sent a packet that does not have payload (like FLAG),
+ *      it must return zero;
+ *   3) if it does not support the requested packet type/flag combination,
+ *      it must return -ENOTSUPP.
+ *
+ * The @unlink callback is called when there are no more active writers so
+ * that the master/channel can be quiesced.
  */
 struct stm_data {
        const char              *name;
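
The @packet rules added above are easiest to see in a concrete callback. The
sketch below follows the dummy_stm-style prototype and exists only to
illustrate the return-value contract; it is not a real STM hardware driver.

#include <linux/errno.h>
#include <linux/stm.h>

/*
 * Illustrative @packet implementation: return the number of payload bytes
 * consumed, zero for payload-less packets such as FLAG, and -ENOTSUPP for
 * anything this (fictional) hardware cannot generate.
 */
static ssize_t example_stm_packet(struct stm_data *stm_data, unsigned int master,
				  unsigned int channel, unsigned int packet,
				  unsigned int flags, unsigned int size,
				  const unsigned char *payload)
{
	switch (packet) {
	case STP_PACKET_DATA:
		/* pretend the whole payload was emitted to the STP stream */
		return size;
	case STP_PACKET_FLAG:
		/* FLAG carries no payload, so zero bytes were consumed */
		return 0;
	default:
		return -ENOTSUPP;
	}
}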
index 65ac54c61c180a6c0faa91fd4d08185387950214..1bd31a38c51edfe699f4e26a0607a882cd31f828 100644 (file)
@@ -733,6 +733,41 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
        return (void *)vmci_event_data_const_payload(ev_data);
 }
 
+/*
+ * Helper to read a value from a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32-bit value, since the pointer value
+ * never exceeds a 32-bit value in this case. Also, doing an
+ * atomic64_read on X86_32 uniprocessor systems may be implemented
+ * as a non-locked cmpxchg8b, which may end up overwriting updates done
+ * by the VMCI device to the memory location. On 32-bit SMP, the lock
+ * prefix will be used, so correctness isn't an issue, but using a
+ * 64-bit operation still adds unnecessary overhead.
+ */
+static inline u64 vmci_q_read_pointer(atomic64_t *var)
+{
+#if defined(CONFIG_X86_32)
+       return atomic_read((atomic_t *)var);
+#else
+       return atomic64_read(var);
+#endif
+}
+
+/*
+ * Helper to set the value of a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32-bit value, since the pointer value
+ * never exceeds a 32-bit value in this case. On 32-bit SMP, using a
+ * locked cmpxchg8b adds unnecessary overhead.
+ */
+static inline void vmci_q_set_pointer(atomic64_t *var,
+                                     u64 new_val)
+{
+#if defined(CONFIG_X86_32)
+       return atomic_set((atomic_t *)var, (u32)new_val);
+#else
+       return atomic64_set(var, new_val);
+#endif
+}
+
 /*
  * Helper to add a given offset to a head or tail pointer. Wraps the
  * value of the pointer around the max size of the queue.
@@ -741,14 +776,14 @@ static inline void vmci_qp_add_pointer(atomic64_t *var,
                                       size_t add,
                                       u64 size)
 {
-       u64 new_val = atomic64_read(var);
+       u64 new_val = vmci_q_read_pointer(var);
 
        if (new_val >= size - add)
                new_val -= size;
 
        new_val += add;
 
-       atomic64_set(var, new_val);
+       vmci_q_set_pointer(var, new_val);
 }
 
 /*
@@ -758,7 +793,7 @@ static inline u64
 vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
 {
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
-       return atomic64_read(&qh->producer_tail);
+       return vmci_q_read_pointer(&qh->producer_tail);
 }
 
 /*
@@ -768,7 +803,7 @@ static inline u64
 vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
 {
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
-       return atomic64_read(&qh->consumer_head);
+       return vmci_q_read_pointer(&qh->consumer_head);
 }
 
 /*
index 8c85672639d3e08aa6e9cdf88b545ba273baaac2..cb1464c411a2b4fa88778343bf33208ccd8dae76 100644 (file)
@@ -236,7 +236,7 @@ struct pcim_iomap_devres {
 
 static void pcim_iomap_release(struct device *gendev, void *res)
 {
-       struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+       struct pci_dev *dev = to_pci_dev(gendev);
        struct pcim_iomap_devres *this = res;
        int i;
 
index 024a11ac8b97a00f3843358a1ebcfa087ac4ec28..0d8bd29b1bd6fe5cfc61dc7c245ecee70420458a 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 # Before running this script please ensure that your PATH is
-# typical as you use for compilation/istallation. I use
+# typical as you use for compilation/installation. I use
 # /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may
 # differ on your system.
 #
index a8ab795569266f6e48550e4fb400d27a6f4c817d..a8c4644022a6b6cfb5f46785847178ed28159e62 100644 (file)
@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
 
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+
 all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
 %: %.c
        $(CC) $(CFLAGS) -o $@ $^