Merge tag 'ceph-for-4.20-rc4' of https://github.com/ceph/ceph-client
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Nov 2018 19:24:55 +0000 (11:24 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Nov 2018 19:24:55 +0000 (11:24 -0800)
Pull ceph fix from Ilya Dryomov:
 "A messenger fix, marked for stable"

* tag 'ceph-for-4.20-rc4' of https://github.com/ceph/ceph-client:
  libceph: fall back to sendmsg for slab pages
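
As context for the one-line summary above, here is a minimal C sketch of the pattern such a fix relies on: checking whether a page came from the slab allocator (or has a zero refcount) before handing it to ->sendpage(), and copying via sendmsg otherwise. This is an illustration of the idea, not the exact net/ceph/messenger.c patch; the function name example_tcp_sendpage() is made up, while page_count(), PageSlab(), sock_no_sendpage() and sock->ops->sendpage are standard kernel helpers.

    #include <linux/mm.h>           /* struct page, page_count() */
    #include <linux/page-flags.h>   /* PageSlab() */
    #include <linux/socket.h>       /* MSG_* flags */
    #include <net/sock.h>           /* struct socket, sock_no_sendpage() */

    static int example_tcp_sendpage(struct socket *sock, struct page *page,
                                    int offset, size_t size, bool more)
    {
            ssize_t (*sendpage)(struct socket *sock, struct page *page,
                                int offset, size_t size, int flags);
            int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
            int ret;

            /*
             * ->sendpage() takes a reference on the page and may coalesce
             * neighbouring slab objects into a single skb frag, which is
             * unsafe for slab-backed or zero-refcount pages; copy the data
             * through sendmsg (sock_no_sendpage) in those cases instead.
             */
            if (page_count(page) >= 1 && !PageSlab(page))
                    sendpage = sock->ops->sendpage;
            else
                    sendpage = sock_no_sendpage;

            ret = sendpage(sock, page, offset, size, flags);
            if (ret == -EAGAIN)
                    ret = 0;
            return ret;
    }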

177 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/security-bugs.rst
Documentation/devicetree/bindings/net/can/holt_hi311x.txt
Documentation/devicetree/bindings/net/can/rcar_can.txt
Documentation/media/uapi/v4l/dev-meta.rst
Documentation/media/uapi/v4l/vidioc-g-fmt.rst
Documentation/networking/rxrpc.txt
MAINTAINERS
arch/mips/configs/cavium_octeon_defconfig
arch/mips/kernel/setup.c
arch/mips/kernel/traps.c
arch/mips/loongson64/loongson-3/numa.c
arch/mips/sgi-ip27/ip27-memory.c
arch/riscv/Makefile
arch/riscv/boot/.gitignore [new file with mode: 0644]
arch/riscv/boot/Makefile [new file with mode: 0644]
arch/riscv/boot/install.sh [new file with mode: 0644]
arch/riscv/include/asm/module.h
arch/riscv/include/asm/uaccess.h
arch/riscv/include/asm/unistd.h
arch/riscv/include/uapi/asm/syscalls.h [deleted file]
arch/riscv/include/uapi/asm/unistd.h [new file with mode: 0644]
arch/riscv/kernel/cpu.c
arch/riscv/kernel/head.S
arch/riscv/kernel/vmlinux.lds.S
drivers/acpi/acpi_platform.c
drivers/cpufreq/ti-cpufreq.c
drivers/dma-buf/udmabuf.c
drivers/gnss/serial.c
drivers/gnss/sirf.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/hv/hv_kvp.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-svm.c
drivers/iommu/ipmmu-vmsa.c
drivers/media/cec/cec-adap.c
drivers/media/i2c/tc358743.c
drivers/media/pci/intel/ipu3/ipu3-cio2.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/vicodec/vicodec-core.c
drivers/media/platform/vim2m.c
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/misc/atmel-ssc.c
drivers/misc/sgi-gru/grukdump.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mtd/nand/raw/atmel/nand-controller.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/rcar/rcar_can.c
drivers/net/can/rx-offload.c
drivers/net/can/spi/hi311x.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
drivers/net/can/usb/ucan.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/alloc.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_rdma.h
drivers/net/phy/mdio-gpio.c
drivers/net/tun.c
drivers/nvme/host/fc.c
drivers/nvmem/core.c
drivers/opp/ti-opp-supply.c
drivers/s390/net/ism_drv.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/ufs/ufs-hisi.c
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/slimbus/slimbus.h
drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
drivers/staging/media/sunxi/cedrus/cedrus.c
drivers/uio/uio.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/pci.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/host/xhci-histb.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/appledisplay.c
fs/afs/rxrpc.c
fs/exec.c
include/linux/can/dev.h
include/linux/can/rx-offload.h
include/linux/usb/quirks.h
include/media/v4l2-mem2mem.h
include/net/af_rxrpc.h
include/trace/events/rxrpc.h
include/uapi/linux/v4l2-controls.h
lib/test_firmware.c
net/batman-adv/bat_v_elp.c
net/batman-adv/fragmentation.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/can/raw.c
net/core/dev.c
net/ipv4/ip_tunnel_core.c
net/ipv6/route.c
net/l2tp/l2tp_core.c
net/rxrpc/af_rxrpc.c
net/sched/act_pedit.c
net/sched/sch_fq.c
net/socket.c
net/tipc/discover.c
net/tipc/net.c
net/tipc/net.h
net/tipc/socket.c
sound/core/oss/pcm_oss.c
sound/core/oss/pcm_plugin.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
tools/power/cpupower/Makefile
tools/power/cpupower/bench/Makefile
tools/power/cpupower/debug/x86_64/Makefile
tools/power/cpupower/lib/cpufreq.c
tools/power/cpupower/lib/cpuidle.c
tools/power/cpupower/lib/cpupower.c
tools/power/cpupower/lib/cpupower_intern.h
tools/testing/selftests/tc-testing/tdc.py

index 81d1d5a7472804e50a663480ef92dc67abc71ab5..19f4423e70d913ea2bf0801e4e14b57dc74e320f 100644 (file)
                                        prevent spurious wakeup);
                                n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
                                        pause after every control message);
+                               o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
+                                       delay after resetting its port);
                        Example: quirks=0781:5580:bk,0a5c:5834:gij
 
        usbhid.mousepoll=
index 164bf71149fdf2e6ad099b45196cbd6d1d8347cc..30187d49dc2c7d38869c4073e60093d315c3fc4e 100644 (file)
@@ -32,16 +32,17 @@ Disclosure and embargoed information
 The security list is not a disclosure channel.  For that, see Coordination
 below.
 
-Once a robust fix has been developed, our preference is to release the
-fix in a timely fashion, treating it no differently than any of the other
-thousands of changes and fixes the Linux kernel project releases every
-month.
-
-However, at the request of the reporter, we will postpone releasing the
-fix for up to 5 business days after the date of the report or after the
-embargo has lifted; whichever comes first.  The only exception to that
-rule is if the bug is publicly known, in which case the preference is to
-release the fix as soon as it's available.
+Once a robust fix has been developed, the release process starts.  Fixes
+for publicly known bugs are released immediately.
+
+Although our preference is to release fixes for publicly undisclosed bugs
+as soon as they become available, this may be postponed at the request of
+the reporter or an affected party for up to 7 calendar days from the start
+of the release process, with an exceptional extension to 14 calendar days
+if it is agreed that the criticality of the bug requires more time.  The
+only valid reason for deferring the publication of a fix is to accommodate
+the logistics of QA and large scale rollouts which require release
+coordination.
 
 Whilst embargoed information may be shared with trusted individuals in
 order to develop a fix, such information will not be published alongside
index 903a78da65be288cf750af872584b60e9f42c06f..3a9926f99937039022d283817beac8e9bcbbc926 100644 (file)
@@ -17,7 +17,7 @@ Example:
                reg = <1>;
                clocks = <&clk32m>;
                interrupt-parent = <&gpio4>;
-               interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+               interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
                vdd-supply = <&reg5v0>;
                xceiver-supply = <&reg5v0>;
        };
index cc4372842bf37670284d9b676699b5eb00882de9..9936b9ee67c36672afeb050a5641aa44cebbb728 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
 - compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
              "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
              "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+             "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
              "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
              "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
              "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
              "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
              "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
              "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
+             "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
              "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
              "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
              compatible device.
-             "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
+             "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
+             compatible device.
              When compatible with the generic version, nodes must list the
              SoC-specific version corresponding to the platform first
              followed by the generic version.
 
 - reg: physical base address and size of the R-Car CAN register map.
 - interrupts: interrupt specifier for the sole interrupt.
-- clocks: phandles and clock specifiers for 3 CAN clock inputs.
-- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
+         devices.
+         phandles and clock specifiers for 3 CAN clock inputs for every other
+         SoC.
+- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
+              3 clock input name strings for every other SoC: "clkp1", "clkp2",
+              "can_clk".
 - pinctrl-0: pin control group to be used for this controller.
 - pinctrl-names: must be "default".
 
-Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796"
-compatible:
-In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock
-and can be used by both CAN and CAN FD controller at the same time. It needs to
-be scaled to maximum frequency if any of these controllers use it. This is done
+Required properties for R8A7795, R8A7796 and R8A77965:
+For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
+be used by both CAN and CAN FD controller at the same time. It needs to be
+scaled to maximum frequency if any of these controllers use it. This is done
 using the below properties:
 
 - assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:
 Optional properties:
 - renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
                            <0x0> (default) : Peripheral clock (clkp1)
-                           <0x1> : Peripheral clock (clkp2)
-                           <0x3> : Externally input clock
+                           <0x1> : Peripheral clock (clkp2) (not supported by
+                                   RZ/G2 devices)
+                           <0x3> : External input clock
 
 Example
 -------
index f7ac8d0d3af14a1a6d951df74847972220711133..b65dc078abeb8ca4a1a9a045eddc2d1b8844c333 100644 (file)
@@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
 the desired operation. Both drivers and applications must set the remainder of
 the :c:type:`v4l2_format` structure to 0.
 
-.. _v4l2-meta-format:
+.. c:type:: v4l2_meta_format
 
 .. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
 
index 3ead350e099f97a6146ff98bf0126a8388149883..9ea494a8facab2cca0b51745cda1af6f0f53fd4c 100644 (file)
@@ -132,6 +132,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
       - ``sdr``
       - Definition of a data format, see :ref:`pixfmt`, used by SDR
        capture and output devices.
+    * -
+      - struct :c:type:`v4l2_meta_format`
+      - ``meta``
+      - Definition of a metadata format, see :ref:`meta-formats`, used by
+       metadata capture devices.
     * -
       - __u8
       - ``raw_data``\ [200]
index 605e00cdd6beb1d024519ab8417464fc12064724..89f1302d593a5c0404ed8a434ba580e8440a9139 100644 (file)
@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:
 
        u32 rxrpc_kernel_check_life(struct socket *sock,
                                    struct rxrpc_call *call);
+       void rxrpc_kernel_probe_life(struct socket *sock,
+                                    struct rxrpc_call *call);
 
-     This returns a number that is updated when ACKs are received from the peer
-     (notably including PING RESPONSE ACKs which we can elicit by sending PING
-     ACKs to see if the call still exists on the server).  The caller should
-     compare the numbers of two calls to see if the call is still alive after
-     waiting for a suitable interval.
+     The first function returns a number that is updated when ACKs are received
+     from the peer (notably including PING RESPONSE ACKs which we can elicit by
+     sending PING ACKs to see if the call still exists on the server).  The
+     caller should compare the numbers of two calls to see if the call is still
+     alive after waiting for a suitable interval.
 
      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server whilst waiting for the server to
      process a client operation.
 
-     This function may transmit a PING ACK.
+     The second function causes a ping ACK to be transmitted to try to provoke
+     the peer into responding, which would then cause the value returned by the
+     first function to change.  Note that this must be called in TASK_RUNNING
+     state.
 
  (*) Get reply timestamp.
 
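As a usage illustration for the rxrpc_kernel_check_life()/rxrpc_kernel_probe_life() pair documented in the hunk above, here is a hedged sketch of how a kernel-side rxrpc user might poll a call for liveness. The helper name example_call_still_alive() and the fixed 100 ms wait are illustrative assumptions, not taken from fs/afs/rxrpc.c; the two rxrpc functions are used with the signatures shown in the documentation.

    #include <linux/delay.h>        /* msleep() */
    #include <linux/types.h>        /* u32, bool */
    #include <net/af_rxrpc.h>       /* rxrpc_kernel_check_life(), rxrpc_kernel_probe_life() */

    static bool example_call_still_alive(struct socket *sock,
                                         struct rxrpc_call *call)
    {
            u32 before = rxrpc_kernel_check_life(sock, call);
            u32 after;

            /* Provoke the peer into ACKing; caller must be in TASK_RUNNING. */
            rxrpc_kernel_probe_life(sock, call);

            /* Crude fixed wait before re-sampling the liveness counter. */
            msleep(100);

            /*
             * A changed counter means ACKs arrived, so the call is alive; an
             * unchanged counter after the wait suggests (but does not prove)
             * that the peer is no longer responding.
             */
            after = rxrpc_kernel_check_life(sock, call);
            return after != before;
    }
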
index b755a89fa3256e557115f0bd3b8a05dcaa5a21cf..eac2bf1abec91225ff6d979f9d33bb4f5cea147d 100644 (file)
@@ -717,7 +717,7 @@ F:  include/linux/mfd/altera-a10sr.h
 F:     include/dt-bindings/reset/altr,rst-mgr-a10sr.h
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridger@opensource.altera.com>
+M:     Thor Thayer <thor.thayer@linux.intel.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -3276,6 +3276,12 @@ F:       include/uapi/linux/caif/
 F:     include/net/caif/
 F:     net/caif/
 
+CAKE QDISC
+M:     Toke Høiland-Jørgensen <toke@toke.dk>
+L:     cake@lists.bufferbloat.net (moderated for non-subscribers)
+S:     Maintained
+F:     net/sched/sch_cake.c
+
 CALGARY x86-64 IOMMU
 M:     Muli Ben-Yehuda <mulix@mulix.org>
 M:     Jon Mason <jdmason@kudzu.us>
@@ -6299,6 +6305,7 @@ F:        tools/testing/selftests/gpio/
 
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
+M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
 L:     linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:     Maintained
@@ -7436,6 +7443,20 @@ S:       Maintained
 F:     Documentation/fb/intelfb.txt
 F:     drivers/video/fbdev/intelfb/
 
+INTEL GPIO DRIVERS
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L:     linux-gpio@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F:     drivers/gpio/gpio-ich.c
+F:     drivers/gpio/gpio-intel-mid.c
+F:     drivers/gpio/gpio-lynxpoint.c
+F:     drivers/gpio/gpio-merrifield.c
+F:     drivers/gpio/gpio-ml-ioh.c
+F:     drivers/gpio/gpio-pch.c
+F:     drivers/gpio/gpio-sch.c
+F:     drivers/gpio/gpio-sodaville.c
+
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:     Zhenyu Wang <zhenyuw@linux.intel.com>
 M:     Zhi Wang <zhi.a.wang@intel.com>
@@ -7446,12 +7467,6 @@ T:       git https://github.com/intel/gvt-linux.git
 S:     Supported
 F:     drivers/gpu/drm/i915/gvt/
 
-INTEL PMIC GPIO DRIVER
-R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S:     Maintained
-F:     drivers/gpio/gpio-*cove.c
-F:     drivers/gpio/gpio-msic.c
-
 INTEL HID EVENT DRIVER
 M:     Alex Hung <alex.hung@canonical.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -7539,12 +7554,6 @@ W:       https://01.org/linux-acpi
 S:     Supported
 F:     drivers/platform/x86/intel_menlow.c
 
-INTEL MERRIFIELD GPIO DRIVER
-M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-L:     linux-gpio@vger.kernel.org
-S:     Maintained
-F:     drivers/gpio/gpio-merrifield.c
-
 INTEL MIC DRIVERS (mic)
 M:     Sudeep Dutt <sudeep.dutt@intel.com>
 M:     Ashutosh Dixit <ashutosh.dixit@intel.com>
@@ -7577,6 +7586,13 @@ F:       drivers/platform/x86/intel_punit_ipc.c
 F:     arch/x86/include/asm/intel_pmc_ipc.h
 F:     arch/x86/include/asm/intel_punit_ipc.h
 
+INTEL PMIC GPIO DRIVERS
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F:     drivers/gpio/gpio-*cove.c
+F:     drivers/gpio/gpio-msic.c
+
 INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
 R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:     Maintained
@@ -14079,6 +14095,7 @@ F:      Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
 
 STABLE BRANCH
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:     Sasha Levin <sashal@kernel.org>
 L:     stable@vger.kernel.org
 S:     Supported
 F:     Documentation/process/stable-kernel-rules.rst
index 490b12af103c1285043ecfec912b1839c5586f06..c52d0efacd1466f0320a025d3519ffb8ba212a09 100644 (file)
@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_STAGING=y
 CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_RAS=y
 CONFIG_EXT4_FS=y
index ea09ed6a80a9f2dc0aa625e2fcec0b34e2c6acea..8c6c48ed786a1527c22ba5b46bcdad70029e5865 100644 (file)
@@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
 
        /* call board setup routine */
        plat_mem_setup();
+       memblock_set_bottom_up(true);
 
        /*
         * Make sure all kernel memory is in the maps.  The "UP" and
index 0f852e1b589193d43f9125cfbfa303d6b47c6fed..15e103c6d799ebd90ad3f55a954c1d40329d5bac 100644 (file)
@@ -2260,10 +2260,8 @@ void __init trap_init(void)
                unsigned long size = 0x200 + VECTORSPACING*64;
                phys_addr_t ebase_pa;
 
-               memblock_set_bottom_up(true);
                ebase = (unsigned long)
                        memblock_alloc_from(size, 1 << fls(size), 0);
-               memblock_set_bottom_up(false);
 
                /*
                 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2307,6 +2305,7 @@ void __init trap_init(void)
        if (board_ebase_setup)
                board_ebase_setup();
        per_cpu_trap_init(true);
+       memblock_set_bottom_up(false);
 
        /*
         * Copy the generic exception handlers to their final destination.
index 622761878cd11bd54e53affa43e5504c0aadc9c0..60bf0a1cb75719d731b60f8b2fe2954074dea77b 100644 (file)
@@ -231,6 +231,8 @@ static __init void prom_meminit(void)
                        cpumask_clear(&__node_data[(node)]->cpumask);
                }
        }
+       max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
        for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
                node = cpu / loongson_sysconf.cores_per_node;
                if (node >= num_online_nodes())
@@ -248,19 +250,9 @@ static __init void prom_meminit(void)
 
 void __init paging_init(void)
 {
-       unsigned node;
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
        pagetable_init();
-
-       for_each_online_node(node) {
-               unsigned long  start_pfn, end_pfn;
-
-               get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-               if (end_pfn > max_low_pfn)
-                       max_low_pfn = end_pfn;
-       }
 #ifdef CONFIG_ZONE_DMA32
        zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
index d8b8444d679527e3843ea1de091be70926355f34..813d13f92957ed00715ac5914d62dc09bcba1861 100644 (file)
@@ -435,6 +435,7 @@ void __init prom_meminit(void)
 
        mlreset();
        szmem();
+       max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
 
        for (node = 0; node < MAX_COMPACT_NODES; node++) {
                if (node_online(node)) {
@@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
 void __init paging_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
-       unsigned node;
 
        pagetable_init();
-
-       for_each_online_node(node) {
-               unsigned long start_pfn, end_pfn;
-
-               get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-               if (end_pfn > max_low_pfn)
-                       max_low_pfn = end_pfn;
-       }
        zones_size[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(zones_size);
 }
index 4af153a182b071fcd4853a8853fb94fb1333312b..4b594f2e4f7ebd9eb791f6c2709f4023d3aa76c6 100644 (file)
@@ -71,6 +71,10 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
 # arch specific predefines for sparse
 CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
 
+# Default target when executing plain make
+boot           := arch/riscv/boot
+KBUILD_IMAGE   := $(boot)/Image.gz
+
 head-y := arch/riscv/kernel/head.o
 
 core-y += arch/riscv/kernel/ arch/riscv/mm/
@@ -81,4 +85,13 @@ PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
 
-all: vmlinux
+all: Image.gz
+
+Image: vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: Image
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+zinstall install:
+       $(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
new file mode 100644 (file)
index 0000000..8dab0bb
--- /dev/null
@@ -0,0 +1,2 @@
+Image
+Image.gz
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
new file mode 100644 (file)
index 0000000..0990a9f
--- /dev/null
@@ -0,0 +1,33 @@
+#
+# arch/riscv/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2018, Anup Patel.
+# Author: Anup Patel <anup@brainfault.org>
+#
+# Based on the ia64 and arm64 boot/Makefile.
+#
+
+OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+targets := Image
+
+$(obj)/Image: vmlinux FORCE
+       $(call if_changed,objcopy)
+
+$(obj)/Image.gz: $(obj)/Image FORCE
+       $(call if_changed,gzip)
+
+install:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(obj)/Image System.map "$(INSTALL_PATH)"
+
+zinstall:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
new file mode 100644 (file)
index 0000000..18c3915
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/sh
+#
+# arch/riscv/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the RISC-V Linux port
+#
+# Arguments:
+#   $1 - kernel version
+#   $2 - kernel image file
+#   $3 - kernel map file
+#   $4 - default install path (blank if root directory)
+#
+
+verify () {
+       if [ ! -f "$1" ]; then
+               echo ""                                                   1>&2
+               echo " *** Missing file: $1"                              1>&2
+               echo ' *** You need to run "make" before "make install".' 1>&2
+               echo ""                                                   1>&2
+               exit 1
+       fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+if [ "$(basename $2)" = "Image.gz" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
index 349df33808c4231d155d2e6018e19cc4cb19cb4d..cd2af4b013e3826e3b43f44565a9b6c1c6ae7b70 100644 (file)
@@ -8,6 +8,7 @@
 
 #define MODULE_ARCH_VERMAGIC    "riscv"
 
+struct module;
 u64 module_emit_got_entry(struct module *mod, u64 val);
 u64 module_emit_plt_entry(struct module *mod, u64 val);
 
index 473cfc84e412f3827703caaadffa34a8983c978d..8c3e3e3c8be1204b67076985a9b54e80217707b1 100644 (file)
@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
 static inline unsigned long
 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       return __asm_copy_to_user(to, from, n);
+       return __asm_copy_from_user(to, from, n);
 }
 
 static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       return __asm_copy_from_user(to, from, n);
+       return __asm_copy_to_user(to, from, n);
 }
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
index eff7aa9aa1637851aadd871807644560bef5c412..fef96f117b4def3fe9a99ec9ca8b26d2278e087c 100644 (file)
 
 /*
  * There is explicitly no include guard here because this file is expected to
- * be included multiple times.  See uapi/asm/syscalls.h for more info.
+ * be included multiple times.
  */
 
-#define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SYS_CLONE
+
 #include <uapi/asm/unistd.h>
-#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
deleted file mode 100644 (file)
index 206dc4b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2017-2018 SiFive
- */
-
-/*
- * There is explicitly no include guard here because this file is expected to
- * be included multiple times in order to define the syscall macros via
- * __SYSCALL.
- */
-
-/*
- * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
- * having a direct 'fence.i' instruction available to userspace (which we
- * can't trap!), that's not actually viable when running on Linux because the
- * kernel might schedule a process on another hart.  There is no way for
- * userspace to handle this without invoking the kernel (as it doesn't know the
- * thread->hart mappings), so we've defined a RISC-V specific system call to
- * flush the instruction cache.
- *
- * __NR_riscv_flush_icache is defined to flush the instruction cache over an
- * address range, with the flush applying to either all threads or just the
- * caller.  We don't currently do anything with the address range, that's just
- * in there for forwards compatibility.
- */
-#ifndef __NR_riscv_flush_icache
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-#endif
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..1f3bd3e
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
index 3a5a2ee31547b2ca1f3ec0f0cf613ff3448e85e3..b4a7d4427fbb9430e02cdc1855729a9eba90c815 100644 (file)
@@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
 
 static void print_isa(struct seq_file *f, const char *orig_isa)
 {
-       static const char *ext = "mafdc";
+       static const char *ext = "mafdcsu";
        const char *isa = orig_isa;
        const char *e;
 
@@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
        /*
         * Check the rest of the ISA string for valid extensions, printing those
         * we find.  RISC-V ISA strings define an order, so we only print the
-        * extension bits when they're in order.
+        * extension bits when they're in order. Hide the supervisor (S)
+        * extension from userspace as it's not accessible from there.
         */
        for (e = ext; *e != '\0'; ++e) {
                if (isa[0] == e[0]) {
-                       seq_write(f, isa, 1);
+                       if (isa[0] != 's')
+                               seq_write(f, isa, 1);
+
                        isa++;
                }
        }
index 711190d473d41f47dd52f9fc4720df720ebf57be..fe884cd69abd8f0d7b3fe3a9e20b781937a32502 100644 (file)
@@ -44,6 +44,16 @@ ENTRY(_start)
        amoadd.w a3, a2, (a3)
        bnez a3, .Lsecondary_start
 
+       /* Clear BSS for flat non-ELF images */
+       la a3, __bss_start
+       la a4, __bss_stop
+       ble a4, a3, clear_bss_done
+clear_bss:
+       REG_S zero, (a3)
+       add a3, a3, RISCV_SZPTR
+       blt a3, a4, clear_bss
+clear_bss_done:
+
        /* Save hart ID and DTB physical address */
        mv s0, a0
        mv s1, a1
index ece84991609ca56d2d3549d1a2dde2139a943028..65df1dfdc30385be7a9a149034a0ba66a5bbbd8e 100644 (file)
@@ -74,7 +74,7 @@ SECTIONS
                *(.sbss*)
        }
 
-       BSS_SECTION(0, 0, 0)
+       BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
 
        EXCEPTION_TABLE(0x10)
        NOTES
index eaa60c94205a82f685190a5c0790d6ca91df69cb..1f32caa87686e2369b0f4841447d5ab5093b993d 100644 (file)
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
        {"PNP0200",  0},        /* AT DMA Controller */
        {"ACPI0009", 0},        /* IOxAPIC */
        {"ACPI000A", 0},        /* IOAPIC */
+       {"SMB0001",  0},        /* ACPI SMBUS virtual device */
        {"", 0},
 };
 
index 3f0e2a14895a03dc0c4bc1d01c032d0f50a4b4ae..22b53bf268179ad8c2e873dd9637af61adf2f54d 100644 (file)
@@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
        {},
 };
 
+static const struct of_device_id *ti_cpufreq_match_node(void)
+{
+       struct device_node *np;
+       const struct of_device_id *match;
+
+       np = of_find_node_by_path("/");
+       match = of_match_node(ti_cpufreq_of_match, np);
+       of_node_put(np);
+
+       return match;
+}
+
 static int ti_cpufreq_probe(struct platform_device *pdev)
 {
        u32 version[VERSION_COUNT];
-       struct device_node *np;
        const struct of_device_id *match;
        struct opp_table *ti_opp_table;
        struct ti_cpufreq_data *opp_data;
        const char * const reg_names[] = {"vdd", "vbb"};
        int ret;
 
-       np = of_find_node_by_path("/");
-       match = of_match_node(ti_cpufreq_of_match, np);
-       of_node_put(np);
+       match = dev_get_platdata(&pdev->dev);
        if (!match)
                return -ENODEV;
 
@@ -290,7 +299,14 @@ fail_put_node:
 
 static int ti_cpufreq_init(void)
 {
-       platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+       const struct of_device_id *match;
+
+       /* Check to ensure we are on a compatible platform */
+       match = ti_cpufreq_match_node();
+       if (match)
+               platform_device_register_data(NULL, "ti-cpufreq", -1, match,
+                                             sizeof(*match));
+
        return 0;
 }
 module_init(ti_cpufreq_init);
index 5b44ef226904f9be2530b992c3d06ed338dca53c..fc359ca4503d127fda5f9f631f95d98c4a1d671a 100644 (file)
@@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
+       exp_info.flags = O_RDWR;
 
        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
index b01ba4438501a959de7796dc2eff67a6126d88e3..31e891f00175c635a9ee92c7e0f090eb135fc29b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched.h>
 #include <linux/serdev.h>
 #include <linux/slab.h>
 
@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
        int ret;
 
        /* write is only buffered synchronously */
-       ret = serdev_device_write(serdev, buf, count, 0);
+       ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
 
index 79cb98950013bbb60f4ff4126cd562453441045a..71d014edd16760d6c37dad72836ed9e13cbfffba 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+#include <linux/sched.h>
 #include <linux/serdev.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
        int ret;
 
        /* write is only buffered synchronously */
-       ret = serdev_device_write(serdev, buf, count, 0);
+       ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
 
index 8269cffc2967f772ba2da14e566fd136bb68da56..6a50f9f59c901b6d38069a67b5b9f7881e8a43b6 100644 (file)
@@ -35,8 +35,8 @@
 #define gpio_mockup_err(...)   pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
 
 enum {
-       GPIO_MOCKUP_DIR_OUT = 0,
-       GPIO_MOCKUP_DIR_IN = 1,
+       GPIO_MOCKUP_DIR_IN = 0,
+       GPIO_MOCKUP_DIR_OUT = 1,
 };
 
 /*
@@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
 {
        struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
 
-       return chip->lines[offset].dir;
+       return !chip->lines[offset].dir;
 }
 
 static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
index bfe4c5c9f41cef3a9c3a483e5283e015d840bc91..e9600b556f397babf8c472ceb2b012f2de97b42a 100644 (file)
@@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
        if (pxa_gpio_has_pinctrl()) {
                ret = pinctrl_gpio_direction_input(chip->base + offset);
-               if (!ret)
-                       return 0;
+               if (ret)
+                       return ret;
        }
 
        spin_lock_irqsave(&gpio_lock, flags);
index 230e41562462b27fdf5d11874b3de8c34c107707..a2cbb474901c224bebae335cd0789273a78955ae 100644 (file)
@@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
        if (!gdev->descs) {
                status = -ENOMEM;
-               goto err_free_gdev;
+               goto err_free_ida;
        }
 
        if (chip->ngpio == 0) {
@@ -1427,8 +1427,9 @@ err_free_label:
        kfree_const(gdev->label);
 err_free_descs:
        kfree(gdev->descs);
-err_free_gdev:
+err_free_ida:
        ida_simple_remove(&gpio_ida, gdev->id);
+err_free_gdev:
        /* failures here can mean systems won't boot... */
        pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
               gdev->base, gdev->base + gdev->ngpio - 1,
index c31a8849e9f87705ed3abac2ac23bfcbc7303d85..1580ec60b89f753ce2e12018b2a07eb936802d2e 100644 (file)
@@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-       amdgpu_dpm_switch_power_profile(adev,
-                                       PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+       if (adev->powerplay.pp_funcs &&
+           adev->powerplay.pp_funcs->switch_power_profile)
+               amdgpu_dpm_switch_power_profile(adev,
+                                               PP_SMC_POWER_PROFILE_COMPUTE,
+                                               !idle);
 }
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
index 6748cd7fc129b0e7b83966da865f674c676c04e1..686a26de50f91e816471548bf3c1a0fc3f86db86 100644 (file)
@@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
                                         "dither",
                                         amdgpu_dither_enum_list, sz);
 
+       if (amdgpu_device_has_dc_support(adev)) {
+               adev->mode_info.max_bpc_property =
+                       drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
+               if (!adev->mode_info.max_bpc_property)
+                       return -ENOMEM;
+       }
+
        return 0;
 }
 
index b9e9e8b02fb756a0d7291c605353d59cd228826e..d1b4d9b6aae0d1743f77dc2373d0c9159d03f937 100644 (file)
@@ -339,6 +339,8 @@ struct amdgpu_mode_info {
        struct drm_property *audio_property;
        /* FMT dithering */
        struct drm_property *dither_property;
+       /* maximum number of bits per channel for monitor color */
+       struct drm_property *max_bpc_property;
        /* hardcoded DFP edid from BIOS */
        struct edid *bios_hardcoded_edid;
        int bios_hardcoded_edid_size;
index e1c2b4e9c7b23a10ac3b1b2b5375d2bf84eae9c2..73ad02aea2b2e802f0dbce340d70877bdc3b6ca1 100644 (file)
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
 MODULE_FIRMWARE("amdgpu/verde_mc.bin");
 MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
 MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
index bf5e6a413dee6e9b5de53f62ad7547ecb5e5b23e..4cc0dcb1a1875bfc559affd1f55e268a25e6282e 100644 (file)
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
 
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL   0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK   0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK  0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK            0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX  0
 /*
  * Indirect registers accessor
  */
@@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
 {
        uint32_t def, data;
 
-       def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+       if (adev->asic_type == CHIP_VEGA20) {
+               def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
 
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-               data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-       else
-               data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+               if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+                       data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+               else
+                       data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
 
-       if (def != data)
-               WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+               if (def != data)
+                       WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+       } else {
+               def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+               if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+                       data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+               else
+                       data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+               if (def != data)
+                       WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+       }
 }
 
 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
index c1262f62cd9f21400c68a6040000d31e4a9a9716..ca925200fe09240ae4f96a2ef7f633726b4cf883 100644 (file)
@@ -2358,8 +2358,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector)
 {
+       struct dm_connector_state *dm_conn_state =
+               to_dm_connector_state(connector->state);
        uint32_t bpc = connector->display_info.bpc;
 
+       /* TODO: Remove this when there's support for max_bpc in drm */
+       if (dm_conn_state && bpc > dm_conn_state->max_bpc)
+               /* Round down to nearest even number. */
+               bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
+
        switch (bpc) {
        case 0:
                /*
@@ -2943,6 +2950,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
+       } else if (property == adev->mode_info.max_bpc_property) {
+               dm_new_state->max_bpc = val;
+               ret = 0;
        }
 
        return ret;
@@ -2985,6 +2995,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
        } else if (property == adev->mode_info.underscan_property) {
                *val = dm_state->underscan_enable;
                ret = 0;
+       } else if (property == adev->mode_info.max_bpc_property) {
+               *val = dm_state->max_bpc;
+               ret = 0;
        }
        return ret;
 }
@@ -3795,6 +3808,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_vborder_property,
                                0);
+       drm_object_attach_property(&aconnector->base.base,
+                               adev->mode_info.max_bpc_property,
+                               0);
 
 }
 
index 924a38a1fc446019a0aac035b018b952e3be9923..6e069d777ab22d0e733bb7bc1c62d3d3370cc265 100644 (file)
@@ -204,6 +204,7 @@ struct dm_connector_state {
        enum amdgpu_rmx_type scaling;
        uint8_t underscan_vborder;
        uint8_t underscan_hborder;
+       uint8_t max_bpc;
        bool underscan_enable;
        bool freesync_enable;
        bool freesync_capable;
index ed35ec0341e671ab8f5cee4d89d2417283147bf3..88f6b35ea6fee9cb7bdf40df0eeef3b3d71ee233 100644 (file)
@@ -4525,12 +4525,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
        struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
        struct smu7_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.sclk_table);
-       int value;
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
+                       [golden_sclk_table->count - 1].value;
 
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -4567,12 +4567,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
        struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
        struct smu7_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mclk_table);
-       int value;
+        int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
+                       [golden_mclk_table->count - 1].value;
 
-       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
index 8c4db86bb4b770b345575564ad335d48e2681835..e2bc6e0c229f96dde7baf50d2f2189efce4c2c2e 100644 (file)
@@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
        struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
        struct vega10_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.gfx_table);
-       int value;
-
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels
-                       [golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
                        [golden_sclk_table->count - 1].value;
 
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
+
        return value;
 }
 
@@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
        struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
        struct vega10_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mem_table);
-       int value;
-
-       value = (mclk_table->dpm_levels
-                       [mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels
-                       [golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
                        [golden_mclk_table->count - 1].value;
 
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
+
        return value;
 }
 
index 74bc37308dc09cb8303cd42146c7d88af3a27c58..54364444ecd121dd611c30148f4e292a8ee18e1a 100644 (file)
@@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
        struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
        struct vega12_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.gfx_table);
-       int value;
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
+                       [golden_sclk_table->count - 1].value;
 
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
        struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
        struct vega12_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mem_table);
-       int value;
-
-       value = (mclk_table->dpm_levels
-                       [mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels
-                       [golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
                        [golden_mclk_table->count - 1].value;
 
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
+
        return value;
 }
 
index 99861f32b1f95aedfb5c64d39814baca329662c2..b4eadd47f3a44a22b95ccd12effd99924a493e24 100644 (file)
@@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
        data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
 
-       data->registry_data.disallowed_features = 0x0;
+       /*
+        * Disable the following features for now:
+        *   GFXCLK DS
+        *   SOCLK DS
+        *   LCLK DS
+        *   DCEFCLK DS
+        *   FCLK DS
+        *   MP1CLK DS
+        *   MP0CLK DS
+        */
+       data->registry_data.disallowed_features = 0xE0041C00;
        data->registry_data.od_state_in_dc_support = 0;
        data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
@@ -1313,12 +1323,13 @@ static int vega20_get_sclk_od(
                        &(data->dpm_table.gfx_table);
        struct vega20_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.gfx_table);
-       int value;
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
+                       [golden_sclk_table->count - 1].value;
 
        /* od percentage */
-       value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
-               golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
-               golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -1358,12 +1369,13 @@ static int vega20_get_mclk_od(
                        &(data->dpm_table.mem_table);
        struct vega20_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mem_table);
-       int value;
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
+                       [golden_mclk_table->count - 1].value;
 
        /* od percentage */
-       value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
-               golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
-               golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
index 69dab82a37714853b5dfdb74dbe479f7d6c10fb1..bf589c53b908d66789679df6f4098c883150fa87 100644 (file)
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+       struct apertures_struct *ap;
+       bool primary = false;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+       primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
+       kfree(ap);
+}
+
 static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       ast_kick_out_firmware_fb(pdev);
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
index 5e77d456d9bb9434040107a69536815a270c7865..7c6ac3cadb6b7fabcfe9a309d5fc201db6b75bf7 100644 (file)
@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
        }
        ast_bo_unreserve(bo);
 
+       ast_set_offset_reg(crtc);
        ast_set_start_address_crt1(crtc, (u32)gpu_addr);
 
        return 0;
@@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
        ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
 
        /* dummy write to fire HWC */
-       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+       ast_show_cursor(crtc);
 
        return 0;
 }
index a502f3e519fdb668c685f392d518361e1e0a1ef2..dd852a25d37540fdf9ee6254f08a68b98df3e9a5 100644 (file)
@@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
        mutex_lock(&fb_helper->lock);
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+                       continue;
+
                ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
                if (ret)
                        goto fail;
index 1aaccbe7e1debd0c11440ac9acae9c15b07880d5..d4fac09095f862aed3131243957059de2df4f6b0 100644 (file)
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
                else if (gen >= 4)
                        len = 4;
                else
-                       len = 3;
+                       len = 6;
 
                batch = reloc_gpu(eb, vma, len);
                if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
                        *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *batch++ = addr;
                        *batch++ = target_offset;
+
+                       /* And again for good measure (blb/pnv) */
+                       *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+                       *batch++ = addr;
+                       *batch++ = target_offset;
                }
 
                goto out;
index 47c302543799007bee421d85039be8b016aed6e7..07999fe09ad231a037bc73379b5fc27aaf484b94 100644 (file)
@@ -3413,6 +3413,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
                ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
                if (ggtt->vm.clear_range != nop_clear_range)
                        ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+
+               /* Prevent recursively calling stop_machine() and deadlocks. */
+               dev_info(dev_priv->drm.dev,
+                        "Disabling error capture for VT-d workaround\n");
+               i915_disable_error_state(dev_priv, -ENODEV);
        }
 
        ggtt->invalidate = gen6_ggtt_invalidate;
index 8762d17b66591e2afc8fc9647bef2784145f2c19..3eb33e000d6f00f3ae4b3fdbc8f37f38e97b4d83 100644 (file)
@@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                return 0;
        }
 
+       if (IS_ERR(error))
+               return PTR_ERR(error);
+
        if (*error->error_msg)
                err_printf(m, "%s\n", error->error_msg);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
@@ -1859,6 +1862,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
        error = i915_capture_gpu_state(i915);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               i915_disable_error_state(i915, -ENOMEM);
                return;
        }
 
@@ -1914,5 +1918,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
        i915->gpu_error.first_error = NULL;
        spin_unlock_irq(&i915->gpu_error.lock);
 
-       i915_gpu_state_put(error);
+       if (!IS_ERR(error))
+               i915_gpu_state_put(error);
+}
+
+void i915_disable_error_state(struct drm_i915_private *i915, int err)
+{
+       spin_lock_irq(&i915->gpu_error.lock);
+       if (!i915->gpu_error.first_error)
+               i915->gpu_error.first_error = ERR_PTR(err);
+       spin_unlock_irq(&i915->gpu_error.lock);
 }
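
Taken together, the error-capture hunks above park an ERR_PTR() sentinel in gpu_error.first_error to record that capture has been disabled (VT-d workaround or allocation failure), and the readers bail out via IS_ERR()/PTR_ERR(). A standalone sketch of that sentinel idiom, with the ERR_PTR helpers re-implemented locally and no locking (simplified on purpose):

#include <stdio.h>
#include <errno.h>

/* Minimal local stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *first_error; /* stands in for i915->gpu_error.first_error */

/* Mirrors i915_disable_error_state(): only set the sentinel if no real
 * error state has been captured yet. */
static void disable_error_state(int err)
{
	if (!first_error)
		first_error = ERR_PTR(err);
}

int main(void)
{
	disable_error_state(-ENODEV);

	if (IS_ERR(first_error))
		printf("error capture disabled: %ld\n", PTR_ERR(first_error));
	return 0;
}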
index 8710fb18ed746cface7e9a7b2d6d6ac7cd06b2b4..3ec89a504de52331ade6a9452a844527d84ec515 100644 (file)
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
 
 struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
 void i915_reset_error_state(struct drm_i915_private *i915);
+void i915_disable_error_state(struct drm_i915_private *i915, int err);
 
 #else
 
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
 static inline struct i915_gpu_state *
 i915_first_error_state(struct drm_i915_private *i915)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline void i915_reset_error_state(struct drm_i915_private *i915)
 {
 }
 
+static inline void i915_disable_error_state(struct drm_i915_private *i915,
+                                           int err)
+{
+}
+
 #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
 
 #endif /* _I915_GPU_ERROR_H_ */
index a54843fdeb2f04a353c30af6e86e131c203b2868..c9878dd1f7cd04dd3a6a7b8877d8f5d95eafc620 100644 (file)
@@ -2890,6 +2890,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       intel_state->base.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->base.rotation);
        intel_state->color_plane[0].stride =
@@ -7882,8 +7883,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }
+
+               if (val & DISPPLANE_ROTATE_180)
+                       plane_config->rotation = DRM_MODE_ROTATE_180;
        }
 
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+           val & DISPPLANE_MIRROR)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);
@@ -8952,6 +8960,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                goto error;
        }
 
+       /*
+        * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
+        * while i915 HW rotation is clockwise; that's why the values are swapped here.
+        */
+       switch (val & PLANE_CTL_ROTATE_MASK) {
+       case PLANE_CTL_ROTATE_0:
+               plane_config->rotation = DRM_MODE_ROTATE_0;
+               break;
+       case PLANE_CTL_ROTATE_90:
+               plane_config->rotation = DRM_MODE_ROTATE_270;
+               break;
+       case PLANE_CTL_ROTATE_180:
+               plane_config->rotation = DRM_MODE_ROTATE_180;
+               break;
+       case PLANE_CTL_ROTATE_270:
+               plane_config->rotation = DRM_MODE_ROTATE_90;
+               break;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           val & PLANE_CTL_FLIP_HORIZONTAL)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
        base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
        plane_config->base = base;
 
@@ -15267,6 +15298,14 @@ retry:
                        ret = drm_atomic_add_affected_planes(state, crtc);
                        if (ret)
                                goto out;
+
+                       /*
+                        * FIXME hack to force a LUT update to avoid the
+                        * plane update forcing the pipe gamma on without
+                        * having a proper LUT loaded. Remove once we
+                        * have readout for pipe gamma enable.
+                        */
+                       crtc_state->color_mgmt_changed = true;
                }
        }
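
The skylake_get_initial_plane_config() and i9xx_get_initial_plane_config() hunks above translate the plane's clockwise hardware rotation field into DRM's counter-clockwise DRM_MODE_ROTATE_* values, which is why 90 and 270 trade places. A tiny table-driven sketch of that inverse mapping, using locally defined stand-ins rather than the real register/DRM constants:

#include <stdio.h>

/* Local stand-ins for the hardware rotation field values. */
enum hw_rotate { HW_ROTATE_0, HW_ROTATE_90, HW_ROTATE_180, HW_ROTATE_270 };

static int hw_to_drm_degrees(enum hw_rotate hw)
{
	/* HW rotates clockwise, DRM_MODE_ROTATE_* is counter-clockwise:
	 * 0 and 180 map onto themselves, 90 and 270 swap. */
	static const int drm_degrees[] = { 0, 270, 180, 90 };

	return drm_degrees[hw];
}

int main(void)
{
	printf("hw 90 cw  -> DRM_MODE_ROTATE_%d\n", hw_to_drm_degrees(HW_ROTATE_90));
	printf("hw 270 cw -> DRM_MODE_ROTATE_%d\n", hw_to_drm_degrees(HW_ROTATE_270));
	return 0;
}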
 
index 8b298e5f012dac121385df61fa72d6a653a21354..db6fa1d0cbdae3efea1e3ddb1c3ce43bf5f8a10e 100644 (file)
@@ -547,6 +547,7 @@ struct intel_initial_plane_config {
        unsigned int tiling;
        int size;
        u32 base;
+       u8 rotation;
 };
 
 #define SKL_MIN_SRC_W 8
index 245f0022bcfd00c730f020b24326bfba90de4ea2..3fe358db12768f23a2fcf7fe971821c4cac3f1b9 100644 (file)
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 {
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+       /*
+        * On some SNB machines (Thinkpad X220 Tablet at least)
+        * LP3 usage can cause vblank interrupts to be lost.
+        * The DEIIR bit will go high but it looks like the CPU
+        * never gets interrupted.
+        *
+        * It's not clear whether other interrupt sources could
+        * be affected or if this is somehow limited to vblank
+        * interrupts only. To play it safe we disable LP3
+        * watermarks entirely.
+        */
+       if (dev_priv->wm.pri_latency[3] == 0 &&
+           dev_priv->wm.spr_latency[3] == 0 &&
+           dev_priv->wm.cur_latency[3] == 0)
+               return;
+
+       dev_priv->wm.pri_latency[3] = 0;
+       dev_priv->wm.spr_latency[3] = 0;
+       dev_priv->wm.cur_latency[3] = 0;
+
+       DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+       intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
        intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN6(dev_priv)) {
                snb_wm_latency_quirk(dev_priv);
+               snb_wm_lp3_irq_quirk(dev_priv);
+       }
 }
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
index 127468785f7484495df89a00989c17388548f9c5..1f94b9affe4bbafddede75306c3109b1368bc172 100644 (file)
@@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
                return 0;
        }
 
+       /* We know for sure we don't want an async update here. Set
+        * state->legacy_cursor_update to false to prevent
+        * drm_atomic_helper_setup_commit() from auto-completing
+        * commit->flip_done.
+        */
+       state->legacy_cursor_update = false;
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;
index 9dc3fcbd290bef915cea9d12472b79b3dd36db4a..c6635f23918a8c1ec07531fc6aeffede5e6e528f 100644 (file)
@@ -807,7 +807,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
 static void vc4_plane_atomic_async_update(struct drm_plane *plane,
                                          struct drm_plane_state *state)
 {
-       struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+       struct vc4_plane_state *vc4_state, *new_vc4_state;
 
        if (plane->state->fb != state->fb) {
                vc4_plane_async_set_fb(plane, state->fb);
@@ -828,7 +828,18 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_y = state->src_y;
 
        /* Update the display list based on the new crtc_x/y. */
-       vc4_plane_atomic_check(plane, plane->state);
+       vc4_plane_atomic_check(plane, state);
+
+       new_vc4_state = to_vc4_plane_state(state);
+       vc4_state = to_vc4_plane_state(plane->state);
+
+       /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
+       vc4_state->dlist[vc4_state->pos0_offset] =
+               new_vc4_state->dlist[vc4_state->pos0_offset];
+       vc4_state->dlist[vc4_state->pos2_offset] =
+               new_vc4_state->dlist[vc4_state->pos2_offset];
+       vc4_state->dlist[vc4_state->ptr0_offset] =
+               new_vc4_state->dlist[vc4_state->ptr0_offset];
 
        /* Note that we can't just call vc4_plane_write_dlist()
         * because that would smash the context data that the HVS is
index a7513a8a8e3728d0de762d62f7337f527df34544..d6106e1a0d4af597d04cb833215c3b27c0b2aa0d 100644 (file)
@@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
 
                out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
 
+               /* fallthrough */
+
+       case KVP_OP_GET_IP_INFO:
                utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
                                MAX_ADAPTER_ID_SIZE,
                                UTF16_LITTLE_ENDIAN,
@@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy)
                process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
                break;
        case KVP_OP_GET_IP_INFO:
-               /* We only need to pass on message->kvp_hdr.operation.  */
+               /*
+                * We only need to pass on the info of operation, adapter_id
+                * and addr_family to the userland kvp daemon.
+                */
+               process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
                break;
        case KVP_OP_SET:
                switch (in_msg->body.kvp_set.data.value_type) {
@@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy)
 
                }
 
-               break;
-
-       case KVP_OP_GET:
+               /*
+                * The key is always a string - utf16 encoding.
+                */
                message->body.kvp_set.data.key_size =
                        utf16s_to_utf8s(
                        (wchar_t *)in_msg->body.kvp_set.data.key,
@@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy)
                        UTF16_LITTLE_ENDIAN,
                        message->body.kvp_set.data.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+
+               break;
+
+       case KVP_OP_GET:
+               message->body.kvp_get.data.key_size =
+                       utf16s_to_utf8s(
+                       (wchar_t *)in_msg->body.kvp_get.data.key,
+                       in_msg->body.kvp_get.data.key_size,
+                       UTF16_LITTLE_ENDIAN,
+                       message->body.kvp_get.data.key,
+                       HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
                break;
 
        case KVP_OP_DELETE:
index bb2cd29e165885d1697e4d77614a42333c5457dc..d8f7000a466aa4d2d3d79874428738fec70a4324 100644 (file)
@@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
        entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
-       entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+       entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+                (BIT_ULL(52)-1)) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
index f3ccf025108b4c377f2f9bc6de80e8222b817a01..41a4b8808802b8bcc30106c37b748eb3507943c0 100644 (file)
@@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu,
                        }
 
                        if (old_ce)
-                               iounmap(old_ce);
+                               memunmap(old_ce);
 
                        ret = 0;
                        if (devfn < 0x80)
index db301efe126d4ac9cf2aeb606489565135db7e7d..88715090752670a5701cde52f8a4ce11baec1da4 100644 (file)
@@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        pr_err("%s: Page request without PASID: %08llx %08llx\n",
                               iommu->name, ((unsigned long long *)req)[0],
                               ((unsigned long long *)req)[1]);
-                       goto bad_req;
+                       goto no_pasid;
                }
 
                if (!svm || svm->pasid != req->pasid) {
index b98a031895803a16b8e2d9d0df0a4b1686789aef..ddf3a492e1d59c1fd867f12881e6c3391038cf0f 100644 (file)
@@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 
 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 {
+       if (!domain->mmu)
+               return;
+
        /*
         * Disable the context. Flush the TLB as required when modifying the
         * context registers.
index 31d1f4ab915ea7e07f8843d1feb99217409a1af4..65a933a21e685b9c97fadc7662e34ad31c088798 100644 (file)
@@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
        }
 
        if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
-               dprintk(1, "%s: transmit queue full\n", __func__);
+               dprintk(2, "%s: transmit queue full\n", __func__);
                return -EBUSY;
        }
 
@@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
 {
        struct cec_log_addrs *las = &adap->log_addrs;
        struct cec_msg msg = { };
+       const unsigned int max_retries = 2;
+       unsigned int i;
        int err;
 
        if (cec_has_log_addr(adap, log_addr))
@@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
        /* Send poll message */
        msg.len = 1;
        msg.msg[0] = (log_addr << 4) | log_addr;
-       err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
-       /*
-        * While trying to poll the physical address was reset
-        * and the adapter was unconfigured, so bail out.
-        */
-       if (!adap->is_configuring)
-               return -EINTR;
+       for (i = 0; i < max_retries; i++) {
+               err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
-       if (err)
-               return err;
+               /*
+                * While trying to poll, the physical address was reset
+                * and the adapter was unconfigured, so bail out.
+                */
+               if (!adap->is_configuring)
+                       return -EINTR;
+
+               if (err)
+                       return err;
 
-       if (msg.tx_status & CEC_TX_STATUS_OK)
+               /*
+                * The message was aborted due to a disconnect or
+                * unconfigure, just bail out.
+                */
+               if (msg.tx_status & CEC_TX_STATUS_ABORTED)
+                       return -EINTR;
+               if (msg.tx_status & CEC_TX_STATUS_OK)
+                       return 0;
+               if (msg.tx_status & CEC_TX_STATUS_NACK)
+                       break;
+               /*
+                * Retry up to max_retries times if the message was neither
+                * OKed or NACKed. This can happen due to e.g. a Lost
+                * Arbitration condition.
+                */
+       }
+
+       /*
+        * If we are unable to get an OK or a NACK after max_retries attempts
+        * (and note that each attempt already consists of four polls), then
+        * then we assume that something is really weird and that it is not a
+        * good idea to try and claim this logical address.
+        */
+       if (i == max_retries)
                return 0;
 
        /*
index ca5d92942820a2fcb439ec1e7d277dd445214276..41d470d9ca943ea84653f9dd8bfb0ab83dafe1a4 100644 (file)
@@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state)
        ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint);
        if (ret) {
                dev_err(dev, "failed to parse endpoint\n");
-               ret = ret;
                goto put_node;
        }
 
index 452eb9b42140bb927e7af8287dcbe99b45ef51dd..447baaebca4486c4b5c3b8c5dc261ec3e4cd2ce8 100644 (file)
@@ -1844,14 +1844,12 @@ fail_mutex_destroy:
 static void cio2_pci_remove(struct pci_dev *pci_dev)
 {
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-       unsigned int i;
 
+       media_device_unregister(&cio2->media_dev);
        cio2_notifier_exit(cio2);
+       cio2_queues_exit(cio2);
        cio2_fbpt_exit_dummy(cio2);
-       for (i = 0; i < CIO2_QUEUES; i++)
-               cio2_queue_exit(cio2, &cio2->queue[i]);
        v4l2_device_unregister(&cio2->v4l2_dev);
-       media_device_unregister(&cio2->media_dev);
        media_device_cleanup(&cio2->media_dev);
        mutex_destroy(&cio2->lock);
 }
index 77fb7987b42f33cda57dc8b6627d4befc2d7a83e..13f2828d880df373ff494d10416ecb16be873bf7 100644 (file)
@@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
 
 static void isp_unregister_entities(struct isp_device *isp)
 {
+       media_device_unregister(&isp->media_dev);
+
        omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
        omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
        omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
@@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
        omap3isp_stat_unregister_entities(&isp->isp_hist);
 
        v4l2_device_unregister(&isp->v4l2_dev);
-       media_device_unregister(&isp->media_dev);
        media_device_cleanup(&isp->media_dev);
 }
 
index 1eb9132bfc85fdad1ab68d81442fb435be0ac0f0..b292cff26c8663636a3cc959688032c8d79e6b3d 100644 (file)
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
 #define MAX_WIDTH              4096U
 #define MIN_WIDTH              640U
 #define MAX_HEIGHT             2160U
-#define MIN_HEIGHT             480U
+#define MIN_HEIGHT             360U
 
 #define dprintk(dev, fmt, arg...) \
        v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
index af150a0395dfb55ef55bf606df4825ec5b807912..d82db738f174ef048d663d366b4950cd732c2b43 100644 (file)
@@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
 
 static const struct media_device_ops m2m_media_ops = {
        .req_validate = vb2_request_validate,
-       .req_queue = vb2_m2m_request_queue,
+       .req_queue = v4l2_m2m_request_queue,
 };
 
 static int vim2m_probe(struct platform_device *pdev)
index 6e37950292cd9b832d589e1728fba7d28d469794..5f2b033a7a42f1cb35dd13e40a498232bd3a225d 100644 (file)
@@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
                    p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
                        return -EINVAL;
 
+               if (p_mpeg2_slice_params->pad ||
+                   p_mpeg2_slice_params->picture.pad ||
+                   p_mpeg2_slice_params->sequence.pad)
+                       return -EINVAL;
+
                return 0;
 
        case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
index a3ef1f50a4b3496dcfbe7cb4332a3bab9a3acc56..481e3c65cf97a63202e1223b106c59b5e670651b 100644 (file)
@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
 }
 EXPORT_SYMBOL_GPL(v4l2_event_pending);
 
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+       struct v4l2_fh *fh = sev->fh;
+       unsigned int i;
+
+       lockdep_assert_held(&fh->subscribe_lock);
+       assert_spin_locked(&fh->vdev->fh_lock);
+
+       /* Remove any pending events for this subscription */
+       for (i = 0; i < sev->in_use; i++) {
+               list_del(&sev->events[sev_pos(sev, i)].list);
+               fh->navailable--;
+       }
+       list_del(&sev->list);
+}
+
 int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+       if (!found_ev)
+               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
                /* Already listening */
                kvfree(sev);
-               goto out_unlock;
-       }
-
-       if (sev->ops && sev->ops->add) {
+       } else if (sev->ops && sev->ops->add) {
                ret = sev->ops->add(sev, elems);
                if (ret) {
+                       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+                       __v4l2_event_unsubscribe(sev);
+                       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                        kvfree(sev);
-                       goto out_unlock;
                }
        }
 
-       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
-       list_add(&sev->list, &fh->subscribed);
-       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
-
-out_unlock:
        mutex_unlock(&fh->subscribe_lock);
 
        return ret;
@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 {
        struct v4l2_subscribed_event *sev;
        unsigned long flags;
-       int i;
 
        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (sev != NULL) {
-               /* Remove any pending events for this subscription */
-               for (i = 0; i < sev->in_use; i++) {
-                       list_del(&sev->events[sev_pos(sev, i)].list);
-                       fh->navailable--;
-               }
-               list_del(&sev->list);
-       }
+       if (sev != NULL)
+               __v4l2_event_unsubscribe(sev);
 
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
index d7806db222d83b87f39175fb53c06178d98b363e..1ed2465972acab4d0fae6aa4a8d28c966167d224 100644 (file)
@@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
-void vb2_m2m_request_queue(struct media_request *req)
+void v4l2_m2m_request_queue(struct media_request *req)
 {
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;
@@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req)
        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
 }
-EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
 
 /* Videobuf2 ioctl helpers */
 
index b2a0340f277e268739c288e6bb73f62321475c30..d8e3cc2dc7470d8deaa3b89a5855b22b034725db 100644 (file)
@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
 #endif
 
-static inline const struct atmel_ssc_platform_data * __init
+static inline const struct atmel_ssc_platform_data *
        atmel_ssc_get_driver_data(struct platform_device *pdev)
 {
        if (pdev->dev.of_node) {
index 313da31502626897a61a65606aa117d0d67dc83b..1540a7785e14743ae1b035aeb21d391af8516050 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/delay.h>
 #include <linux/bitops.h>
 #include <asm/uv/uv_hub.h>
+
+#include <linux/nospec.h>
+
 #include "gru.h"
 #include "grutables.h"
 #include "gruhandles.h"
@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
        /* Currently, only dump by gid is implemented */
        if (req.gid >= gru_max_gids)
                return -EINVAL;
+       req.gid = array_index_nospec(req.gid, gru_max_gids);
 
        gru = GID_TO_GRU(req.gid);
        ubuf = req.buf;
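
The array_index_nospec() call added above re-sanitizes req.gid after the bounds check so that a mispredicted branch cannot speculatively index past gru_max_gids (the usual Spectre-v1 pattern). A userspace sketch of the check-then-clamp shape; the stand-in clamp below is a plain conditional, whereas the kernel helper is built from a branchless mask:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for array_index_nospec(): keep the index inside
 * [0, size) even if the preceding branch is speculated around. The real
 * helper computes a mask instead of branching. */
static size_t index_nospec(size_t idx, size_t size)
{
	return idx < size ? idx : 0;
}

#define MAX_GIDS 8

int main(void)
{
	int table[MAX_GIDS] = { 0 };
	size_t gid = 5; /* hypothetical, user-supplied value */

	if (gid >= MAX_GIDS)
		return 1;
	gid = index_nospec(gid, MAX_GIDS);

	printf("table[%zu] = %d\n", gid, table[gid]);
	return 0;
}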
index 7bfd366d970dae374bae6acd36624eb23f719506..c4115bae5db187f1a331efd5c495b20bc31a5c86 100644 (file)
@@ -12,6 +12,7 @@
  *     - JMicron (hardware and technical support)
  */
 
+#include <linux/bitfield.h>
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
@@ -462,6 +463,9 @@ struct intel_host {
        u32     dsm_fns;
        int     drv_strength;
        bool    d3_retune;
+       bool    rpm_retune_ok;
+       u32     glk_rx_ctrl1;
+       u32     glk_tun_val;
 };
 
 static const guid_t intel_dsm_guid =
@@ -791,6 +795,77 @@ cleanup:
        return ret;
 }
 
+#ifdef CONFIG_PM
+#define GLK_RX_CTRL1   0x834
+#define GLK_TUN_VAL    0x840
+#define GLK_PATH_PLL   GENMASK(13, 8)
+#define GLK_DLY                GENMASK(6, 0)
+/* Workaround firmware failing to restore the tuning value */
+static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
+{
+       struct sdhci_pci_slot *slot = chip->slots[0];
+       struct intel_host *intel_host = sdhci_pci_priv(slot);
+       struct sdhci_host *host = slot->host;
+       u32 glk_rx_ctrl1;
+       u32 glk_tun_val;
+       u32 dly;
+
+       if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
+               return;
+
+       glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
+       glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
+
+       if (susp) {
+               intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
+               intel_host->glk_tun_val = glk_tun_val;
+               return;
+       }
+
+       if (!intel_host->glk_tun_val)
+               return;
+
+       if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
+               intel_host->rpm_retune_ok = true;
+               return;
+       }
+
+       dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
+                                 (intel_host->glk_tun_val << 1));
+       if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
+               return;
+
+       glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
+       sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
+
+       intel_host->rpm_retune_ok = true;
+       chip->rpm_retune = true;
+       mmc_retune_needed(host->mmc);
+       pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
+}
+
+static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
+{
+       if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+           !chip->rpm_retune)
+               glk_rpm_retune_wa(chip, susp);
+}
+
+static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+       glk_rpm_retune_chk(chip, true);
+
+       return sdhci_cqhci_runtime_suspend(chip);
+}
+
+static int glk_runtime_resume(struct sdhci_pci_chip *chip)
+{
+       glk_rpm_retune_chk(chip, false);
+
+       return sdhci_cqhci_runtime_resume(chip);
+}
+#endif
+
 #ifdef CONFIG_ACPI
 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
 {
@@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
        .resume                 = sdhci_cqhci_resume,
 #endif
 #ifdef CONFIG_PM
-       .runtime_suspend        = sdhci_cqhci_runtime_suspend,
-       .runtime_resume         = sdhci_cqhci_runtime_resume,
+       .runtime_suspend        = glk_runtime_suspend,
+       .runtime_resume         = glk_runtime_resume,
 #endif
        .quirks                 = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
        .quirks2                = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
                device_init_wakeup(&pdev->dev, true);
 
        if (slot->cd_idx >= 0) {
-               ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
+               ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
                                           slot->cd_override_level, 0, NULL);
+               if (ret && ret != -EPROBE_DEFER)
+                       ret = mmc_gpiod_request_cd(host->mmc, NULL,
+                                                  slot->cd_idx,
+                                                  slot->cd_override_level,
+                                                  0, NULL);
                if (ret == -EPROBE_DEFER)
                        goto remove;
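
The glk_rpm_retune_wa() hunk above rebuilds the Rx delay field from the PLL path field plus twice the saved tuning value, all through GENMASK()/FIELD_GET()/FIELD_PREP(). A self-contained sketch of that bitfield manipulation with the helpers re-declared locally; the register layout and the values are purely illustrative:

#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK()/FIELD_GET()/FIELD_PREP(). */
#define GENMASK(h, l)    ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)  (((v) & (m)) >> __builtin_ctz(m))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define PATH_PLL GENMASK(13, 8) /* illustrative field layout */
#define DLY      GENMASK(6, 0)

int main(void)
{
	unsigned int rx_ctrl1 = 0x00001f40; /* hypothetical register value */
	unsigned int tun_val = 0x5;         /* hypothetical saved tuning value */
	unsigned int dly;

	/* New delay = PLL path field + (tuning value << 1), as in the quirk. */
	dly = FIELD_PREP(DLY, FIELD_GET(PATH_PLL, rx_ctrl1) + (tun_val << 1));
	rx_ctrl1 = (rx_ctrl1 & ~DLY) | dly;

	printf("rx_ctrl1 = 0x%08x\n", rx_ctrl1); /* prints 0x00001f29 */
	return 0;
}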
 
index fb33f6be7c4ff7306f391931e1843f6712edbdb7..ad720494e8f78dfd74995ba067a8ea973f555980 100644 (file)
@@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
        int ret;
 
        nand_np = dev->of_node;
-       nfc_np = of_find_compatible_node(dev->of_node, NULL,
-                                        "atmel,sama5d3-nfc");
+       nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
        if (!nfc_np) {
                dev_err(dev, "Could not find device node for sama5d3-nfc\n");
                return -ENODEV;
@@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
        }
 
        if (caps->legacy_of_bindings) {
+               struct device_node *nfc_node;
                u32 ale_offs = 21;
 
                /*
                 * If we are parsing legacy DT props and the DT contains a
                 * valid NFC node, forward the request to the sama5 logic.
                 */
-               if (of_find_compatible_node(pdev->dev.of_node, NULL,
-                                           "atmel,sama5d3-nfc"))
+               nfc_node = of_get_compatible_child(pdev->dev.of_node,
+                                                  "atmel,sama5d3-nfc");
+               if (nfc_node) {
                        caps = &atmel_sama5_nand_caps;
+                       of_node_put(nfc_node);
+               }
 
                /*
                 * Even if the compatible says we are dealing with an
index ef75dfa62a4f816f49d6008dcc3bbd7561293530..699d3cf49c6da04b49180cda81d3b5d05fc66563 100644 (file)
 #define        NAND_VERSION_MINOR_SHIFT        16
 
 /* NAND OP_CMDs */
-#define        PAGE_READ                       0x2
-#define        PAGE_READ_WITH_ECC              0x3
-#define        PAGE_READ_WITH_ECC_SPARE        0x4
-#define        PROGRAM_PAGE                    0x6
-#define        PAGE_PROGRAM_WITH_ECC           0x7
-#define        PROGRAM_PAGE_SPARE              0x9
-#define        BLOCK_ERASE                     0xa
-#define        FETCH_ID                        0xb
-#define        RESET_DEVICE                    0xd
+#define        OP_PAGE_READ                    0x2
+#define        OP_PAGE_READ_WITH_ECC           0x3
+#define        OP_PAGE_READ_WITH_ECC_SPARE     0x4
+#define        OP_PROGRAM_PAGE                 0x6
+#define        OP_PAGE_PROGRAM_WITH_ECC        0x7
+#define        OP_PROGRAM_PAGE_SPARE           0x9
+#define        OP_BLOCK_ERASE                  0xa
+#define        OP_FETCH_ID                     0xb
+#define        OP_RESET_DEVICE                 0xd
 
 /* Default Value for NAND_DEV_CMD_VLD */
 #define NAND_DEV_CMD_VLD_VAL           (READ_START_VLD | WRITE_START_VLD | \
@@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
 
        if (read) {
                if (host->use_ecc)
-                       cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+                       cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
                else
-                       cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+                       cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
        } else {
-                       cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+               cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
        }
 
        if (host->use_ecc) {
@@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
         * in use. we configure the controller to perform a raw read of 512
         * bytes to read onfi params
         */
-       nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
+       nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, 0);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 
        nandc_set_reg(nandc, NAND_FLASH_CMD,
-                     BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+                     OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, page_addr);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0,
@@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
        if (column == -1)
                return 0;
 
-       nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+       nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
        nandc_set_reg(nandc, NAND_ADDR0, column);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 
-       nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+       nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
 
        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
index d846428ef038e6b76f84591f73d40e9a4d30db33..04cedd3a2bf6634c5d1f05ef3d27ba302d12935f 100644 (file)
@@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
                ndelay(cqspi->wr_delay);
 
        while (remaining > 0) {
+               size_t write_words, mod_bytes;
+
                write_bytes = remaining > page_size ? page_size : remaining;
-               iowrite32_rep(cqspi->ahb_base, txbuf,
-                             DIV_ROUND_UP(write_bytes, 4));
+               write_words = write_bytes / 4;
+               mod_bytes = write_bytes % 4;
+               /* Write 4 bytes at a time then single bytes. */
+               if (write_words) {
+                       iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
+                       txbuf += (write_words * 4);
+               }
+               if (mod_bytes) {
+                       unsigned int temp = 0xFFFFFFFF;
+
+                       memcpy(&temp, txbuf, mod_bytes);
+                       iowrite32(temp, cqspi->ahb_base);
+                       txbuf += mod_bytes;
+               }
 
                if (!wait_for_completion_timeout(&cqspi->transfer_complete,
                                        msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
                        goto failwr;
                }
 
-               txbuf += write_bytes;
                remaining -= write_bytes;
 
                if (remaining > 0)
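
The rewritten loop above pushes whole 32-bit words with iowrite32_rep() and then handles the remainder byte-wise through a temporary word, so the controller access never reads past the end of the caller's buffer (the old DIV_ROUND_UP() variant could copy up to three extra bytes). A quick arithmetic sketch with a hypothetical chunk size:

#include <stdio.h>
#include <stddef.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t write_bytes = 10;              /* hypothetical chunk size */
	size_t write_words = write_bytes / 4; /* 2 whole 32-bit words */
	size_t mod_bytes = write_bytes % 4;   /* 2 trailing bytes */

	/* The old code issued DIV_ROUND_UP(10, 4) = 3 word writes, i.e. it
	 * read 12 bytes from a 10-byte buffer. */
	printf("old: %zu words, new: %zu words + %zu tail bytes\n",
	       DIV_ROUND_UP(write_bytes, 4), write_words, mod_bytes);
	return 0;
}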
index 3e54e31889c7b53bbba362a5bd497279dc91b128..93c9bc8931fcd9137c99f95c2f34d31e521eb557 100644 (file)
@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
  * @nor:       pointer to a 'struct spi_nor'
  * @addr:      offset in the serial flash memory
  * @len:       number of bytes to read
- * @buf:       buffer where the data is copied into
+ * @buf:       buffer where the data is copied into (dma-safe memory)
  *
  * Return: 0 on success, -errno otherwise.
  */
@@ -2521,6 +2521,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
        return left->size - right->size;
 }
 
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map:       the erase map of the SPI NOR
+ * @erase_mask:        the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+       struct spi_nor_erase_type *erase_type = map->erase_type;
+       int i;
+       u8 sorted_erase_mask = 0;
+
+       if (!erase_mask)
+               return 0;
+
+       /* Replicate the sort done for the map's erase types. */
+       for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+               if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+                       sorted_erase_mask |= BIT(i);
+
+       return sorted_erase_mask;
+}
+
 /**
  * spi_nor_regions_sort_erase_types() - sort erase types in each region
  * @map:       the erase map of the SPI NOR
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
 static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
 {
        struct spi_nor_erase_region *region = map->regions;
-       struct spi_nor_erase_type *erase_type = map->erase_type;
-       int i;
        u8 region_erase_mask, sorted_erase_mask;
 
        while (region) {
                region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
 
-               /* Replicate the sort done for the map's erase types. */
-               sorted_erase_mask = 0;
-               for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
-                       if (erase_type[i].size &&
-                           region_erase_mask & BIT(erase_type[i].idx))
-                               sorted_erase_mask |= BIT(i);
+               sorted_erase_mask = spi_nor_sort_erase_mask(map,
+                                                           region_erase_mask);
 
                /* Overwrite erase mask. */
                region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
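
spi_nor_sort_erase_mask() above re-expresses a region's erase mask in the sorted-by-size order of the map's erase types, using each type's idx field to recover its original BFPT position. A standalone sketch of that translation with a hypothetical two-type map:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define SNOR_ERASE_TYPE_MAX 4

struct erase_type {
	uint32_t size; /* erase size in bytes, 0 if unused */
	uint8_t idx;   /* position of this type in the original BFPT order */
};

/* Mirrors spi_nor_sort_erase_mask(): translate a mask expressed in
 * original-index space into sorted-by-size space. */
static uint8_t sort_erase_mask(const struct erase_type *et, uint8_t erase_mask)
{
	uint8_t sorted = 0;
	int i;

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (et[i].size && (erase_mask & BIT(et[i].idx)))
			sorted |= BIT(i);
	return sorted;
}

int main(void)
{
	/* Hypothetical map: entry 0 is a 4K type that sat at BFPT index 1,
	 * entry 1 is a 64K type that sat at BFPT index 0. */
	struct erase_type et[SNOR_ERASE_TYPE_MAX] = {
		{ 4096, 1 }, { 65536, 0 }, { 0, 0 }, { 0, 0 },
	};

	/* A region supporting only the 64K type (BIT(0) in BFPT order)
	 * ends up with BIT(1) in sorted order: prints 0x2. */
	printf("sorted mask = 0x%x\n", (unsigned int)sort_erase_mask(et, BIT(0)));
	return 0;
}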
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
  * spi_nor_get_map_in_use() - get the configuration map in use
  * @nor:       pointer to a 'struct spi_nor'
  * @smpt:      pointer to the sector map parameter table
+ * @smpt_len:  sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
  */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt)
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+                                        u8 smpt_len)
 {
-       const u32 *ret = NULL;
-       u32 i, addr;
+       const u32 *ret;
+       u8 *buf;
+       u32 addr;
        int err;
+       u8 i;
        u8 addr_width, read_opcode, read_dummy;
-       u8 read_data_mask, data_byte, map_id;
+       u8 read_data_mask, map_id;
+
+       /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+       buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+       if (!buf)
+               return ERR_PTR(-ENOMEM);
 
        addr_width = nor->addr_width;
        read_dummy = nor->read_dummy;
        read_opcode = nor->read_opcode;
 
        map_id = 0;
-       i = 0;
        /* Determine if there are any optional Detection Command Descriptors */
-       while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) {
+       for (i = 0; i < smpt_len; i += 2) {
+               if (smpt[i] & SMPT_DESC_TYPE_MAP)
+                       break;
+
                read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
                nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
                nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
                nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
                addr = smpt[i + 1];
 
-               err = spi_nor_read_raw(nor, addr, 1, &data_byte);
-               if (err)
+               err = spi_nor_read_raw(nor, addr, 1, buf);
+               if (err) {
+                       ret = ERR_PTR(err);
                        goto out;
+               }
 
                /*
                 * Build an index value that is used to select the Sector Map
                 * Configuration that is currently in use.
                 */
-               map_id = map_id << 1 | !!(data_byte & read_data_mask);
-               i = i + 2;
+               map_id = map_id << 1 | !!(*buf & read_data_mask);
        }
 
-       /* Find the matching configuration map */
-       while (SMPT_MAP_ID(smpt[i]) != map_id) {
+       /*
+        * If command descriptors are provided, they always precede map
+        * descriptors in the table. There is no need to restart the
+        * iteration over the smpt array from the beginning.
+        *
+        * Find the matching configuration map.
+        */
+       ret = ERR_PTR(-EINVAL);
+       while (i < smpt_len) {
+               if (SMPT_MAP_ID(smpt[i]) == map_id) {
+                       ret = smpt + i;
+                       break;
+               }
+
+               /*
+                * If there are no more configuration map descriptors and no
+                * configuration ID matched the configuration identifier, the
+                * sector address map is unknown.
+                */
                if (smpt[i] & SMPT_DESC_END)
-                       goto out;
+                       break;
+
                /* increment the table index to the next map */
                i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
        }
 
-       ret = smpt + i;
        /* fall through */
 out:
+       kfree(buf);
        nor->addr_width = addr_width;
        nor->read_dummy = read_dummy;
        nor->read_opcode = read_opcode;
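
In spi_nor_get_map_in_use() above, each optional detection descriptor contributes one bit to map_id: the probed byte is ANDed with the descriptor's read-data mask and shifted into the accumulator, and the resulting index then selects the matching configuration map. A minimal sketch of that accumulation with hypothetical probe results:

#include <stdio.h>

int main(void)
{
	/* Hypothetical results of the per-descriptor register reads and
	 * their SMPT_CMD_READ_DATA masks. */
	const unsigned char data_byte[] = { 0x04, 0x00, 0x08 };
	const unsigned char read_mask[] = { 0x04, 0x02, 0x08 };
	unsigned int map_id = 0;
	int i;

	for (i = 0; i < 3; i++)
		map_id = map_id << 1 | !!(data_byte[i] & read_mask[i]);

	/* bits shifted in: 1, 0, 1 -> map_id == 5 */
	printf("map_id = %u\n", map_id);
	return 0;
}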
@@ -2946,7 +3000,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
        u64 offset;
        u32 region_count;
        int i, j;
-       u8 erase_type;
+       u8 erase_type, uniform_erase_type;
 
        region_count = SMPT_MAP_REGION_COUNT(*smpt);
        /*
@@ -2959,7 +3013,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
                return -ENOMEM;
        map->regions = region;
 
-       map->uniform_erase_type = 0xff;
+       uniform_erase_type = 0xff;
        offset = 0;
        /* Populate regions. */
        for (i = 0; i < region_count; i++) {
@@ -2974,12 +3028,15 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
                 * Save the erase types that are supported in all regions and
                 * can erase the entire flash memory.
                 */
-               map->uniform_erase_type &= erase_type;
+               uniform_erase_type &= erase_type;
 
                offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
                         region[i].size;
        }
 
+       map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+                                                         uniform_erase_type);
+
        spi_nor_region_mark_end(&region[i - 1]);
 
        return 0;
@@ -3020,9 +3077,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
        for (i = 0; i < smpt_header->length; i++)
                smpt[i] = le32_to_cpu(smpt[i]);
 
-       sector_map = spi_nor_get_map_in_use(nor, smpt);
-       if (!sector_map) {
-               ret = -EINVAL;
+       sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+       if (IS_ERR(sector_map)) {
+               ret = PTR_ERR(sector_map);
                goto out;
        }
 
@@ -3125,7 +3182,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
        if (err)
                goto exit;
 
-       /* Parse other parameter headers. */
+       /* Parse optional parameter tables. */
        for (i = 0; i < header.nph; i++) {
                param_header = &param_headers[i];
 
@@ -3138,8 +3195,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
                        break;
                }
 
-               if (err)
-                       goto exit;
+               if (err) {
+                       dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+                                SFDP_PARAM_HEADER_ID(param_header));
+                       /*
+                        * Let's not drop all information we extracted so far
+                        * if optional table parsers fail. In case of failing,
+                        * each optional parser is responsible to roll back to
+                        * the previously known spi_nor data.
+                        */
+                       err = 0;
+               }
        }
 
 exit:
index 49163570a63afad2e36777993a57319370d6c8b0..3b3f88ffab53cded04a2c1586727bf82472f43ad 100644 (file)
@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(can_put_echo_skb);
 
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+{
+       struct can_priv *priv = netdev_priv(dev);
+       struct sk_buff *skb = priv->echo_skb[idx];
+       struct canfd_frame *cf;
+
+       if (idx >= priv->echo_skb_max) {
+               netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+                          __func__, idx, priv->echo_skb_max);
+               return NULL;
+       }
+
+       if (!skb) {
+               netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
+                          __func__, idx);
+               return NULL;
+       }
+
+       /* Using "struct canfd_frame::len" for the frame
+        * length is supported on both CAN and CANFD frames.
+        */
+       cf = (struct canfd_frame *)skb->data;
+       *len_ptr = cf->len;
+       priv->echo_skb[idx] = NULL;
+
+       return skb;
+}
+
 /*
  * Get the skb from the stack and loop it back locally
  *
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
  */
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
 {
-       struct can_priv *priv = netdev_priv(dev);
-
-       BUG_ON(idx >= priv->echo_skb_max);
-
-       if (priv->echo_skb[idx]) {
-               struct sk_buff *skb = priv->echo_skb[idx];
-               struct can_frame *cf = (struct can_frame *)skb->data;
-               u8 dlc = cf->can_dlc;
+       struct sk_buff *skb;
+       u8 len;
 
-               netif_rx(priv->echo_skb[idx]);
-               priv->echo_skb[idx] = NULL;
+       skb = __can_get_echo_skb(dev, idx, &len);
+       if (!skb)
+               return 0;
 
-               return dlc;
-       }
+       netif_rx(skb);
 
-       return 0;
+       return len;
 }
 EXPORT_SYMBOL_GPL(can_get_echo_skb);
 
index 8e972ef0863769e88a2c9d6cec37408d66566292..75ce11395ee8196f0dc3ed8b1368e591047d5edd 100644 (file)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
 /* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO        8
-#define FLEXCAN_TX_MB_OFF_FIFO         9
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO                8
 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP   0
-#define FLEXCAN_TX_MB_OFF_TIMESTAMP            1
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST      (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST       63
-#define FLEXCAN_IFLAG_MB(x)            BIT(x)
+#define FLEXCAN_TX_MB                          63
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST      (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST       (FLEXCAN_TX_MB - 1)
+#define FLEXCAN_IFLAG_MB(x)            BIT(x & 0x1f)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE        BIT(5)
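
With the TX mailbox fixed at 63, FLEXCAN_IFLAG_MB() masks the index with 0x1f so the macro yields the bit position within whichever 32-bit iflag register covers that mailbox; the interrupt handler then tests it against iflag2. A quick check of the arithmetic (the macro is re-declared locally, with the argument parenthesized for safety):

#include <stdio.h>

#define BIT(n)              (1u << (n))
#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
#define FLEXCAN_TX_MB       63

int main(void)
{
	/* Mailbox 63 lives in iflag2: 63 & 0x1f == 31, so the macro selects
	 * bit 31 of that register. */
	printf("mask = 0x%08x\n", FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB));
	return 0;
}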
@@ -259,9 +258,7 @@ struct flexcan_priv {
        struct can_rx_offload offload;
 
        struct flexcan_regs __iomem *regs;
-       struct flexcan_mb __iomem *tx_mb;
        struct flexcan_mb __iomem *tx_mb_reserved;
-       u8 tx_mb_idx;
        u32 reg_ctrl_default;
        u32 reg_imask1_default;
        u32 reg_imask2_default;
@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
 static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 can_id;
        u32 data;
@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
 
        if (cf->can_dlc > 0) {
                data = be32_to_cpup((__be32 *)&cf->data[0]);
-               priv->write(data, &priv->tx_mb->data[0]);
+               priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
        }
        if (cf->can_dlc > 4) {
                data = be32_to_cpup((__be32 *)&cf->data[4]);
-               priv->write(data, &priv->tx_mb->data[1]);
+               priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
        }
 
        can_put_echo_skb(skb, dev, 0);
 
-       priv->write(can_id, &priv->tx_mb->can_id);
-       priv->write(ctrl, &priv->tx_mb->can_ctrl);
+       priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
+       priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
 
        /* Errata ERR005829 step8:
         * Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
 static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
        struct sk_buff *skb;
        struct can_frame *cf;
        bool rx_errors = false, tx_errors = false;
+       u32 timestamp;
+
+       timestamp = priv->read(&regs->timer) << 16;
 
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
        if (tx_errors)
                dev->stats.tx_errors++;
 
-       can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 }
 
 static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
        struct sk_buff *skb;
        struct can_frame *cf;
        enum can_state new_state, rx_state, tx_state;
        int flt;
        struct can_berr_counter bec;
+       u32 timestamp;
+
+       timestamp = priv->read(&regs->timer) << 16;
 
        flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
        if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
        if (unlikely(new_state == CAN_STATE_BUS_OFF))
                can_bus_off(dev);
 
-       can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 }
 
 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
                        priv->write(BIT(n - 32), &regs->iflag2);
        } else {
                priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-               priv->read(&regs->timer);
        }
 
+       /* Read the Free Running Timer. It is optional but recommended
+        * to unlock the mailbox as soon as possible and make it available
+        * for reception.
+        */
+       priv->read(&regs->timer);
+
        return 1;
 }
 
@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
        struct flexcan_regs __iomem *regs = priv->regs;
        u32 iflag1, iflag2;
 
-       iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
-       iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
-               ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+       iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
+               ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
+       iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
 
        return (u64)iflag2 << 32 | iflag1;
 }
@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->regs;
        irqreturn_t handled = IRQ_NONE;
-       u32 reg_iflag1, reg_esr;
+       u32 reg_iflag2, reg_esr;
        enum can_state last_state = priv->can.state;
 
-       reg_iflag1 = priv->read(&regs->iflag1);
-
        /* reception interrupt */
        if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
                u64 reg_iflag;
@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                                break;
                }
        } else {
+               u32 reg_iflag1;
+
+               reg_iflag1 = priv->read(&regs->iflag1);
                if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
                        handled = IRQ_HANDLED;
                        can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                }
        }
 
+       reg_iflag2 = priv->read(&regs->iflag2);
+
        /* transmission complete interrupt */
-       if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+       if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
+               u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
+
                handled = IRQ_HANDLED;
-               stats->tx_bytes += can_get_echo_skb(dev, 0);
+               stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
+                                                              0, reg_ctrl << 16);
                stats->tx_packets++;
                can_led_event(dev, CAN_LED_EVENT_TX);
 
                /* after sending a RTR frame MB is in RX mode */
                priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                           &priv->tx_mb->can_ctrl);
-               priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+                           &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+               priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
                netif_wake_queue(dev);
        }
 
@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
        reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
        reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
                FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
-               FLEXCAN_MCR_IDAM_C;
+               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
 
-       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
                reg_mcr &= ~FLEXCAN_MCR_FEN;
-               reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
-       } else {
-               reg_mcr |= FLEXCAN_MCR_FEN |
-                       FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
-       }
+       else
+               reg_mcr |= FLEXCAN_MCR_FEN;
+
        netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
        priv->write(reg_mcr, &regs->mcr);
 
@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
                priv->write(reg_ctrl2, &regs->ctrl2);
        }
 
-       /* clear and invalidate all mailboxes first */
-       for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
-               priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
-                           &regs->mb[i].can_ctrl);
-       }
-
        if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-               for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
+               for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
                        priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
                                    &regs->mb[i].can_ctrl);
+               }
+       } else {
+               /* clear and invalidate unused mailboxes first */
+               for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
+                       priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+                                   &regs->mb[i].can_ctrl);
+               }
        }
 
        /* Errata ERR005829: mark first TX mailbox as INACTIVE */
@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
 
        /* mark TX mailbox as INACTIVE */
        priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                   &priv->tx_mb->can_ctrl);
+                   &regs->mb[FLEXCAN_TX_MB].can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
        priv->write(0x0, &regs->rxgmask);
@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->devtype_data = devtype_data;
        priv->reg_xceiver = reg_xceiver;
 
-       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-               priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
                priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
-       } else {
-               priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+       else
                priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
-       }
-       priv->tx_mb = &regs->mb[priv->tx_mb_idx];
 
-       priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
-       priv->reg_imask2_default = 0;
+       priv->reg_imask1_default = 0;
+       priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
 
        priv->offload.mailbox_read = flexcan_mailbox_read;
 
index 11662f479e760ba77f613c90bfc8026b005da3ea..771a4608373978c31b7011a45cf5659f543820c1 100644 (file)
@@ -24,6 +24,9 @@
 
 #define RCAR_CAN_DRV_NAME      "rcar_can"
 
+#define RCAR_SUPPORTED_CLOCKS  (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
+                                BIT(CLKR_CLKEXT))
+
 /* Mailbox configuration:
  * mailbox 60 - 63 - Rx FIFO mailboxes
  * mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
                goto fail_clk;
        }
 
-       if (clock_select >= ARRAY_SIZE(clock_names)) {
+       if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
                err = -EINVAL;
                dev_err(&pdev->dev, "invalid CAN clock selected\n");
                goto fail_clk;
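
The new check validates the selected clock against an explicit bitmask of supported sources instead of only the upper bound of the clock_names[] array, so a selector that is in range but has no bit set in RCAR_SUPPORTED_CLOCKS is now rejected as well. A small sketch of the same pattern with hypothetical values (not the driver's real CLKR_* indices):

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch only: hypothetical mask with no support for selector 2. */
#define DEMO_SUPPORTED_CLOCKS	(BIT(0) | BIT(1) | BIT(3))

static bool demo_clock_select_valid(u32 clock_select)
{
	if (clock_select >= 32)		/* keep BIT() well-defined for u32 */
		return false;

	return BIT(clock_select) & DEMO_SUPPORTED_CLOCKS;
}
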
index c7d05027a7a07ea34f862aa3595ebf6026a93897..2ce4fa8698c73b437051de20e29eaad70b263f64 100644 (file)
@@ -211,7 +211,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
 
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+                               struct sk_buff *skb, u32 timestamp)
+{
+       struct can_rx_offload_cb *cb;
+       unsigned long flags;
+
+       if (skb_queue_len(&offload->skb_queue) >
+           offload->skb_queue_len_max)
+               return -ENOMEM;
+
+       cb = can_rx_offload_get_cb(skb);
+       cb->timestamp = timestamp;
+
+       spin_lock_irqsave(&offload->skb_queue.lock, flags);
+       __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+       spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+       can_rx_offload_schedule(offload);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+                                        unsigned int idx, u32 timestamp)
+{
+       struct net_device *dev = offload->dev;
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       u8 len;
+       int err;
+
+       skb = __can_get_echo_skb(dev, idx, &len);
+       if (!skb)
+               return 0;
+
+       err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+       if (err) {
+               stats->rx_errors++;
+               stats->tx_fifo_errors++;
+       }
+
+       return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+                             struct sk_buff *skb)
 {
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max)
@@ -222,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
 
 static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
 {
index 53e320c92a8be21e286ab4f7ada738fd223a08fa..ddaf46239e39e92337a4ed54ecb3feb1ab94cc59 100644 (file)
@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
 {
        struct hi3110_priv *priv = netdev_priv(net);
        struct spi_device *spi = priv->spi;
-       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
        int ret;
 
        ret = open_candev(net);
index b939a4c10b8409f5fe58e700204fe4e5183b23c0..c89c7d4900d75068badc7a7234c36b5b7345f675 100644 (file)
@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                        context = &priv->tx_contexts[i];
 
                        context->echo_index = i;
-                       can_put_echo_skb(skb, netdev, context->echo_index);
                        ++priv->active_tx_contexts;
                        if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
                                netif_stop_queue(netdev);
@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                dev_kfree_skb(skb);
                spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
-               can_free_echo_skb(netdev, context->echo_index);
                context->echo_index = dev->max_tx_urbs;
                --priv->active_tx_contexts;
                netif_wake_queue(netdev);
@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 
        context->priv = priv;
 
+       can_put_echo_skb(skb, netdev, context->echo_index);
+
        usb_fill_bulk_urb(urb, dev->udev,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
index c084bae5ec0a4d936f6a7f121272d972903a8023..5fc0be564274375f3d5c579521a2d3b89ecd4a88 100644 (file)
@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
                                        new_state : CAN_STATE_ERROR_ACTIVE;
 
                        can_change_state(netdev, cf, tx_state, rx_state);
+
+                       if (priv->can.restart_ms &&
+                           old_state >= CAN_STATE_BUS_OFF &&
+                           new_state < CAN_STATE_BUS_OFF)
+                               cf->can_id |= CAN_ERR_RESTARTED;
                }
 
                if (new_state == CAN_STATE_BUS_OFF) {
@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
 
                        can_bus_off(netdev);
                }
-
-               if (priv->can.restart_ms &&
-                   old_state >= CAN_STATE_BUS_OFF &&
-                   new_state < CAN_STATE_BUS_OFF)
-                       cf->can_id |= CAN_ERR_RESTARTED;
        }
 
        if (!skb) {
index 0678a38b1af4588135f04074c56b37cfe6d09b70..f3d5bda012a107e430bca91bd09dd1d15be1bbf2 100644 (file)
 #include <linux/slab.h>
 #include <linux/usb.h>
 
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
 #define UCAN_DRIVER_NAME "ucan"
 #define UCAN_MAX_RX_URBS 8
 /* the CAN controller needs a while to enable/disable the bus */
@@ -1575,11 +1571,8 @@ err_firmware_needs_update:
 /* disconnect the device */
 static void ucan_disconnect(struct usb_interface *intf)
 {
-       struct usb_device *udev;
        struct ucan_priv *up = usb_get_intfdata(intf);
 
-       udev = interface_to_usbdev(intf);
-
        usb_set_intfdata(intf, NULL);
 
        if (up) {
index be1506169076f0a89f6a621d01dce81afe720ba7..0de487a8f0eb22d7b033c4f9751b68cf2454286e 100644 (file)
@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define PMF_DMAE_C(bp)                 (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
                                         E1HVN_MAX)
 
+/* Following is the DMAE channel number allocation for the clients.
+ *   MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ *   Driver: 0-3 and 8-11 (for PF dmae operations)
+ *           4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C                 13 /* Channel for FW DMAE operations */
+
 /* PCIE link and speed */
 #define PCICFG_LINK_WIDTH              0x1f00000
 #define PCICFG_LINK_WIDTH_SHIFT                20
index 3f4d2c8da21a3a848b4149758883333522b6f77a..a9eaaf3e73a4c41f6dc6808f723a400750fd1ba1 100644 (file)
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
        rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
        rdata->path_id          = BP_PATH(bp);
        rdata->network_cos_mode = start_params->network_cos_mode;
+       rdata->dmae_cmd_id      = BNX2X_FW_DMAE_C;
 
        rdata->vxlan_dst_port   = cpu_to_le16(start_params->vxlan_dst_port);
        rdata->geneve_dst_port  = cpu_to_le16(start_params->geneve_dst_port);
index dd85d790f638939e552042c642abceb108d61fb3..d4c30011752992ccf6d0f48d6f119e6950041e19 100644 (file)
@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        } else {
                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
                        if (dev->features & NETIF_F_RXCSUM)
-                               cpr->rx_l4_csum_errors++;
+                               bnapi->cp_ring.rx_l4_csum_errors++;
                }
        }
 
@@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
        return rc;
 }
 
+static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
+                                      u32 ring_id, u32 *prod, u32 *cons)
+{
+       struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_dbg_ring_info_get_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
+       req.ring_type = ring_type;
+       req.fw_ring_id = cpu_to_le32(ring_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               *prod = le32_to_cpu(resp->producer_index);
+               *cons = le32_to_cpu(resp->consumer_index);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
 {
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t)
                        bnxt_queue_sp_work(bp);
                }
        }
+
+       if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+               set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
+               bnxt_queue_sp_work(bp);
+       }
 bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
@@ -8851,6 +8876,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
        bnxt_rtnl_unlock_sp(bp);
 }
 
+static void bnxt_chk_missed_irq(struct bnxt *bp)
+{
+       int i;
+
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               u32 fw_ring_id;
+               int j;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               for (j = 0; j < 2; j++) {
+                       struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+                       u32 val[2];
+
+                       if (!cpr2 || cpr2->has_more_work ||
+                           !bnxt_has_work(bp, cpr2))
+                               continue;
+
+                       if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
+                               cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
+                               continue;
+                       }
+                       fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
+                       bnxt_dbg_hwrm_ring_info_get(bp,
+                               DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
+                               fw_ring_id, &val[0], &val[1]);
+                       cpr->missed_irqs++;
+               }
+       }
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +8993,9 @@ static void bnxt_sp_task(struct work_struct *work)
        if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
                bnxt_tc_flow_stats_work(bp);
 
+       if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
+               bnxt_chk_missed_irq(bp);
+
        /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
         * must be the last functions to be called before exiting.
         */
@@ -10087,6 +10153,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        bnxt_hwrm_func_qcfg(bp);
+       bnxt_hwrm_vnic_qcaps(bp);
        bnxt_hwrm_port_led_qcaps(bp);
        bnxt_ethtool_init(bp);
        bnxt_dcb_init(bp);
@@ -10120,7 +10187,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
        }
 
-       bnxt_hwrm_vnic_qcaps(bp);
        if (bnxt_rfs_supported(bp)) {
                dev->hw_features |= NETIF_F_NTUPLE;
                if (bnxt_rfs_capable(bp)) {
index 498b373c992d372504278eb666d79f0ad83a9cd6..9e99d4ab3e062fe14fee7cd78b8626032fa5c705 100644 (file)
@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
        u8                      had_work_done:1;
        u8                      has_more_work:1;
 
+       u32                     last_cp_raw_cons;
+
        struct bnxt_coal        rx_ring_coal;
        u64                     rx_packets;
        u64                     rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
        dma_addr_t              hw_stats_map;
        u32                     hw_stats_ctx_id;
        u64                     rx_l4_csum_errors;
+       u64                     missed_irqs;
 
        struct bnxt_ring_struct cp_ring_struct;
 
@@ -1527,6 +1530,7 @@ struct bnxt {
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT  14
 #define BNXT_FLOW_STATS_SP_EVENT       15
 #define BNXT_UPDATE_PHY_SP_EVENT       16
+#define BNXT_RING_COAL_NOW_SP_EVENT    17
 
        struct bnxt_hw_resc     hw_resc;
        struct bnxt_pf_info     pf;
index 48078564f0258cf2d862bd6d57c9d0da79d072a1..6cc69a58478a5ffaad8b7a0511d68e36a93657f3 100644 (file)
@@ -137,7 +137,7 @@ reset_coalesce:
        return rc;
 }
 
-#define BNXT_NUM_STATS 21
+#define BNXT_NUM_STATS 22
 
 #define BNXT_RX_STATS_ENTRY(counter)   \
        { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
                for (k = 0; k < stat_fields; j++, k++)
                        buf[j] = le64_to_cpu(hw_stats[k]);
                buf[j++] = cpr->rx_l4_csum_errors;
+               buf[j++] = cpr->missed_irqs;
 
                bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
                        le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        buf += ETH_GSTRING_LEN;
                        sprintf(buf, "[%d]: rx_l4_csum_errors", i);
                        buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: missed_irqs", i);
+                       buf += ETH_GSTRING_LEN;
                }
                for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
                        strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
        record->asic_state = 0;
        strlcpy(record->system_name, utsname()->nodename,
                sizeof(record->system_name));
-       record->year = cpu_to_le16(tm.tm_year);
-       record->month = cpu_to_le16(tm.tm_mon);
+       record->year = cpu_to_le16(tm.tm_year + 1900);
+       record->month = cpu_to_le16(tm.tm_mon + 1);
        record->day = cpu_to_le16(tm.tm_mday);
        record->hour = cpu_to_le16(tm.tm_hour);
        record->minute = cpu_to_le16(tm.tm_min);
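
The year/month fix follows the struct tm conventions: tm_year counts years since 1900 and tm_mon runs 0..11, so both raw values must be offset before being stored as a calendar year and month. A minimal sketch of the conversion, assuming the record is filled from wall-clock time; the helper name is illustrative and not part of the patch:

#include <linux/time.h>		/* time64_to_tm(), struct tm */
#include <linux/timekeeping.h>	/* ktime_get_real_seconds() */

/* Sketch only: convert "now" into calendar year/month the way the fix does. */
static void demo_calendar_now(unsigned int *year, unsigned int *month)
{
	struct tm tm;

	time64_to_tm(ktime_get_real_seconds(), 0, &tm);

	*year  = tm.tm_year + 1900;	/* tm_year counts from 1900, e.g. 118 -> 2018 */
	*month = tm.tm_mon + 1;		/* tm_mon runs 0..11, e.g. 10 -> November */
}
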
index beee61292d5e522bae0842e5e76253eeb4e1d1f4..b59b382d34f94277a9a33e52223fda8ab7f19af3 100644 (file)
@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
        if (ulp_id == BNXT_ROCE_ULP) {
                unsigned int max_stat_ctxs;
 
+               if (bp->flags & BNXT_FLAG_CHIP_P5)
+                       return -EOPNOTSUPP;
+
                max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
                if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
                    bp->num_stat_ctxs == max_stat_ctxs)
index 75c1c5ed23878441664d2c7db867b67ee8ba21aa..e2cdfa75673fd58cf6dbacd1e67d7a05dd4b33c8 100644 (file)
@@ -67,7 +67,6 @@ config CHELSIO_T3
 config CHELSIO_T4
        tristate "Chelsio Communications T4/T5/T6 Ethernet support"
        depends on PCI && (IPV6 || IPV6=n)
-       depends on THERMAL || !THERMAL
        select FW_LOADER
        select MDIO
        select ZLIB_DEFLATE
index 78e5d17a1d5fb2306f104ce1bf1c4d02c42003ed..91d8a885deba9b8ea82125c3f0f9009fae9a3f91 100644 (file)
@@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
-ifdef CONFIG_THERMAL
-cxgb4-objs += cxgb4_thermal.o
-endif
+cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o
index 05a46926016a5e2d53bc57b0c9c5ca19f5484f29..d49db46254cd7d528f6f181e8237277069c8a916 100644 (file)
@@ -5863,7 +5863,7 @@ fw_attach_fail:
        if (!is_t4(adapter->params.chip))
                cxgb4_ptp_init(adapter);
 
-       if (IS_ENABLED(CONFIG_THERMAL) &&
+       if (IS_REACHABLE(CONFIG_THERMAL) &&
            !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
                cxgb4_thermal_init(adapter);
 
@@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev)
 
                if (!is_t4(adapter->params.chip))
                        cxgb4_ptp_stop(adapter);
-               if (IS_ENABLED(CONFIG_THERMAL))
+               if (IS_REACHABLE(CONFIG_THERMAL))
                        cxgb4_thermal_remove(adapter);
 
                /* If we allocated filters, free up state associated with any
index 8c5ba4b81fb7332aba15a0f4199155f39a4da8a3..2d4d10a017e59bfc96870e9fd94338986f6ef7f2 100644 (file)
@@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev)
        err = register_netdev(net_dev);
        if (err)
                goto err_unprepare_clk;
-       return err;
+
+       return 0;
 
 err_unprepare_clk:
        clk_disable_unprepare(priv->clk);
@@ -520,7 +521,7 @@ err_unprepare_clk:
 err_uninit_dma:
        xrx200_hw_cleanup(priv);
 
-       return 0;
+       return err;
 }
 
 static int xrx200_remove(struct platform_device *pdev)
index 3ba672e9e353d2cf6cd38ad64172e4219b9f5602..e5397c8197b9c3713c48e925dad0dfcee732c0b9 100644 (file)
@@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
        if (state->interface != PHY_INTERFACE_MODE_NA &&
            state->interface != PHY_INTERFACE_MODE_QSGMII &&
            state->interface != PHY_INTERFACE_MODE_SGMII &&
-           state->interface != PHY_INTERFACE_MODE_2500BASEX &&
            !phy_interface_mode_is_8023z(state->interface) &&
            !phy_interface_mode_is_rgmii(state->interface)) {
                bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
        /* Asymmetric pause is unsupported */
        phylink_set(mask, Pause);
 
-       /* We cannot use 1Gbps when using the 2.5G interface. */
-       if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
-               phylink_set(mask, 2500baseT_Full);
-               phylink_set(mask, 2500baseX_Full);
-       } else {
-               phylink_set(mask, 1000baseT_Full);
-               phylink_set(mask, 1000baseX_Full);
-       }
+       /* Half-duplex at speeds higher than 100Mbit is unsupported */
+       phylink_set(mask, 1000baseT_Full);
+       phylink_set(mask, 1000baseX_Full);
 
        if (!phy_interface_mode_is_8023z(state->interface)) {
                /* 10M and 100M are only supported in non-802.3z mode */
index deef5a998985a9f8398d693b8bd219a5fc083313..9af34e03892c19e780149a3a8405d936fe417d45 100644 (file)
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
 static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
                                  int align, u32 skip_mask, u32 *puid)
 {
-       u32 uid;
+       u32 uid = 0;
        u32 res;
        struct mlx4_zone_allocator *zone_alloc = zone->allocator;
        struct mlx4_zone_entry *curr_node;
index ebcd2778eeb3e1f22524064ff2db7762e1f852ba..23f1b5b512c2198cb664167e42fb91ff9c549f13 100644 (file)
@@ -540,8 +540,8 @@ struct slave_list {
 struct resource_allocator {
        spinlock_t alloc_lock; /* protect quotas */
        union {
-               int res_reserved;
-               int res_port_rsvd[MLX4_MAX_PORTS];
+               unsigned int res_reserved;
+               unsigned int res_port_rsvd[MLX4_MAX_PORTS];
        };
        union {
                int res_free;
index 2e84f10f59ba9ca0a69d980b87368f7310e5bb13..1a11bc0e16123e918e68e7a8f8bed703825665fa 100644 (file)
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
                                     buf);
 
+               (*mpt_entry)->lkey = 0;
                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }
 
index 8e8fa823d611878c1ce166ae271a082b5ec84acd..69966dfc6e3d12f6b754dd94f0ca7fcc4018713f 100644 (file)
@@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
 static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
                    struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                   bool enable, u8 prio, u8 tc,
+                   bool app_tlv, bool enable, u8 prio, u8 tc,
                    enum dcbx_protocol_type type,
                    enum qed_pci_personality personality)
 {
@@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
                p_data->arr[type].dont_add_vlan0 = true;
 
        /* QM reconf data */
-       if (p_hwfn->hw_info.personality == personality)
+       if (app_tlv && p_hwfn->hw_info.personality == personality)
                qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
 
        /* Configure dcbx vlan priority in doorbell block for roce EDPM */
@@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
                         struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                        bool enable, u8 prio, u8 tc,
+                        bool app_tlv, bool enable, u8 prio, u8 tc,
                         enum dcbx_protocol_type type)
 {
        enum qed_pci_personality personality;
@@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 
                personality = qed_dcbx_app_update[i].personality;
 
-               qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
+               qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
                                    prio, tc, type, personality);
        }
 }
@@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                enable = true;
                        }
 
-                       qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
-                                                priority, tc, type);
+                       qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
+                                                enable, priority, tc, type);
                }
        }
 
@@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        continue;
 
                enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
-               qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
+               qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
                                         priority, tc, type);
        }
 
index 7ceb2b97538d25d767c3d8cc7e7ab79d8b03e760..cff141077558c8c3abbe5909b23549f05aad2b60 100644 (file)
@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
                        qed_iscsi_free(p_hwfn);
                        qed_ooo_free(p_hwfn);
                }
+
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+                       qed_rdma_info_free(p_hwfn);
+
                qed_iov_free(p_hwfn);
                qed_l2_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
@@ -1081,6 +1085,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
                                goto alloc_err;
                }
 
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+                       rc = qed_rdma_info_alloc(p_hwfn);
+                       if (rc)
+                               goto alloc_err;
+               }
+
                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc)
@@ -2102,11 +2112,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
        if (!p_ptt)
                return -EAGAIN;
 
-       /* If roce info is allocated it means roce is initialized and should
-        * be enabled in searcher.
-        */
        if (p_hwfn->p_rdma_info &&
-           p_hwfn->b_rdma_enabled_in_prs)
+           p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
                qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
 
        /* Re-open incoming traffic */
index 0f0aba793352c406404b53306f4bfb454b70a8b6..b22f464ea3fa770e94327640e32a35972ee35745 100644 (file)
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
         */
        do {
                index = p_sb_attn->sb_index;
+               /* finish reading index before the loop condition */
+               dma_rmb();
                attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
                attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
        } while (index != p_sb_attn->sb_index);
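
The loop tolerates the device DMA-updating the status block while it is being read: it samples sb_index, reads the attention words, and only accepts them if sb_index is unchanged afterwards. Without the added dma_rmb() the CPU may reorder those loads and fetch the attention words before the first index read, which would defeat the consistency check. A generic sketch of the idiom, mirroring the single barrier added by the patch; the structure and names below are illustrative, not qed code:

#include <asm/barrier.h>
#include <linux/types.h>

/* Sketch only: index-guarded snapshot of data written by a device via DMA. */
struct demo_sb {
	u32 index;	/* bumped by the device after updating data */
	u32 data;	/* payload guarded by index */
};

static u32 demo_read_consistent(const volatile struct demo_sb *sb)
{
	u32 idx, data;

	do {
		idx = sb->index;	/* 1) sample the running index     */
		dma_rmb();		/* 2) order it before the payload  */
		data = sb->data;	/* 3) read the guarded data        */
	} while (idx != sb->index);	/* 4) retry if the device moved on */

	return data;
}
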
index 35fd0db6a67777629fbaa7e1ab89c21b8030d31e..fff7f04d4525c51f15f7e349670a698354edc0f3 100644 (file)
@@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev)
                        return -EBUSY;
                }
                rc = qed_mcp_drain(hwfn, ptt);
+               qed_ptt_release(hwfn, ptt);
                if (rc)
                        return rc;
-               qed_ptt_release(hwfn, ptt);
        }
 
        return 0;
index 62113438c8809c34e5c2dc48bccda67fc9ae21ad..7873d6dfd91f55607a6d60b23b568488edf7d360 100644 (file)
@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
        return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
 }
 
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt,
-                         struct qed_rdma_start_in_params *params)
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
 {
        struct qed_rdma_info *p_rdma_info;
-       u32 num_cons, num_tasks;
-       int rc = -ENOMEM;
 
-       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
-       /* Allocate a struct with current pf rdma info */
        p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
        if (!p_rdma_info)
-               return rc;
+               return -ENOMEM;
+
+       spin_lock_init(&p_rdma_info->lock);
 
        p_hwfn->p_rdma_info = p_rdma_info;
+       return 0;
+}
+
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->p_rdma_info);
+       p_hwfn->p_rdma_info = NULL;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       u32 num_cons, num_tasks;
+       int rc = -ENOMEM;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                p_rdma_info->proto = PROTOCOLID_IWARP;
        else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
        /* Allocate a struct with device params and fill it */
        p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
        if (!p_rdma_info->dev)
-               goto free_rdma_info;
+               return rc;
 
        /* Allocate a struct with port params and fill it */
        p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -298,8 +310,6 @@ free_rdma_port:
        kfree(p_rdma_info->port);
 free_rdma_dev:
        kfree(p_rdma_info->dev);
-free_rdma_info:
-       kfree(p_rdma_info);
 
        return rc;
 }
@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 
        kfree(p_rdma_info->port);
        kfree(p_rdma_info->dev);
-
-       kfree(p_rdma_info);
 }
 
 static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
 
-       spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
        qed_rdma_init_devinfo(p_hwfn, params);
        qed_rdma_init_port(p_hwfn);
        qed_rdma_init_events(p_hwfn, params);
@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
        /* Disable RoCE search */
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
        p_hwfn->b_rdma_enabled_in_prs = false;
-
+       p_hwfn->p_rdma_info->active = 0;
        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
 
        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
        u8 max_stats_queues;
        int rc;
 
-       if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+       if (!rdma_cxt || !in_params || !out_params ||
+           !p_hwfn->p_rdma_info->active) {
                DP_ERR(p_hwfn->cdev,
                       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
                       rdma_cxt, in_params, out_params);
@@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
 {
        bool result;
 
-       /* if rdma info has not been allocated, naturally there are no qps */
-       if (!p_hwfn->p_rdma_info)
+       /* if rdma wasn't activated yet, naturally there are no qps */
+       if (!p_hwfn->p_rdma_info->active)
                return false;
 
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
        if (!p_ptt)
                goto err;
 
-       rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+       rc = qed_rdma_alloc(p_hwfn);
        if (rc)
                goto err1;
 
@@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
                goto err2;
 
        qed_ptt_release(p_hwfn, p_ptt);
+       p_hwfn->p_rdma_info->active = 1;
 
        return rc;
 
index 6f722ee8ee945b13ee6f33df82d1692c0ae18304..3689fe3e593542fc487167aae73da99156add030 100644 (file)
@@ -102,6 +102,7 @@ struct qed_rdma_info {
        u16 max_queue_zones;
        enum protocol_type proto;
        struct qed_iwarp_info iwarp;
+       u8 active:1;
 };
 
 struct qed_rdma_qp {
@@ -176,10 +177,14 @@ struct qed_rdma_qp {
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
 #else
 static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt) {}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
 #endif
 
 int
index 33265747bf3994c668cfcb2a8f7f3d9f768d370a..0fbcedcdf6e2ae5b6d9d8ecca66937532db263cb 100644 (file)
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
                 * assume the pin serves as pull-up. If direction is
                 * output, the default value is high.
                 */
-               gpiod_set_value(bitbang->mdo, 1);
+               gpiod_set_value_cansleep(bitbang->mdo, 1);
                return;
        }
 
@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       return gpiod_get_value(bitbang->mdio);
+       return gpiod_get_value_cansleep(bitbang->mdio);
 }
 
 static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
        if (bitbang->mdo)
-               gpiod_set_value(bitbang->mdo, what);
+               gpiod_set_value_cansleep(bitbang->mdo, what);
        else
-               gpiod_set_value(bitbang->mdio, what);
+               gpiod_set_value_cansleep(bitbang->mdio, what);
 }
 
 static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       gpiod_set_value(bitbang->mdc, what);
+       gpiod_set_value_cansleep(bitbang->mdc, what);
 }
 
 static const struct mdiobb_ops mdio_gpio_ops = {
index 060135ceaf0e1a76615b2e6cd6830d42ac8becdf..e244f5d7512a6e8f8a2a5b5eb141a2024f80a078 100644 (file)
@@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
 
        if (!rx_batched || (!more && skb_queue_empty(queue))) {
                local_bh_disable();
+               skb_record_rx_queue(skb, tfile->queue_index);
                netif_receive_skb(skb);
                local_bh_enable();
                return;
@@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
                struct sk_buff *nskb;
 
                local_bh_disable();
-               while ((nskb = __skb_dequeue(&process_queue)))
+               while ((nskb = __skb_dequeue(&process_queue))) {
+                       skb_record_rx_queue(nskb, tfile->queue_index);
                        netif_receive_skb(nskb);
+               }
+               skb_record_rx_queue(skb, tfile->queue_index);
                netif_receive_skb(skb);
                local_bh_enable();
        }
@@ -2451,6 +2455,7 @@ build:
        if (!rcu_dereference(tun->steering_prog))
                rxhash = __skb_get_hash_symmetric(skb);
 
+       skb_record_rx_queue(skb, tfile->queue_index);
        netif_receive_skb(skb);
 
        stats = get_cpu_ptr(tun->pcpu_stats);
index 0b70c8bab045ac1a544cf3c72eb1a8766152fcd2..54032c4666361e3f3cb65f60900acb0772b99988 100644 (file)
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
 
        bool                    ioq_live;
        bool                    assoc_active;
+       atomic_t                err_work_active;
        u64                     association_id;
 
        struct list_head        ctrl_list;      /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
        struct blk_mq_tag_set   tag_set;
 
        struct delayed_work     connect_work;
+       struct work_struct      err_work;
 
        struct kref             ref;
        u32                     flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
        struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
        int i;
 
+       /* ensure we've initialized the ops once */
+       if (!(aen_op->flags & FCOP_FLAGS_AEN))
+               return;
+
        for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
                __nvme_fc_abort_op(ctrl, aen_op);
 }
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
-       /* only proceed if in LIVE state - e.g. on first error */
+       int active;
+
+       /*
+        * If an error (io timeout, etc.) occurs while (re)connecting,
+        * it is an error in creating the new association.
+        * Start the error recovery thread if it hasn't already
+        * been started. It is expected that multiple ios may hit
+        * this path before things are cleaned up.
+        */
+       if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+               active = atomic_xchg(&ctrl->err_work_active, 1);
+               if (!active && !schedule_work(&ctrl->err_work)) {
+                       atomic_set(&ctrl->err_work_active, 0);
+                       WARN_ON(1);
+               }
+               return;
+       }
+
+       /* Otherwise, only proceed if in LIVE state - e.g. on first error */
        if (ctrl->ctrl.state != NVME_CTRL_LIVE)
                return;
 
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+       cancel_work_sync(&ctrl->err_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side.  this will block
@@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 }
 
 static void
-nvme_fc_reset_ctrl_work(struct work_struct *work)
+__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-       struct nvme_fc_ctrl *ctrl =
-               container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
-       int ret;
-
-       nvme_stop_ctrl(&ctrl->ctrl);
+       nvme_stop_keep_alive(&ctrl->ctrl);
 
        /* will block will waiting for io to terminate */
        nvme_fc_delete_association(ctrl);
 
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
+           !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: error_recovery: Couldn't change state "
                        "to CONNECTING\n", ctrl->cnum);
-               return;
-       }
+}
+
+static void
+nvme_fc_reset_ctrl_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+               container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
+       int ret;
+
+       __nvme_fc_terminate_io(ctrl);
+
+       nvme_stop_ctrl(&ctrl->ctrl);
 
        if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
                ret = nvme_fc_create_association(ctrl);
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
                        ctrl->cnum);
 }
 
+static void
+nvme_fc_connect_err_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+                       container_of(work, struct nvme_fc_ctrl, err_work);
+
+       __nvme_fc_terminate_io(ctrl);
+
+       atomic_set(&ctrl->err_work_active, 0);
+
+       /*
+        * Rescheduling the connection after recovering
+        * from the io error is left to the reconnect work
+        * item, which is what should have stalled waiting on
+        * the io that had the error that scheduled this work.
+        */
+}
+
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        ctrl->cnum = idx;
        ctrl->ioq_live = false;
        ctrl->assoc_active = false;
+       atomic_set(&ctrl->err_work_active, 0);
        init_waitqueue_head(&ctrl->ioabort_wait);
 
        get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+       INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
        cancel_work_sync(&ctrl->ctrl.reset_work);
+       cancel_work_sync(&ctrl->err_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
 
        ctrl->ctrl.opts = NULL;
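
err_work_active acts as a one-shot latch: of the many I/Os that can fail while an association is still being (re)created, only the first schedules the error-recovery work. atomic_xchg() returns the previous value, so exactly one caller observes 0 and wins; nvme_fc_connect_err_work() clears the flag again when it finishes. A condensed sketch of that claim/undo pattern, lifted from the hunks above with the surrounding logic omitted:

	/* Sketch only: single-scheduler latch around schedule_work(). */
	if (atomic_xchg(&ctrl->err_work_active, 1) == 0) {
		/* we flipped 0 -> 1, so scheduling is our responsibility */
		if (!schedule_work(&ctrl->err_work)) {
			/* unexpectedly already queued - release the claim */
			atomic_set(&ctrl->err_work_active, 0);
			WARN_ON(1);
		}
	}
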
index 9b18ce90f90733a0b4d6ceec64921488cd19aa80..27f67dfa649d08373de5d898cffb19f8e851ea93 100644 (file)
@@ -44,6 +44,7 @@ struct nvmem_cell {
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
+       struct device_node      *np;
        struct nvmem_device     *nvmem;
        struct list_head        node;
 };
@@ -298,6 +299,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
        mutex_lock(&nvmem_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_mutex);
+       of_node_put(cell->np);
        kfree(cell->name);
        kfree(cell);
 }
@@ -530,6 +532,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
                        return -ENOMEM;
 
                cell->nvmem = nvmem;
+               cell->np = of_node_get(child);
                cell->offset = be32_to_cpup(addr++);
                cell->bytes = be32_to_cpup(addr);
                cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -960,14 +963,13 @@ out:
 
 #if IS_ENABLED(CONFIG_OF)
 static struct nvmem_cell *
-nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
+nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
 {
        struct nvmem_cell *cell = NULL;
-       int i = 0;
 
        mutex_lock(&nvmem_mutex);
        list_for_each_entry(cell, &nvmem->cells, node) {
-               if (index == i++)
+               if (np == cell->np)
                        break;
        }
        mutex_unlock(&nvmem_mutex);
@@ -1011,7 +1013,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);
 
-       cell = nvmem_find_cell_by_index(nvmem, index);
+       cell = nvmem_find_cell_by_node(nvmem, cell_np);
        if (!cell) {
                __nvmem_device_put(nvmem);
                return ERR_PTR(-ENOENT);
index 9e5a9a3112c9cec57abcffab6c9b23640682756c..3f4fb4dbbe33b0ca755dc547707b815a337597e9 100644 (file)
@@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
        int ret;
 
        vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
-                                         new_supply_vbb->u_volt);
+                                         new_supply_vdd->u_volt);
+
+       if (new_supply_vdd->u_volt_min < vdd_uv)
+               new_supply_vdd->u_volt_min = vdd_uv;
 
        /* Scaling up? Scale voltage before frequency */
        if (freq > old_freq) {
index f96ec68af2e58aabfa0bdb71dde1f05ff8ad4151..dcbf5c857743782871d9be3dfe51f9fee3d83d91 100644 (file)
@@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
                        break;
 
                clear_bit_inv(bit, bv);
+               ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
                barrier();
                smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
-               ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
        }
 
        if (ism->sba->e) {
index 20c85eed1a7504997a9ab60d2c8247f04ab82995..b658b9a5eb1e172b6549d05b1e8c3aece913f715 100644 (file)
@@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 static void
 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
 {
-       int cnt;
+       int cnt, status;
        unsigned long flags;
        srb_t *sp;
        scsi_qla_host_t *vha = qp->vha;
@@ -1799,10 +1799,16 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
                                        if (!sp_get(sp)) {
                                                spin_unlock_irqrestore
                                                        (qp->qp_lock_ptr, flags);
-                                               qla2xxx_eh_abort(
+                                               status = qla2xxx_eh_abort(
                                                        GET_CMD_SP(sp));
                                                spin_lock_irqsave
                                                        (qp->qp_lock_ptr, flags);
+                                               /*
+                                                * Get rid of extra reference caused
+                                                * by early exit from qla2xxx_eh_abort
+                                                */
+                                               if (status == FAST_IO_FAIL)
+                                                       atomic_dec(&sp->ref_count);
                                        }
                                }
                                sp->done(sp, res);
index 46df707e6f2c0404a3c1c4b5f92a76e03feee622..452e19f8fb47027ab4c264f60da67b69eafdbf49 100644 (file)
@@ -20,6 +20,7 @@
 #include "unipro.h"
 #include "ufs-hisi.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
 
 static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
 {
@@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
 
 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 {
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
+               pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
+               /* VS_DebugSaveConfigTime */
+               ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
+               /* sync length */
+               ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
+       }
+
        /* update */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
        /* PA_TxSkip */
index 71f73d1d1ad1fb9b7c357c65baf0f49ec9dfa780..5d2dfdb41a6ffcc6c20a88189c070a3917d8d4e3 100644 (file)
@@ -131,4 +131,10 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME        (1 << 8)
 
+/*
+ * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
+ * enabling this quirk ensures that it is set.
+ */
+#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME   (1 << 9)
+
 #endif /* UFS_QUIRKS_H_ */
index 27db55b0ca7f860787d34f35d30ec2919937e490..f1c57cd33b5ba3bd78c7b66e0b9b4ba171f17f14 100644 (file)
@@ -231,6 +231,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+       UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
+               UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
 
        END_FIX
 };
index 7218fb963d0a111d5ccc15cc6d96d1370f9fe3ff..1382a8df6c75f8439f98261811768ca3126883af 100644 (file)
@@ -777,9 +777,6 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
        u8 la = txn->la;
        bool usr_msg = false;
 
-       if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
-               return -EPROTONOSUPPORT;
-
        if (txn->mt == SLIM_MSG_MT_CORE &&
                (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
                 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
index 4399d1873e2d929b4897ad7fedfd8ca69d08ae72..9be41089edde7385abd89b87385376d730be7d29 100644 (file)
 #define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL          0x58
 #define SLIM_MSG_MC_RECONFIGURE_NOW              0x5F
 
-/*
- * Clock pause flag to indicate that the reconfig message
- * corresponds to clock pause sequence
- */
-#define SLIM_MSG_CLK_PAUSE_SEQ_FLG             (1U << 8)
-
 /* Clock pause values per SLIMbus spec */
 #define SLIM_CLK_FAST                          0
 #define SLIM_CLK_CONST_PHASE                   1
index a53231b08d30ee390e9565cfc52c185023692778..e3425bf082ae986ab4c28893c11112654a9fbb2a 100644 (file)
@@ -310,6 +310,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
                        ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
                        break;
                }
+               /* fall through */
 
        case IPIPEIF_SDRAM_YUV:
                /* Set clock divider */
index 82558455384af6f1bbd576e0181261163c1bdce3..dd121f66fa2de82db949a0027cafad53b1d08225 100644 (file)
@@ -253,7 +253,7 @@ static const struct v4l2_m2m_ops cedrus_m2m_ops = {
 
 static const struct media_device_ops cedrus_m2m_media_ops = {
        .req_validate   = cedrus_request_validate,
-       .req_queue      = vb2_m2m_request_queue,
+       .req_queue      = v4l2_m2m_request_queue,
 };
 
 static int cedrus_probe(struct platform_device *pdev)
index 85644669fbe7b013b59b24adff5fd466230a102b..0a357db4b31b37ddd3ceedd25ecde5f7572d7690 100644 (file)
@@ -961,6 +961,8 @@ int __uio_register_device(struct module *owner,
        if (ret)
                goto err_uio_dev_add_attributes;
 
+       info->uio_dev = idev;
+
        if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
                /*
                 * Note that we deliberately don't use devm_request_irq
@@ -972,11 +974,12 @@ int __uio_register_device(struct module *owner,
                 */
                ret = request_irq(info->irq, uio_interrupt,
                                  info->irq_flags, info->name, idev);
-               if (ret)
+               if (ret) {
+                       info->uio_dev = NULL;
                        goto err_request_irq;
+               }
        }
 
-       info->uio_dev = idev;
        return 0;
 
 err_request_irq:
index 47d75c20c211c6f70cad75fcae4ae6b75c458c09..1b68fed464cb96456fcd566f409608bfda34b4f1 100644 (file)
@@ -1696,6 +1696,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
+       { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
        { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
        .driver_info = QUIRK_CONTROL_LINE_STATE, },
        { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
index c6077d582d2966a4fbeffd092092c1bf58429db1..0f9381b69a3b563bc63d8010a639bb270f0fc2c8 100644 (file)
@@ -2794,6 +2794,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
        int i, status;
        u16 portchange, portstatus;
        struct usb_port *port_dev = hub->ports[port1 - 1];
+       int reset_recovery_time;
 
        if (!hub_is_superspeed(hub->hdev)) {
                if (warm) {
@@ -2849,7 +2850,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
                                        USB_PORT_FEAT_C_BH_PORT_RESET);
                        usb_clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_PORT_LINK_STATE);
-                       usb_clear_port_feature(hub->hdev, port1,
+
+                       if (udev)
+                               usb_clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_CONNECTION);
 
                        /*
@@ -2885,11 +2888,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
 
 done:
        if (status == 0) {
-               /* TRSTRCY = 10 ms; plus some extra */
                if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
                        usleep_range(10000, 12000);
-               else
-                       msleep(10 + 40);
+               else {
+                       /* TRSTRCY = 10 ms; plus some extra */
+                       reset_recovery_time = 10 + 40;
+
+                       /* Hub needs extra delay after resetting its port. */
+                       if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
+                               reset_recovery_time += 100;
+
+                       msleep(reset_recovery_time);
+               }
 
                if (udev) {
                        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
index 178d6c6063c0280a06c6dc3dc6611394d8233479..f9ff03e6af93e7e41acfbcdcb2154dc3a81bbe03 100644 (file)
@@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
                        case 'n':
                                flags |= USB_QUIRK_DELAY_CTRL_MSG;
                                break;
+                       case 'o':
+                               flags |= USB_QUIRK_HUB_SLOW_RESET;
+                               break;
                        /* Ignore unrecognized flag characters */
                        }
                }
@@ -380,6 +383,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Terminus Technology Inc. Hub */
+       { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
+
        /* Corsair K70 RGB */
        { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
@@ -391,6 +397,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
          USB_QUIRK_DELAY_CTRL_MSG },
 
+       /* Corsair K70 LUX RGB */
+       { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
+
        /* Corsair K70 LUX */
        { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
 
@@ -411,6 +420,11 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x2040, 0x7200), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Raydium Touchscreen */
+       { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
+
+       { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
index d257c541e51ba4e51530e5d8c3d1048794f5f013..7afc10872f1f031370727a277180f664fe8e4c56 100644 (file)
@@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci,
        dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
        if (!dwc2) {
                dev_err(dev, "couldn't allocate dwc2 device\n");
+               ret = -ENOMEM;
                goto err;
        }
 
index becfbb87f791dad714b5ab23049ede45f9dd63e6..2f2048aa5fde13dde439e52236620f9bab603118 100644 (file)
@@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev)
 
 err5:
        dwc3_event_buffers_cleanup(dwc);
+       dwc3_ulpi_exit(dwc);
 
 err4:
        dwc3_free_scratch_buffers(dwc);
index 1286076a8890308a66d6ef494b85169ae92d7798..842795856bf49e5093459777b252b33d67e89a96 100644 (file)
@@ -283,8 +283,10 @@ err:
 static void dwc3_pci_remove(struct pci_dev *pci)
 {
        struct dwc3_pci         *dwc = pci_get_drvdata(pci);
+       struct pci_dev          *pdev = dwc->pci;
 
-       gpiod_remove_lookup_table(&platform_bytcr_gpios);
+       if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
+               gpiod_remove_lookup_table(&platform_bytcr_gpios);
 #ifdef CONFIG_PM
        cancel_work_sync(&dwc->wakeup_work);
 #endif
index 679c12e145225899c1ac5ff3f4f8489d8552a928..9faad896b3a135e86a6d4aebae3678fbc20994d7 100644 (file)
@@ -1081,7 +1081,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
                        /* Now prepare one extra TRB to align transfer size */
                        trb = &dep->trb_pool[dep->trb_enqueue];
                        __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
-                                       maxp - rem, false, 0,
+                                       maxp - rem, false, 1,
                                        req->request.stream_id,
                                        req->request.short_not_ok,
                                        req->request.no_interrupt);
@@ -1125,7 +1125,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
                /* Now prepare one extra TRB to align transfer size */
                trb = &dep->trb_pool[dep->trb_enqueue];
                __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
-                               false, 0, req->request.stream_id,
+                               false, 1, req->request.stream_id,
                                req->request.short_not_ok,
                                req->request.no_interrupt);
        } else if (req->request.zero && req->request.length &&
@@ -1141,7 +1141,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
                /* Now prepare one extra TRB to handle ZLP */
                trb = &dep->trb_pool[dep->trb_enqueue];
                __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
-                               false, 0, req->request.stream_id,
+                               false, 1, req->request.stream_id,
                                req->request.short_not_ok,
                                req->request.no_interrupt);
        } else {
@@ -2259,7 +2259,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
         * with one TRB pending in the ring. We need to manually clear HWO bit
         * from that TRB.
         */
-       if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
+       if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
                trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
                return 1;
        }
index 3ada83d81bda8d2810ab04d154b65658f57e0ba2..31e8bf3578c891303a9194c6c23df987cf59f38f 100644 (file)
@@ -215,7 +215,6 @@ struct ffs_io_data {
 
        struct mm_struct *mm;
        struct work_struct work;
-       struct work_struct cancellation_work;
 
        struct usb_ep *ep;
        struct usb_request *req;
@@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static void ffs_aio_cancel_worker(struct work_struct *work)
-{
-       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
-                                                  cancellation_work);
-
-       ENTER();
-
-       usb_ep_dequeue(io_data->ep, io_data->req);
-}
-
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
        struct ffs_io_data *io_data = kiocb->private;
-       struct ffs_data *ffs = io_data->ffs;
+       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
        int value;
 
        ENTER();
 
-       if (likely(io_data && io_data->ep && io_data->req)) {
-               INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
-               queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
-               value = -EINPROGRESS;
-       } else {
+       spin_lock_irq(&epfile->ffs->eps_lock);
+
+       if (likely(io_data && io_data->ep && io_data->req))
+               value = usb_ep_dequeue(io_data->ep, io_data->req);
+       else
                value = -EINVAL;
-       }
+
+       spin_unlock_irq(&epfile->ffs->eps_lock);
 
        return value;
 }
index 27f00160332e2186327da464d7be327d82bc3170..3c4abb5a1c3fc6bdb86e749ab62cf256381c1f75 100644 (file)
@@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev)
        struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
        struct usb_hcd *hcd = histb->hcd;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        device_wakeup_disable(&dev->dev);
 
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
 
        xhci_histb_host_disable(histb);
        usb_put_hcd(hcd);
index 12eea73d9f20b00701e22da8debdcaa1d9283c92..94aca1b5ac8a228b6ecb690f84441f37c976fbdb 100644 (file)
@@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        status |= USB_PORT_STAT_SUSPEND;
        }
        if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
-               !DEV_SUPERSPEED_ANY(raw_port_status)) {
+               !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
                if ((raw_port_status & PORT_RESET) ||
                                !(raw_port_status & PORT_PE))
                        return 0xffffffff;
@@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        time_left = wait_for_completion_timeout(
                                        &bus_state->rexit_done[wIndex],
                                        msecs_to_jiffies(
-                                               XHCI_MAX_REXIT_TIMEOUT));
+                                               XHCI_MAX_REXIT_TIMEOUT_MS));
                        spin_lock_irqsave(&xhci->lock, flags);
 
                        if (time_left) {
@@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        } else {
                                int port_status = readl(port->addr);
                                xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
-                                               XHCI_MAX_REXIT_TIMEOUT,
+                                               XHCI_MAX_REXIT_TIMEOUT_MS,
                                                port_status);
                                status |= USB_PORT_STAT_SUSPEND;
                                clear_bit(wIndex, &bus_state->rexit_ports);
@@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        unsigned long flags;
        struct xhci_hub *rhub;
        struct xhci_port **ports;
+       u32 portsc_buf[USB_MAXCHILDREN];
+       bool wake_enabled;
 
        rhub = xhci_get_rhub(hcd);
        ports = rhub->ports;
        max_ports = rhub->num_ports;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
+       wake_enabled = hcd->self.root_hub->do_remote_wakeup;
 
        spin_lock_irqsave(&xhci->lock, flags);
 
-       if (hcd->self.root_hub->do_remote_wakeup) {
+       if (wake_enabled) {
                if (bus_state->resuming_ports ||        /* USB2 */
                    bus_state->port_remote_wakeup) {    /* USB3 */
                        spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                        return -EBUSY;
                }
        }
-
-       port_index = max_ports;
+       /*
+        * Prepare ports for suspend, but don't write anything before all ports
+        * are checked and we know bus suspend can proceed
+        */
        bus_state->bus_suspended = 0;
+       port_index = max_ports;
        while (port_index--) {
-               /* suspend the port if the port is not suspended */
                u32 t1, t2;
-               int slot_id;
 
                t1 = readl(ports[port_index]->addr);
                t2 = xhci_port_state_to_neutral(t1);
+               portsc_buf[port_index] = 0;
 
-               if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
-                       xhci_dbg(xhci, "port %d not suspended\n", port_index);
-                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-                                       port_index + 1);
-                       if (slot_id) {
+               /* Bail out if a USB3 port has a new device in link training */
+               if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+                       bus_state->bus_suspended = 0;
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+                       return -EBUSY;
+               }
+
+               /* suspend ports in U0, or bail out for new connect changes */
+               if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+                       if ((t1 & PORT_CSC) && wake_enabled) {
+                               bus_state->bus_suspended = 0;
                                spin_unlock_irqrestore(&xhci->lock, flags);
-                               xhci_stop_device(xhci, slot_id, 1);
-                               spin_lock_irqsave(&xhci->lock, flags);
+                               xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+                               return -EBUSY;
                        }
+                       xhci_dbg(xhci, "port %d not suspended\n", port_index);
                        t2 &= ~PORT_PLS_MASK;
                        t2 |= PORT_LINK_STROBE | XDEV_U3;
                        set_bit(port_index, &bus_state->bus_suspended);
@@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup) {
+               if (wake_enabled) {
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
@@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 
                t1 = xhci_port_state_to_neutral(t1);
                if (t1 != t2)
-                       writel(t2, ports[port_index]->addr);
+                       portsc_buf[port_index] = t2;
+       }
+
+       /* write port settings, stopping and suspending ports if needed */
+       port_index = max_ports;
+       while (port_index--) {
+               if (!portsc_buf[port_index])
+                       continue;
+               if (test_bit(port_index, &bus_state->bus_suspended)) {
+                       int slot_id;
+
+                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+                                                           port_index + 1);
+                       if (slot_id) {
+                               spin_unlock_irqrestore(&xhci->lock, flags);
+                               xhci_stop_device(xhci, slot_id, 1);
+                               spin_lock_irqsave(&xhci->lock, flags);
+                       }
+               }
+               writel(portsc_buf[port_index], ports[port_index]->addr);
        }
        hcd->state = HC_STATE_SUSPENDED;
        bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
index 71d0d33c3286254b0327646720161df6ba5cc1e9..60987c787e44f457d918659140a0671ed7a717e8 100644 (file)
@@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev)
        struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
        struct usb_hcd  *hcd = mtk->hcd;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct usb_hcd  *shared_hcd = xhci->shared_hcd;
 
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        device_init_wakeup(&dev->dev, false);
 
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
        usb_put_hcd(hcd);
        xhci_mtk_sch_exit(mtk);
        xhci_mtk_clks_disable(mtk);
index 01c57055c0c5bf6b85b8fd45ce2d09c1a0ad3d15..a9515265db4dba4392cc1a834876a5546d8490b2 100644 (file)
@@ -248,6 +248,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
                xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
 
+       if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
+            pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
+            pdev->device == 0x9026)
+               xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
+
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "QUIRK: Resetting on resume");
@@ -380,6 +385,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
        if (xhci->shared_hcd) {
                usb_remove_hcd(xhci->shared_hcd);
                usb_put_hcd(xhci->shared_hcd);
+               xhci->shared_hcd = NULL;
        }
 
        /* Workaround for spurious wakeups at shutdown with HSW */
index 32b5574ad5c56403eb876a70b492f88182096903..ef09cb06212fd367da3593092a31dd5c32d95f58 100644 (file)
@@ -362,14 +362,16 @@ static int xhci_plat_remove(struct platform_device *dev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct clk *clk = xhci->clk;
        struct clk *reg_clk = xhci->reg_clk;
+       struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        usb_phy_shutdown(hcd->usb_phy);
 
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
 
        clk_disable_unprepare(clk);
        clk_disable_unprepare(reg_clk);
index a8d92c90fb58755ad4359d94d4e52f50b949b2a0..65750582133f6a20eae8949e8a06c2c92c913881 100644 (file)
@@ -1521,6 +1521,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
                usb_wakeup_notification(udev->parent, udev->portnum);
 }
 
+/*
+ * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
+ * controller.
+ * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
+ * if a connection to a USB 1 device is followed by another connection
+ * to a USB 2 device.
+ *
+ * Reset the PHY after the USB device is disconnected if the device speed
+ * is less than HCD_USB3.
+ * Retry the reset sequence a maximum of 4 times, checking the PLL lock
+ * status each time.
+ */
+static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
+{
+       struct usb_hcd *hcd = xhci_to_hcd(xhci);
+       u32 pll_lock_check;
+       u32 retry_count = 4;
+
+       do {
+               /* Assert PHY reset */
+               writel(0x6F, hcd->regs + 0x1048);
+               udelay(10);
+               /* De-assert the PHY reset */
+               writel(0x7F, hcd->regs + 0x1048);
+               udelay(200);
+               pll_lock_check = readl(hcd->regs + 0x1070);
+       } while (!(pll_lock_check & 0x1) && --retry_count);
+}
+
 static void handle_port_status(struct xhci_hcd *xhci,
                union xhci_trb *event)
 {
@@ -1556,6 +1585,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
                goto cleanup;
        }
 
+       /* We might get interrupts after shared_hcd is removed */
+       if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
+               xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
+               bogus_port_status = true;
+               goto cleanup;
+       }
+
        hcd = port->rhub->hcd;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
        hcd_portnum = port->hcd_portnum;
@@ -1639,7 +1675,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
         * RExit to a disconnect state).  If so, let the driver know it's
         * out of the RExit state.
         */
-       if (!DEV_SUPERSPEED_ANY(portsc) &&
+       if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
                        test_and_clear_bit(hcd_portnum,
                                &bus_state->rexit_ports)) {
                complete(&bus_state->rexit_done[hcd_portnum]);
@@ -1647,8 +1683,12 @@ static void handle_port_status(struct xhci_hcd *xhci,
                goto cleanup;
        }
 
-       if (hcd->speed < HCD_USB3)
+       if (hcd->speed < HCD_USB3) {
                xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+               if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
+                   (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
+                       xhci_cavium_reset_phy_quirk(xhci);
+       }
 
 cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
@@ -2266,6 +2306,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        goto cleanup;
                case COMP_RING_UNDERRUN:
                case COMP_RING_OVERRUN:
+               case COMP_STOPPED_LENGTH_INVALID:
                        goto cleanup;
                default:
                        xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
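[Editor's note: as a rough worst-case figure for the xhci_cavium_reset_phy_quirk() loop added above, each attempt busy-waits udelay(10) + udelay(200) before re-reading the PLL lock bit, so the full retry budget is about

    4 * (10 us + 200 us) = 840 us

on the disconnect path, not counting the register accesses themselves.]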
index 6b5db344de3011df7154f698333e97c5f62f8f69..938ff06c034959f445dc8ad764d2e8a97887c15b 100644 (file)
@@ -1303,6 +1303,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
 
        usb_remove_hcd(xhci->shared_hcd);
        usb_put_hcd(xhci->shared_hcd);
+       xhci->shared_hcd = NULL;
        usb_remove_hcd(tegra->hcd);
        usb_put_hcd(tegra->hcd);
 
index 0420eefa647a15cb5321dfa5fd95556a5a5432e5..c928dbbff8811206fd3f668564b3ffe627cdd2aa 100644 (file)
@@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd)
 
        /* Only halt host and free memory after both hcds are removed */
        if (!usb_hcd_is_primary_hcd(hcd)) {
-               /* usb core will free this hcd shortly, unset pointer */
-               xhci->shared_hcd = NULL;
                mutex_unlock(&xhci->mutex);
                return;
        }
index bf0b3692dc9a118d30664963e88d6757c7a5f31e..260b259b72bcb6abaf85db2b32c591a64b23dae5 100644 (file)
@@ -1680,7 +1680,7 @@ struct xhci_bus_state {
  * It can take up to 20 ms to transition from RExit to U0 on the
  * Intel Lynx Point LP xHCI host.
  */
-#define        XHCI_MAX_REXIT_TIMEOUT  (20 * 1000)
+#define        XHCI_MAX_REXIT_TIMEOUT_MS       20
 
 static inline unsigned int hcd_index(struct usb_hcd *hcd)
 {
@@ -1849,6 +1849,7 @@ struct xhci_hcd {
 #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
 #define XHCI_ZERO_64B_REGS     BIT_ULL(32)
 #define XHCI_DEFAULT_PM_RUNTIME_ALLOW  BIT_ULL(33)
+#define XHCI_RESET_PLL_ON_DISCONNECT   BIT_ULL(34)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index bd539f3058bcaa3d754e07bdb7034d4f2786e8a7..85b48c6ddc7e681e7aadb8ddb86d53dbffba85a9 100644 (file)
@@ -50,6 +50,7 @@ static const struct usb_device_id appledisplay_table[] = {
        { APPLEDISPLAY_DEVICE(0x9219) },
        { APPLEDISPLAY_DEVICE(0x921c) },
        { APPLEDISPLAY_DEVICE(0x921d) },
+       { APPLEDISPLAY_DEVICE(0x9222) },
        { APPLEDISPLAY_DEVICE(0x9236) },
 
        /* Terminating entry */
index 59970886690f1b6a3767b72ec33c3c2fe81fd61f..a7b44863d502e95cbb28a1f7ed2f2a17d7ba1043 100644 (file)
@@ -576,6 +576,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 {
        signed long rtt2, timeout;
        long ret;
+       bool stalled = false;
        u64 rtt;
        u32 life, last_life;
 
@@ -609,12 +610,20 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 
                life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
                if (timeout == 0 &&
-                   life == last_life && signal_pending(current))
+                   life == last_life && signal_pending(current)) {
+                       if (stalled)
                                break;
+                       __set_current_state(TASK_RUNNING);
+                       rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
+                       timeout = rtt2;
+                       stalled = true;
+                       continue;
+               }
 
                if (life != last_life) {
                        timeout = rtt2;
                        last_life = life;
+                       stalled = false;
                }
 
                timeout = schedule_timeout(timeout);
index fc281b738a9822a652f7d19bb60ae15acd7a7ebf..acc3a5536384cf65c6742d918cc2bd9d42305da3 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
 #include <linux/oom.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/freezer.h>
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -1083,7 +1084,7 @@ static int de_thread(struct task_struct *tsk)
        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
-               schedule();
+               freezable_schedule();
                if (unlikely(__fatal_signal_pending(tsk)))
                        goto killed;
                spin_lock_irq(lock);
@@ -1111,7 +1112,7 @@ static int de_thread(struct task_struct *tsk)
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        cgroup_threadgroup_change_end(tsk);
-                       schedule();
+                       freezable_schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
                }
index a83e1f632eb70eeda82581403f2916e7c7346f79..f01623aef2f77945514d4b2ef0045f0735e11851 100644 (file)
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
 
 void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
                      unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
 
index cb31683bbe154ab1f5b58319369aa7fe32da1c44..8268811a697e25398b6785b6d8904fc9dca46adf 100644 (file)
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
 int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+                               struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+                                        unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+                             struct sk_buff *skb);
 void can_rx_offload_reset(struct can_rx_offload *offload);
 void can_rx_offload_del(struct can_rx_offload *offload);
 void can_rx_offload_enable(struct can_rx_offload *offload);
index b7a99ce56bc9ad6cd4c8ff2c5681d6fbb781b875..a1be64c9940fb4ad4e2365f226419f050d6addba 100644 (file)
@@ -66,4 +66,7 @@
 /* Device needs a pause after every control message. */
 #define USB_QUIRK_DELAY_CTRL_MSG               BIT(13)
 
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET               BIT(14)
+
 #endif /* __LINUX_USB_QUIRKS_H */
index 58c1ecf3d6489cda2cfd77a5f7de47abc7d90d3a..5467264771ec136f1b2f9b4ee538f847221f90d9 100644 (file)
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
 
 /* v4l2 request helper */
 
-void vb2_m2m_request_queue(struct media_request *req);
+void v4l2_m2m_request_queue(struct media_request *req);
 
 /* v4l2 ioctl helpers */
 
index de587948042a4ab6ce9e00dac021ce492a026621..1adefe42c0a689b839492d500327ef891c4c395c 100644 (file)
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
                            struct sockaddr_rxrpc *, struct key *);
 int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
                            enum rxrpc_call_completion *, u32 *);
-u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
index 573d5b901fb11886590a2672cf396af64476efeb..5b50fe4906d2ef95d5121a80109fdb1f130ead2b 100644 (file)
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
 enum rxrpc_propose_ack_trace {
        rxrpc_propose_ack_client_tx_end,
        rxrpc_propose_ack_input_data,
+       rxrpc_propose_ack_ping_for_check_life,
        rxrpc_propose_ack_ping_for_keepalive,
        rxrpc_propose_ack_ping_for_lost_ack,
        rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
 #define rxrpc_propose_ack_traces \
        EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
        EM(rxrpc_propose_ack_input_data,        "DataIn ") \
+       EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
        EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
        EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
        EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
index 51b095898f4b5952178205b5c517c7e383066164..998983a6e6b712f76b503fc8b2950554d582659f 100644 (file)
@@ -50,6 +50,8 @@
 #ifndef __LINUX_V4L2_CONTROLS_H
 #define __LINUX_V4L2_CONTROLS_H
 
+#include <linux/types.h>
+
 /* Control classes */
 #define V4L2_CTRL_CLASS_USER           0x00980000      /* Old-style 'user' controls */
 #define V4L2_CTRL_CLASS_MPEG           0x00990000      /* MPEG-compression controls */
@@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence {
        __u8    profile_and_level_indication;
        __u8    progressive_sequence;
        __u8    chroma_format;
+       __u8    pad;
 };
 
 struct v4l2_mpeg2_picture {
@@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture {
        __u8    alternate_scan;
        __u8    repeat_first_field;
        __u8    progressive_frame;
+       __u8    pad;
 };
 
 struct v4l2_ctrl_mpeg2_slice_params {
@@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params {
 
        __u8    backward_ref_index;
        __u8    forward_ref_index;
+       __u8    pad;
 };
 
 struct v4l2_ctrl_mpeg2_quantization {
index b984806d7d7bb1f4665ffe6cecfa5849aecc8c22..7cab9a9869ace92900ec58aabffa175d2249296d 100644 (file)
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
        if (req->fw->size > PAGE_SIZE) {
                pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
                rc = -EINVAL;
+               goto out;
        }
        memcpy(buf, req->fw->data, req->fw->size);
 
index 9f481cfdf77dace2fa594585fde5079d46474c66..e8090f099eb805626ffaa1445d3d0f8254f6c9a7 100644 (file)
@@ -352,19 +352,21 @@ out:
  */
 int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
 {
+       static const size_t tvlv_padding = sizeof(__be32);
        struct batadv_elp_packet *elp_packet;
        unsigned char *elp_buff;
        u32 random_seqno;
        size_t size;
        int res = -ENOMEM;
 
-       size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+       size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
        hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
        if (!hard_iface->bat_v.elp_skb)
                goto out;
 
        skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
-       elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+       elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
+                               BATADV_ELP_HLEN + tvlv_padding);
        elp_packet = (struct batadv_elp_packet *)elp_buff;
 
        elp_packet->packet_type = BATADV_ELP;
index 0fddc17106bd8a0e3f064fee9adba7c226f34682..5b71a289d04fc80de6c20e7a24d621727c77825a 100644 (file)
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
        kfree(entry);
 
        packet = (struct batadv_frag_packet *)skb_out->data;
-       size = ntohs(packet->total_size);
+       size = ntohs(packet->total_size) + hdr_size;
 
        /* Make room for the rest of the fragments. */
        if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
index 2920e06a540329ffb8f1286c65bc5acc6f53afad..04c19a37e500e872d564497c9410509c0df61fce 100644 (file)
@@ -102,12 +102,18 @@ struct br_tunnel_info {
        struct metadata_dst     *tunnel_dst;
 };
 
+/* private vlan flags */
+enum {
+       BR_VLFLAG_PER_PORT_STATS = BIT(0),
+};
+
 /**
  * struct net_bridge_vlan - per-vlan entry
  *
  * @vnode: rhashtable member
  * @vid: VLAN id
  * @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
  * @stats: per-cpu VLAN statistics
  * @br: if MASTER flag set, this points to a bridge struct
  * @port: if MASTER flag unset, this points to a port struct
@@ -127,6 +133,7 @@ struct net_bridge_vlan {
        struct rhash_head               tnode;
        u16                             vid;
        u16                             flags;
+       u16                             priv_flags;
        struct br_vlan_stats __percpu   *stats;
        union {
                struct net_bridge       *br;
index 8c9297a019475fe68ee110a85cda1647deaf9075..e84be08b82854916d91bc0195ad2a9ab7dae8564 100644 (file)
@@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
        v = container_of(rcu, struct net_bridge_vlan, rcu);
        WARN_ON(br_vlan_is_master(v));
        /* if we had per-port stats configured then free them here */
-       if (v->brvlan->stats != v->stats)
+       if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
                free_percpu(v->stats);
        v->stats = NULL;
        kfree(v);
@@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
                                err = -ENOMEM;
                                goto out_filt;
                        }
+                       v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
                } else {
                        v->stats = masterv->stats;
                }
index 1051eee8258184f33d15a6142ee8b387839c9adc..3aab7664933fdc67770cdbc8b80a231b41dc2555 100644 (file)
@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        } else
                ifindex = ro->ifindex;
 
-       if (ro->fd_frames) {
+       dev = dev_get_by_index(sock_net(sk), ifindex);
+       if (!dev)
+               return -ENXIO;
+
+       err = -EINVAL;
+       if (ro->fd_frames && dev->mtu == CANFD_MTU) {
                if (unlikely(size != CANFD_MTU && size != CAN_MTU))
-                       return -EINVAL;
+                       goto put_dev;
        } else {
                if (unlikely(size != CAN_MTU))
-                       return -EINVAL;
+                       goto put_dev;
        }
 
-       dev = dev_get_by_index(sock_net(sk), ifindex);
-       if (!dev)
-               return -ENXIO;
-
        skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
index 0ffcbdd55fa9ee545c807f2ed3fc178830e3075a..066aa902d85c3e3f5ad83244b1506a446bebcb88 100644 (file)
@@ -5655,6 +5655,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+
+       /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+       skb->pkt_type = PACKET_HOST;
+
        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
index dde671e978298b5b0239f975fe134b232db9049b..c248e0dccbe17afa397910b0d68260daf2a9eb3f 100644 (file)
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 
        iph->version    =       4;
        iph->ihl        =       sizeof(struct iphdr) >> 2;
-       iph->frag_off   =       df;
+       iph->frag_off   =       ip_mtu_locked(&rt->dst) ? 0 : df;
        iph->protocol   =       proto;
        iph->tos        =       tos;
        iph->daddr      =       dst;
index 2a7423c394560c0bc70d6f0398781a0b35fa9fa0..059f0531f7c1c86133afcab0f8e7388c60eeaf58 100644 (file)
@@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb)
        if (rt) {
                rcu_read_lock();
                if (rt->rt6i_flags & RTF_CACHE) {
-                       if (dst_hold_safe(&rt->dst))
-                               rt6_remove_exception_rt(rt);
+                       rt6_remove_exception_rt(rt);
                } else {
                        struct fib6_info *from;
                        struct fib6_node *fn;
@@ -2360,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+       int oif = sk->sk_bound_dev_if;
        struct dst_entry *dst;
 
-       ip6_update_pmtu(skb, sock_net(sk), mtu,
-                       sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+       if (!oif && skb->dev)
+               oif = l3mdev_master_ifindex(skb->dev);
+
+       ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
 
        dst = __sk_dst_get(sk);
        if (!dst || !dst->obsolete ||
@@ -3214,8 +3216,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
        if (cfg->fc_flags & RTF_GATEWAY &&
            !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
                goto out;
-       if (dst_hold_safe(&rt->dst))
-               rc = rt6_remove_exception_rt(rt);
+
+       rc = rt6_remove_exception_rt(rt);
 out:
        return rc;
 }
index 82cdf9020b53921c1ed86cd6122ebe6007ea263c..26f1d435696a628aca844edb2a88f8b793839d91 100644 (file)
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
                        goto err_sock;
        }
 
-       sk = sock->sk;
-
-       sock_hold(sk);
-       tunnel->sock = sk;
        tunnel->l2tp_net = net;
-
        pn = l2tp_pernet(net);
 
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
        list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
+       sk = sock->sk;
+       sock_hold(sk);
+       tunnel->sock = sk;
+
        if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
                struct udp_tunnel_sock_cfg udp_cfg = {
                        .sk_user_data = tunnel,
index 64362d078da846daead402d0f0a5b88bae04b223..a2522f9d71e266d338f1c0d223ce5bcb670fd00f 100644 (file)
@@ -375,16 +375,35 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * getting ACKs from the server.  Returns a number representing the life state
  * which can be compared to that returned by a previous call.
  *
- * If this is a client call, ping ACKs will be sent to the server to find out
- * whether it's still responsive and whether the call is still alive on the
- * server.
+ * If the life state stalls, rxrpc_kernel_probe_life() should be called; the
+ * caller should then wait 2*RTT before checking the life state again.
  */
-u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
+u32 rxrpc_kernel_check_life(const struct socket *sock,
+                           const struct rxrpc_call *call)
 {
        return call->acks_latest;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
+/**
+ * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
+ * @sock: The socket the call is on
+ * @call: The call to check
+ *
+ * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
+ * find out whether a call is still alive by pinging it.  This should cause the
+ * life state to be bumped in about 2*RTT.
+ *
+ * This must be called in TASK_RUNNING state on pain of might_sleep() objecting.
+ */
+void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
+{
+       rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+                         rxrpc_propose_ack_ping_for_check_life);
+       rxrpc_send_ack_packet(call, true, NULL);
+}
+EXPORT_SYMBOL(rxrpc_kernel_probe_life);
+
 /**
  * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
  * @sock: The socket the call is on
index da3dd0f68cc24d2c2b1828b43afc323a5786a3f8..2b372a06b432aec5b48209a3ac0e9424c9ae0b10 100644 (file)
@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                        goto out_release;
                }
        } else {
-               return err;
+               ret = err;
+               goto out_free;
        }
 
        p = to_pedit(*a);
index 4b1af706896c07e5a0fe6d542dfcd530acdcf8f5..25a7cf6d380fd1ef5610a43a06dea488121b8206 100644 (file)
@@ -469,22 +469,29 @@ begin:
                goto begin;
        }
        prefetch(&skb->end);
-       f->credit -= qdisc_pkt_len(skb);
+       plen = qdisc_pkt_len(skb);
+       f->credit -= plen;
 
-       if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+       if (!q->rate_enable)
                goto out;
 
        rate = q->flow_max_rate;
-       if (skb->sk)
-               rate = min(skb->sk->sk_pacing_rate, rate);
-
-       if (rate <= q->low_rate_threshold) {
-               f->credit = 0;
-               plen = qdisc_pkt_len(skb);
-       } else {
-               plen = max(qdisc_pkt_len(skb), q->quantum);
-               if (f->credit > 0)
-                       goto out;
+
+       /* If EDT time was provided for this skb, we need to
+        * update f->time_next_packet only if this qdisc enforces
+        * a flow max rate.
+        */
+       if (!skb->tstamp) {
+               if (skb->sk)
+                       rate = min(skb->sk->sk_pacing_rate, rate);
+
+               if (rate <= q->low_rate_threshold) {
+                       f->credit = 0;
+               } else {
+                       plen = max(plen, q->quantum);
+                       if (f->credit > 0)
+                               goto out;
+               }
        }
        if (rate != ~0UL) {
                u64 len = (u64)plen * NSEC_PER_SEC;
index 593826e11a5376bd1301b2cf19c61c377653c653..334fcc617ef2737acd0a84f7921e28a88771756f 100644 (file)
@@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
        struct socket *sock = file->private_data;
 
        if (unlikely(!sock->ops->splice_read))
-               return -EINVAL;
+               return generic_file_splice_read(file, ppos, pipe, len, flags);
 
        return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
index 2830709957bddeb13adf0f352abb9aaacba3ec55..c138d68e8a695fde8fb1464f03a539cef5a03bd4 100644 (file)
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
 
        /* Apply trial address if we just left trial period */
        if (!trial && !self) {
-               tipc_net_finalize(net, tn->trial_addr);
+               tipc_sched_net_finalize(net, tn->trial_addr);
+               msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
        }
 
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
                goto exit;
        }
 
-       /* Trial period over ? */
-       if (!time_before(jiffies, tn->addr_trial_end)) {
-               /* Did we just leave it ? */
-               if (!tipc_own_addr(net))
-                       tipc_net_finalize(net, tn->trial_addr);
-
-               msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
-               msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
+       /* Did we just leave trial period ? */
+       if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
+               mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
+               spin_unlock_bh(&d->lock);
+               tipc_sched_net_finalize(net, tn->trial_addr);
+               return;
        }
 
        /* Adjust timeout interval according to discovery phase */
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
                        d->timer_intv = TIPC_DISC_SLOW;
                else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
                        d->timer_intv = TIPC_DISC_FAST;
+               msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+               msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
        }
 
        mod_timer(&d->timer, jiffies + d->timer_intv);
index 62199cf5a56c04db99af54dad9fc2564df8d6b05..f076edb74338247f0bad99cfaa5d23e5b14730ab 100644 (file)
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
+struct tipc_net_work {
+       struct work_struct work;
+       struct net *net;
+       u32 addr;
+};
+
+static void tipc_net_finalize(struct net *net, u32 addr);
+
 int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
 {
        if (tipc_own_id(net)) {
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
        return 0;
 }
 
-void tipc_net_finalize(struct net *net, u32 addr)
+static void tipc_net_finalize(struct net *net, u32 addr)
 {
        struct tipc_net *tn = tipc_net(net);
 
-       if (!cmpxchg(&tn->node_addr, 0, addr)) {
-               tipc_set_node_addr(net, addr);
-               tipc_named_reinit(net);
-               tipc_sk_reinit(net);
-               tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
-                                    TIPC_CLUSTER_SCOPE, 0, addr);
-       }
+       if (cmpxchg(&tn->node_addr, 0, addr))
+               return;
+       tipc_set_node_addr(net, addr);
+       tipc_named_reinit(net);
+       tipc_sk_reinit(net);
+       tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+                            TIPC_CLUSTER_SCOPE, 0, addr);
+}
+
+static void tipc_net_finalize_work(struct work_struct *work)
+{
+       struct tipc_net_work *fwork;
+
+       fwork = container_of(work, struct tipc_net_work, work);
+       tipc_net_finalize(fwork->net, fwork->addr);
+       kfree(fwork);
+}
+
+void tipc_sched_net_finalize(struct net *net, u32 addr)
+{
+       struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
+
+       if (!fwork)
+               return;
+       INIT_WORK(&fwork->work, tipc_net_finalize_work);
+       fwork->net = net;
+       fwork->addr = addr;
+       schedule_work(&fwork->work);
 }
 
 void tipc_net_stop(struct net *net)
index 09ad02b50bb1ba1e9798c41810c843d74b5e0185..b7f2e364eb99e774854ea22837107991619ef00a 100644 (file)
@@ -42,7 +42,7 @@
 extern const struct nla_policy tipc_nl_net_policy[];
 
 int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
-void tipc_net_finalize(struct net *net, u32 addr);
+void tipc_sched_net_finalize(struct net *net, u32 addr);
 void tipc_net_stop(struct net *net);
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
index 636e6131769d83f9b5f7013a32210ec809835fb2..b57b1be7252baef2f6410710e225864877c08ac1 100644 (file)
@@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
 /**
  * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
  * @m: descriptor for message info
- * @msg: received message header
+ * @skb: received message buffer
  * @tsk: TIPC port associated with message
  *
  * Note: Ancillary data is not captured if not requested by receiver.
  *
  * Returns 0 if successful, otherwise errno
  */
-static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
+static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
                                 struct tipc_sock *tsk)
 {
+       struct tipc_msg *msg;
        u32 anc_data[3];
        u32 err;
        u32 dest_type;
@@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 
        if (likely(m->msg_controllen == 0))
                return 0;
+       msg = buf_msg(skb);
 
        /* Optionally capture errored message object(s) */
        err = msg ? msg_errcode(msg) : 0;
@@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
                if (res)
                        return res;
                if (anc_data[1]) {
+                       if (skb_linearize(skb))
+                               return -ENOMEM;
+                       msg = buf_msg(skb);
                        res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
                                       msg_data(msg));
                        if (res)
@@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
 
        /* Collect msg meta data, including error code and rejected data */
        tipc_sk_set_orig_addr(m, skb);
-       rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+       rc = tipc_sk_anc_data_recv(m, skb, tsk);
        if (unlikely(rc))
                goto exit;
+       hdr = buf_msg(skb);
 
        /* Capture data if non-error msg, otherwise just set return value */
        if (likely(!err)) {
@@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
                /* Collect msg meta data, incl. error code and rejected data */
                if (!copied) {
                        tipc_sk_set_orig_addr(m, skb);
-                       rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+                       rc = tipc_sk_anc_data_recv(m, skb, tsk);
                        if (rc)
                                break;
+                       hdr = buf_msg(skb);
                }
 
                /* Copy data if msg ok, otherwise return error/partial data */
index f8d4a419f3af957a2dd53ba8cf1673da607ec7e0..467039b342b511c7c4fb95f0d7c316f7715d7c70 100644 (file)
@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        runtime->oss.channels = params_channels(params);
        runtime->oss.rate = params_rate(params);
 
-       vfree(runtime->oss.buffer);
-       runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
+       kvfree(runtime->oss.buffer);
+       runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL);
        if (!runtime->oss.buffer) {
                err = -ENOMEM;
                goto failure;
@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime;
        runtime = substream->runtime;
-       vfree(runtime->oss.buffer);
+       kvfree(runtime->oss.buffer);
        runtime->oss.buffer = NULL;
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
        snd_pcm_oss_plugin_clear(substream);
index 141c5f3a957501e0901102ce5bd519cbca73892e..31cb2acf8afcc5988d47cdb2332c005c0558d18a 100644 (file)
@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
                return -ENXIO;
        size /= 8;
        if (plugin->buf_frames < frames) {
-               vfree(plugin->buf);
-               plugin->buf = vmalloc(size);
+               kvfree(plugin->buf);
+               plugin->buf = kvzalloc(size, GFP_KERNEL);
                plugin->buf_frames = frames;
        }
        if (!plugin->buf) {
@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
        if (plugin->private_free)
                plugin->private_free(plugin);
        kfree(plugin->buf_channels);
-       vfree(plugin->buf);
+       kvfree(plugin->buf);
        kfree(plugin);
        return 0;
 }
index 0a24037184c33e2cd53554d7cf9e4dd6586dd19b..0a567634e5faddafd470220c7a9ebaff88ce8cef 100644 (file)
@@ -1177,6 +1177,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
+       SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
@@ -8413,7 +8414,7 @@ static void ca0132_free(struct hda_codec *codec)
 
        snd_hda_power_down(codec);
        if (spec->mem_base)
-               iounmap(spec->mem_base);
+               pci_iounmap(codec->bus->pci, spec->mem_base);
        kfree(spec->spec_init_verbs);
        kfree(codec->spec);
 }
@@ -8488,7 +8489,7 @@ static void ca0132_config(struct hda_codec *codec)
                break;
        case QUIRK_AE5:
                codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
-               snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+               snd_hda_apply_pincfgs(codec, ae5_pincfgs);
                break;
        }
 
index fa61674a560504224b9ee8ef35da2d4ddf85c6fe..970bc44a378b8d32f1f8a43045d224bc34d4f222 100644 (file)
@@ -6481,6 +6481,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
        SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
index 1dd5f4fcffd53f375ba00479bfed37d867399c4a..db66a952c173958395f9134c1fa029fbbd6d3950 100644 (file)
@@ -129,7 +129,7 @@ WARNINGS += $(call cc-supports,-Wno-pointer-sign)
 WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
 WARNINGS += -Wshadow
 
-CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
+override CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
                -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
 
 UTIL_OBJS =  utils/helpers/amd.o utils/helpers/msr.o \
@@ -156,12 +156,12 @@ LIB_SRC =         lib/cpufreq.c lib/cpupower.c lib/cpuidle.c
 LIB_OBJS =     lib/cpufreq.o lib/cpupower.o lib/cpuidle.o
 LIB_OBJS :=    $(addprefix $(OUTPUT),$(LIB_OBJS))
 
-CFLAGS +=      -pipe
+override CFLAGS +=     -pipe
 
 ifeq ($(strip $(NLS)),true)
        INSTALL_NLS += install-gmo
        COMPILE_NLS += create-gmo
-       CFLAGS += -DNLS
+       override CFLAGS += -DNLS
 endif
 
 ifeq ($(strip $(CPUFREQ_BENCH)),true)
@@ -175,7 +175,7 @@ ifeq ($(strip $(STATIC)),true)
         UTIL_SRC += $(LIB_SRC)
 endif
 
-CFLAGS += $(WARNINGS)
+override CFLAGS += $(WARNINGS)
 
 ifeq ($(strip $(V)),false)
        QUIET=@
@@ -188,10 +188,10 @@ export QUIET ECHO
 
 # if DEBUG is enabled, then we do not strip or optimize
 ifeq ($(strip $(DEBUG)),true)
-       CFLAGS += -O1 -g -DDEBUG
+       override CFLAGS += -O1 -g -DDEBUG
        STRIPCMD = /bin/true -Since_we_are_debugging
 else
-       CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
+       override CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
        STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
 endif
 
index d79ab161cc75f91ab6c7c1f0344387f14216663f..f68b4bc5527397f285cb1b7bedcf5347ebca52cf 100644 (file)
@@ -9,7 +9,7 @@ endif
 ifeq ($(strip $(STATIC)),true)
 LIBS = -L../ -L$(OUTPUT) -lm
 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
-       $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o
+       $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
 else
 LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
index 59af84b8ef455dfb33053948ec0fdef24a70f7e9..b1b6c43644e79be755cc867fb91717ecd1e631c3 100644 (file)
@@ -13,10 +13,10 @@ INSTALL = /usr/bin/install
 default: all
 
 $(OUTPUT)centrino-decode: ../i386/centrino-decode.c
-       $(CC) $(CFLAGS) -o $@ $<
+       $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
 
 $(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
-       $(CC) $(CFLAGS) -o $@ $<
+       $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
 
 all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
 
index 1b993fe1ce2372a5e6fed1d057b6b2a1dc64d5d6..0c0f3e3f0d8038e138077b40d428faff6083cff8 100644 (file)
@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
 
        snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
                         cpu, fname);
-       return sysfs_read_file(path, buf, buflen);
+       return cpupower_read_sysfs(path, buf, buflen);
 }
 
 /* helper function to write a new value to a /sys file */
index 9bd4c7655fdb2a4942d6aa0012174f232e25ee4f..852d25462388c2d61b67bf5c4ca162d591b16281 100644 (file)
@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
 
        snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
 
-       return sysfs_read_file(path, buf, buflen);
+       return cpupower_read_sysfs(path, buf, buflen);
 }
 
 
index 9c395ec924def2538e973eab7724472eea34cb8d..9711d628b0f440151e239d1986bdce4fb8be59ef 100644 (file)
@@ -15,7 +15,7 @@
 #include "cpupower.h"
 #include "cpupower_intern.h"
 
-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
+unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
 {
        int fd;
        ssize_t numread;
@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
 
        snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
                         cpu, fname);
-       if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
+       if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
                return -1;
        *result = strtol(linebuf, &endp, 0);
        if (endp == linebuf || errno == ERANGE)
index 92affdfbe4174e13f5a5fecded202d597b256dbc..4887c76d23f868c060364dab8795702570a91e6f 100644 (file)
@@ -3,4 +3,4 @@
 #define MAX_LINE_LEN 4096
 #define SYSFS_PATH_MAX 255
 
-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
+unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
index 87a04a8a5945c21222a21c103e5ed88bff0f5c67..7607ba3e3cbe4eba89dfe5a35badae9182b974a9 100755 (executable)
@@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command):
     (rawout, serr) = proc.communicate()
 
     if proc.returncode != 0 and len(serr) > 0:
-        foutput = serr.decode("utf-8")
+        foutput = serr.decode("utf-8", errors="ignore")
     else:
-        foutput = rawout.decode("utf-8")
+        foutput = rawout.decode("utf-8", errors="ignore")
 
     proc.stdout.close()
     proc.stderr.close()
@@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
                   file=sys.stderr)
             print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
                   file=sys.stderr)
+            print("returncode {}; expected {}".format(proc.returncode,
+                                                      exit_codes))
             print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
             print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
             print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
@@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx):
         print('-----> execute stage')
     pm.call_pre_execute()
     (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
-    exit_code = p.returncode
+    if p:
+        exit_code = p.returncode
+    else:
+        exit_code = None
+
     pm.call_post_execute()
 
-    if (exit_code != int(tidx["expExitCode"])):
+    if (exit_code is None or exit_code != int(tidx["expExitCode"])):
         result = False
-        print("exit:", exit_code, int(tidx["expExitCode"]))
+        print("exit: {!r}".format(exit_code))
+        print("exit: {}".format(int(tidx["expExitCode"])))
+        #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
         print(procout)
     else:
         if args.verbose > 0: