Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
Archit Taneja <archit@ti.com>
Arnaud Patard <arnaud.patard@rtp-net.org>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Christoph Hellwig <hch@lst.de>
Linas Vepstas <linas@austin.ibm.com>
Mark Brown <broonie@sirena.org.uk>
Matthieu CASTET <castet.matthieu@free.fr>
+Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
N: Mauro Carvalho Chehab
E: m.chehab@samsung.org
+E: mchehab@osg.samsung.com
E: mchehab@infradead.org
D: Media subsystem (V4L/DVB) drivers and core
D: EDAC drivers and EDAC 3.0 core rework
What: /config/usb-gadget/gadget/functions/uvc.name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: UVC function directory
streaming_maxburst - 0..15 (ss only)
What: /config/usb-gadget/gadget/functions/uvc.name/control
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Control descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/class
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Super speed control class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Full speed control class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Terminal descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Output terminal descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default output terminal descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Camera terminal descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default camera terminal descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/control/processing
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Processing unit descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default processing unit descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/control/header
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Control header descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific control header descriptors
dwClockFrequency
bcdUVC
What: /config/usb-gadget/gadget/functions/uvc.name/streaming
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Streaming descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Super speed streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: High speed streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Full speed streaming class descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Color matching descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Default color matching descriptors
All attributes read only:
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: MJPEG format descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific MJPEG format descriptors
All attributes read only,
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific MJPEG frame descriptors
dwFrameInterval - indicates how frame interval can be
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Uncompressed format descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific uncompressed format descriptors
bmaControls - this format's data for bmaControls in
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific uncompressed frame descriptors
dwFrameInterval - indicates how frame interval can be
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Streaming header descriptors
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
Date: Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
Description: Specific streaming header descriptors
All attributes read only:
-What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
Date: March 2014
KernelVersion: 3.15
Contact: Matt Ranostay <mranostay@gmail.com>
-Analog Device ADV7511(W)/13 HDMI Encoders
+Analog Device ADV7511(W)/13/33 HDMI Encoders
-----------------------------------------
-The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP.
+S/PDIF, CEC and HDCP. ADV7533 supports the DSI interface for input pixels, while
+the others support RGB interface.
Required properties:
-- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- compatible: Should be one of:
+ "adi,adv7511"
+ "adi,adv7511w"
+ "adi,adv7513"
+ "adi,adv7533"
+
- reg: I2C slave address
The ADV7511 supports a large number of input data formats that differ by their
- adi,input-justification: The input bit justification ("left", "evenly",
"right").
+The following properties are required for ADV7533:
+
+- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
+ be one of 1, 2, 3 or 4.
+
Optional properties:
- interrupts: Specifier for the ADV7511 interrupt
- adi,embedded-sync: The input uses synchronization signals embedded in the
data stream (similar to BT.656). Defaults to separate H/V synchronization
signals.
+- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
+ generator. The chip will rely on the sync signals in the DSI data lanes,
+ rather than generate its own timings for HDMI output.
Required nodes:
The ADV7511 has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-- Video port 0 for the RGB or YUV input
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+ remote endpoint phandle should be a reference to a valid mipi_dsi_host device
+ node.
- Video port 1 for the HDMI output
platform specific such as:
* "samsung,exynos5-dp"
* "rockchip,rk3288-dp"
+ * "rockchip,rk3399-edp"
-reg:
physical base address of the controller and length
of memory mapped region.
--- /dev/null
+Toshiba TC358767 eDP bridge bindings
+
+Required properties:
+ - compatible: "toshiba,tc358767"
+ - reg: i2c address of the bridge, 0x68 or 0x0f, depending on bootstrap pins
+ - clock-names: should be "ref"
+ - clocks: OF device-tree clock specification for refclk input. The reference
+ clock rate must be 13 MHz, 19.2 MHz, 26 MHz, or 38.4 MHz.
+
+Optional properties:
+ - shutdown-gpios: OF device-tree gpio specification for SD pin
+ (active high shutdown input)
+ - reset-gpios: OF device-tree gpio specification for RSTX pin
+ (active low system reset)
+ - ports: the ports node can contain video interface port nodes to connect
+ to a DPI/DSI source and to an eDP/DP sink according to [1][2]:
+ - port@0: DSI input port
+ - port@1: DPI input port
+ - port@2: eDP/DP output port
+
+[1]: Documentation/devicetree/bindings/graph.txt
+[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ edp-bridge@68 {
+ compatible = "toshiba,tc358767";
+ reg = <0x68>;
+ shutdown-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ clock-names = "ref";
+ clocks = <&edp_refclk>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ bridge_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
- clock-names: Should be "dcu" and "pix"
See ../clocks/clock-bindings.txt for details.
- big-endian Boolean property, LS1021A DCU registers are big-endian.
-- fsl,panel: The phandle to panel node.
+- port Video port for the panel output
Optional properties:
- fsl,tcon: The phandle to the timing controller node.
clocks = <&platform_clk 0>, <&platform_clk 0>;
clock-names = "dcu", "pix";
big-endian;
- fsl,panel = <&panel>;
fsl,tcon = <&tcon>;
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&panel_out>;
+ };
+ };
};
be 0 or 1, since we have 2 DSI controllers at most for now.
- interrupts: The interrupt signal from the DSI block.
- power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
- See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- clocks: Phandles to device clocks.
- clock-names: the following clocks are required:
* "mdp_core_clk"
* "iface_clk"
* "core_clk"
For DSIv2, we need an additional clock:
* "src_clk"
+- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
+- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
+ by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node
- vddio-supply: phandle to vdd-io regulator device node
- vdda-supply: phandle to vdda regulator device node
-- qcom,dsi-phy: phandle to DSI PHY device node
+- phys: phandle to DSI PHY device node
+- phy-names: the name of the corresponding PHY device
- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
+- ports: Contains 2 DSI controller ports as child nodes. Each port contains
+ an endpoint subnode as defined in [2] and [3].
Optional properties:
- panel@0: Node of panel connected to this DSI controller.
- See files in Documentation/devicetree/bindings/display/panel/ for each supported
- panel.
+ See files in [4] for each supported panel.
- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
driving a panel which needs 2 DSI links.
- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
- pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active)
- pinctrl-n: the "sleep" pinctrl state
-- port: DSI controller output port, containing one endpoint subnode.
+- ports: contains DSI controller input and output ports as children, each
+ containing one endpoint subnode.
DSI Endpoint properties:
- - remote-endpoint: set to phandle of the connected panel's endpoint.
- See Documentation/devicetree/bindings/graph.txt for device graph info.
- - qcom,data-lane-map: this describes how the logical DSI lanes are mapped
- to the physical lanes on the given platform. The value contained in
- index n describes what logical data lane is mapped to the physical data
- lane n (DATAn, where n lies between 0 and 3).
+ - remote-endpoint: For port@0, set to phandle of the connected panel/bridge's
+ input endpoint. For port@1, set to the MDP interface output. See [2] for
+ device graph info.
+
+ - data-lanes: this describes how the physical DSI data lanes are mapped
+ to the logical lanes on the given platform. The value contained in
+ index n describes what physical lane is mapped to the logical lane n
+ (DATAn, where n lies between 0 and 3). The clock lane position is fixed
+ and can't be changed. Hence, they aren't a part of the DT bindings. See
+ [3] for more info on the data-lanes property.
For example:
- qcom,data-lane-map = <3 0 1 2>;
+ data-lanes = <3 0 1 2>;
- The above mapping describes that the logical data lane DATA3 is mapped to
- the physical data lane DATA0, logical DATA0 to physical DATA1, logic DATA1
- to phys DATA2 and logic DATA2 to phys DATA3.
+ The above mapping describes that the logical data lane DATA0 is mapped to
+ the physical data lane DATA3, logical DATA1 to physical DATA0, logic DATA2
+ to phys DATA1 and logic DATA3 to phys DATA2.
There are only a limited number of physical to logical mappings possible:
-
- "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3;
- "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
- "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3;
- "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3;
- "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3;
- "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3;
- "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3;
- "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3;
+ <0 1 2 3>
+ <1 2 3 0>
+ <2 3 0 1>
+ <3 0 1 2>
+ <0 3 2 1>
+ <1 0 3 2>
+ <2 1 0 3>
+ <3 2 1 0>
DSI PHY:
Required properties:
* "dsi_pll"
* "dsi_phy"
* "dsi_phy_regulator"
+- clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating
+ 2 clocks: A byte clock (index 0), and a pixel clock (index 1).
- qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should
be 0 or 1, since we have 2 DSI PHYs at most for now.
- power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
- See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface_clk"
- vddio-supply: phandle to vdd-io regulator device node
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted.
+[1] Documentation/devicetree/bindings/clocks/clock-bindings.txt
+[2] Documentation/devicetree/bindings/graph.txt
+[3] Documentation/devicetree/bindings/media/video-interfaces.txt
+[4] Documentation/devicetree/bindings/display/panel/
+
Example:
- mdss_dsi0: qcom,mdss_dsi@fd922800 {
+ dsi0: dsi@fd922800 {
compatible = "qcom,mdss-dsi-ctrl";
qcom,dsi-host-index = <0>;
- interrupt-parent = <&mdss_mdp>;
+ interrupt-parent = <&mdp>;
interrupts = <4 0>;
reg-names = "dsi_ctrl";
reg = <0xfd922800 0x200>;
<&mmcc MDSS_AHB_CLK>,
<&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_PCLK0_CLK>;
+
+ assigned-clocks =
+ <&mmcc BYTE0_CLK_SRC>,
+ <&mmcc PCLK0_CLK_SRC>;
+ assigned-clock-parents =
+ <&dsi_phy0 0>,
+ <&dsi_phy0 1>;
+
vdda-supply = <&pma8084_l2>;
vdd-supply = <&pma8084_l22>;
vddio-supply = <&pma8084_l12>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ phys = <&dsi_phy0>;
+ phy-names ="dsi-phy";
qcom,dual-dsi-mode;
qcom,master-dsi;
qcom,sync-dual-dsi;
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&mdss_dsi_active>;
- pinctrl-1 = <&mdss_dsi_suspend>;
+ pinctrl-0 = <&dsi_active>;
+ pinctrl-1 = <&dsi_suspend>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&mdp_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
panel: panel@0 {
compatible = "sharp,lq101r1sx01";
};
};
};
-
- port {
- dsi0_out: endpoint {
- remote-endpoint = <&panel_in>;
- lanes = <0 1 2 3>;
- };
- };
};
- mdss_dsi_phy0: qcom,mdss_dsi_phy@fd922a00 {
+ dsi_phy0: dsi-phy@fd922a00 {
compatible = "qcom,dsi-phy-28nm-hpm";
qcom,dsi-phy-index = <0>;
reg-names =
<0xfd922d80 0x7b>;
clock-names = "iface_clk";
clocks = <&mmcc MDSS_AHB_CLK>;
+ #clock-cells = <1>;
vddio-supply = <&pma8084_l12>;
qcom,dsi-phy-regulator-ldo-mode;
+++ /dev/null
-Qualcomm adreno/snapdragon display controller
-
-Required properties:
-- compatible:
- * "qcom,mdp4" - mdp4
- * "qcom,mdp5" - mdp5
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The interrupt signal from the display controller.
-- connectors: array of phandles for output device(s)
-- clocks: device clocks
- See ../clocks/clock-bindings.txt for details.
-- clock-names: the following clocks are required.
- For MDP4:
- * "core_clk"
- * "iface_clk"
- * "lut_clk"
- * "src_clk"
- * "hdmi_clk"
- * "mdp_clk"
- For MDP5:
- * "bus_clk"
- * "iface_clk"
- * "core_clk_src"
- * "core_clk"
- * "lut_clk" (some MDP5 versions may not need this)
- * "vsync_clk"
-
-Optional properties:
-- gpus: phandle for gpu device
-- clock-names: the following clocks are optional:
- * "lut_clk"
-
-Example:
-
-/ {
- ...
-
- mdp: qcom,mdp@5100000 {
- compatible = "qcom,mdp4";
- reg = <0x05100000 0xf0000>;
- interrupts = <GIC_SPI 75 0>;
- connectors = <&hdmi>;
- gpus = <&gpu>;
- clock-names =
- "core_clk",
- "iface_clk",
- "lut_clk",
- "src_clk",
- "hdmi_clk",
- "mdp_clk";
- clocks =
- <&mmcc MDP_SRC>,
- <&mmcc MDP_AHB_CLK>,
- <&mmcc MDP_LUT_CLK>,
- <&mmcc TV_SRC>,
- <&mmcc HDMI_TV_CLK>,
- <&mmcc MDP_TV_CLK>;
- };
-};
--- /dev/null
+Qualcomm adreno/snapdragon MDP4 display controller
+
+Description:
+
+This is the bindings documentation for the MDP4 display controller found in
+SoCs like MSM8960, APQ8064 and MSM8660.
+
+Required properties:
+- compatible:
+ * "qcom,mdp4" - mdp4
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the display controller.
+- clocks: device clocks
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+ * "core_clk"
+ * "iface_clk"
+ * "bus_clk"
+ * "lut_clk"
+ * "hdmi_clk"
+ * "tv_clk"
+- ports: contains the list of output ports from MDP. These connect to interfaces
+ that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
+ special case since it is a part of the MDP block itself).
+
+ Each output port contains an endpoint that describes how it is connected to an
+ external interface. These are described by the standard properties documented
+ here:
+ Documentation/devicetree/bindings/graph.txt
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ The output port mappings are:
+ Port 0 -> LCDC/LVDS
+ Port 1 -> DSI1 Cmd/Video
+ Port 2 -> DSI2 Cmd/Video
+ Port 3 -> DTV
+
+Optional properties:
+- clock-names: the following clocks are optional:
+ * "lut_clk"
+
+Example:
+
+/ {
+ ...
+
+ hdmi: hdmi@4a00000 {
+ ...
+ ports {
+ ...
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp_dtv_out>;
+ };
+ };
+ ...
+ };
+ ...
+ };
+
+ ...
+
+ mdp: mdp@5100000 {
+ compatible = "qcom,mdp4";
+ reg = <0x05100000 0xf0000>;
+ interrupts = <GIC_SPI 75 0>;
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "lut_clk",
+ "hdmi_clk",
+ "tv_clk";
+ clocks =
+ <&mmcc MDP_CLK>,
+ <&mmcc MDP_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>,
+ <&mmcc MDP_LUT_CLK>,
+ <&mmcc HDMI_TV_CLK>,
+ <&mmcc MDP_TV_CLK>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp_lvds_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdp_dsi1_out: endpoint {
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ mdp_dsi2_out: endpoint {
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ mdp_dtv_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+};
--- /dev/null
+Qualcomm adreno/snapdragon MDP5 display controller
+
+Description:
+
+This is the bindings documentation for the Mobile Display Subsytem(MDSS) that
+encapsulates sub-blocks like MDP5, DSI, HDMI, eDP etc, and the MDP5 display
+controller found in SoCs like MSM8974, APQ8084, MSM8916, MSM8994 and MSM8996.
+
+MDSS:
+Required properties:
+- compatible:
+ * "qcom,mdss" - MDSS
+- reg: Physical base address and length of the controller's registers.
+- reg-names: The names of register regions. The following regions are required:
+ * "mdss_phys"
+ * "vbif_phys"
+- interrupts: The interrupt signal from MDSS.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+ source, should be 1.
+- power-domains: a power domain consumer specifier according to
+ Documentation/devicetree/bindings/power/power_domain.txt
+- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+ * "iface_clk"
+ * "bus_clk"
+ * "vsync_clk"
+- #address-cells: number of address cells for the MDSS children. Should be 1.
+- #size-cells: Should be 1.
+- ranges: parent bus address space is the same as the child bus address space.
+
+Optional properties:
+- clock-names: the following clocks are optional:
+ * "lut_clk"
+
+MDP5:
+Required properties:
+- compatible:
+ * "qcom,mdp5" - MDP5
+- reg: Physical base address and length of the controller's registers.
+- reg-names: The names of register regions. The following regions are required:
+ * "mdp_phys"
+- interrupts: Interrupt line from MDP5 to MDSS interrupt controller.
+- interrupt-parent: phandle to the MDSS block
+ through MDP block
+- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+- * "bus_clk"
+- * "iface_clk"
+- * "core_clk"
+- * "vsync_clk"
+- ports: contains the list of output ports from MDP. These connect to interfaces
+ that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
+ special case since it is a part of the MDP block itself).
+
+ Each output port contains an endpoint that describes how it is connected to an
+ external interface. These are described by the standard properties documented
+ here:
+ Documentation/devicetree/bindings/graph.txt
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ The availability of output ports can vary across SoC revisions:
+
+ For MSM8974 and APQ8084:
+ Port 0 -> MDP_INTF0 (eDP)
+ Port 1 -> MDP_INTF1 (DSI1)
+ Port 2 -> MDP_INTF2 (DSI2)
+ Port 3 -> MDP_INTF3 (HDMI)
+
+ For MSM8916:
+ Port 0 -> MDP_INTF1 (DSI1)
+
+ For MSM8994 and MSM8996:
+ Port 0 -> MDP_INTF1 (DSI1)
+ Port 1 -> MDP_INTF2 (DSI2)
+ Port 2 -> MDP_INTF3 (HDMI)
+
+Optional properties:
+- clock-names: the following clocks are optional:
+ * "lut_clk"
+
+Example:
+
+/ {
+ ...
+
+ mdss: mdss@1a00000 {
+ compatible = "qcom,mdss";
+ reg = <0x1a00000 0x1000>,
+ <0x1ac8000 0x3000>;
+ reg-names = "mdss_phys", "vbif_phys";
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "vsync_clk"
+
+ interrupts = <0 72 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@1a01000 {
+ compatible = "qcom,mdp5";
+ reg = <0x1a01000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "vsync_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@1a98000 {
+ ...
+ ports {
+ ...
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&mdp5_intf1_out>;
+ };
+ };
+ ...
+ };
+ ...
+ };
+
+ dsi_phy0: dsi-phy@1a98300 {
+ ...
+ };
+ };
+};
--- /dev/null
+LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp079qx1-sp0v"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
--- /dev/null
+LG 9.7" (2048x1536 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp097qx1-spa1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
--- /dev/null
+Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,lsn122dl01-c01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
--- /dev/null
+Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq101k1ly04"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
--- /dev/null
+Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq123p1jx31"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
--- /dev/null
+Starry 12.2" (1920x1200 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "starry,kr122ea0sra"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
================================
Required properties:
-- compatible: "rockchip,rk3288-edp";
+- compatible: "rockchip,rk3288-dp",
+ "rockchip,rk3399-edp";
- reg: physical base address of the controller and length
Port 0: contained 2 endpoints, connecting to the output of vop.
Port 1: contained 1 endpoint, connecting to the input of panel.
+Optional property for different chips:
+- clocks: from common clock binding: handle to grf_vio clock.
+
+- clock-names: from common clock binding:
+ Required elements: "grf"
+
For the below properties, please refer to Analogix DP binding document:
* Documentation/devicetree/bindings/drm/bridge/analogix_dp.txt
- phys (required)
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- sor: clock input for the SOR hardware
+ - source: source clock for the SOR clock
- parent: input for the pixel clock
- dp: reference clock for the SOR clock
- safe: safe reference for the SOR clock during power up
- nvidia,dpaux: phandle to a DispayPort AUX interface
- dpaux: DisplayPort AUX interface
- - compatible: For Tegra124, must contain "nvidia,tegra124-dpaux". Otherwise,
- must contain '"nvidia,<chip>-dpaux", "nvidia,tegra124-dpaux"', where
- <chip> is tegra132.
+ - compatible : Should contain one of the following:
+ - "nvidia,tegra124-dpaux": for Tegra124 and Tegra132
+ - "nvidia,tegra210-dpaux": for Tegra210
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.
- reset-names: Must include the following entries:
- dpaux
- vdd-supply: phandle of a supply that powers the DisplayPort link
+ - i2c-bus: Subnode where I2C slave devices are listed. This subnode
+ must be always present. If there are no I2C slave devices, an empty
+ node should be added. See ../../i2c/i2c.txt for more information.
+
+ See ../pinctrl/nvidia,tegra124-dpaux-padctl.txt for information
+ regarding the DPAUX pad controller bindings.
Example:
- "ti,ina220" for ina220
- "ti,ina226" for ina226
- "ti,ina230" for ina230
+ - "ti,ina231" for ina231
- reg: I2C address
Optional properties:
- our-claim-gpio: The GPIO that we use to claim the bus.
- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
Note that some implementations may only support a single other master.
-- Standard I2C mux properties. See mux.txt in this directory.
-- Single I2C child bus node at reg 0. See mux.txt in this directory.
+- Standard I2C mux properties. See i2c-mux.txt in this directory.
+- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
Optional properties:
- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
parents.
-Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
+Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
+directory.
Example:
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
- mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
Optional properties:
- idle-state: value to set the muxer to when idle. When no value is
be numbered based on their order in the device tree.
Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output using the list of
+in the relevant node's reg property will be output using the list of
GPIOs, the first in the list holding the least-significant value.
If an idle state is defined, using the idle-state (optional) property,
* Standard pinctrl properties that specify the pin mux state for each child
bus. See ../pinctrl/pinctrl-bindings.txt.
-* Standard I2C mux properties. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
For each named state defined in the pinctrl-names property, an I2C child bus
will be created. I2C child bus numbers are assigned based on the index into
- compatible: i2c-mux-reg
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
port is connected to.
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
Optional properties:
- reg: this pair of <offset size> specifies the register to control the mux.
given, it defaults to the last value used.
Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output to the register.
+in the relevant node's reg property will be output to the register.
If an idle state is defined, using the idle-state (optional) property,
whenever an access is not being made to a device on a child bus, the
initialization. This is an array of 28 values(u8).
- marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
- firmware will use the pin to wakeup host system.
+ firmware will use the pin to wakeup host system (u16).
- marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
platform. The value will be configured to firmware. This
- is needed to work chip's sleep feature as expected.
+ is needed to work chip's sleep feature as expected (u16).
- interrupt-parent: phandle of the parent interrupt controller
- interrupts : interrupt pin number to the cpu. Driver will request an irq based
on this interrupt number. During system suspend, the irq will be
0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
0x00 0x00 0xf0 0x00>;
- marvell,wakeup-pin = <0x0d>;
- marvell,wakeup-gap-ms = <0x64>;
+ marvell,wakeup-pin = /bits/ 16 <0x0d>;
+ marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
};
};
--- /dev/null
+Device tree binding for NVIDIA Tegra DPAUX pad controller
+========================================================
+
+The Tegra Display Port Auxiliary (DPAUX) pad controller manages two pins
+which can be assigned to either the DPAUX channel or to an I2C
+controller.
+
+This document defines the device-specific binding for the DPAUX pad
+controller. Refer to pinctrl-bindings.txt in this directory for generic
+information about pin controller device tree bindings. Please refer to
+the binding document ../display/tegra/nvidia,tegra20-host1x.txt for more
+details on the DPAUX binding.
+
+Pin muxing:
+-----------
+
+Child nodes contain the pinmux configurations following the conventions
+from the pinctrl-bindings.txt document.
+
+Since only three configurations are possible, only three child nodes are
+needed to describe the pin mux'ing options for the DPAUX pads.
+Furthermore, given that the pad functions are only applicable to a
+single set of pads, the child nodes only need to describe the pad group
+the functions are being applied to rather than the individual pads.
+
+Required properties:
+- groups: Must be "dpaux-io"
+- function: Must be either "aux", "i2c" or "off".
+
+Example:
+--------
+
+ dpaux@545c0000 {
+ ...
+
+ state_dpaux_aux: pinmux-aux {
+ groups = "dpaux-io";
+ function = "aux";
+ };
+
+ state_dpaux_i2c: pinmux-i2c {
+ groups = "dpaux-io";
+ function = "i2c";
+ };
+
+ state_dpaux_off: pinmux-off {
+ groups = "dpaux-io";
+ function = "off";
+ };
+ };
+
+ ...
+
+ i2c@7000d100 {
+ ...
+ pinctrl-0 = <&state_dpaux_i2c>;
+ pinctrl-1 = <&state_dpaux_off>;
+ pinctrl-names = "default", "idle";
+ status = "disabled";
+ };
spansion Spansion Inc.
sprd Spreadtrum Communications Inc.
st STMicroelectronics
+starry Starry Electronic Technology (ShenZhen) Co., LTD
startek Startek
ste ST-Ericsson
stericsson ST-Ericsson
SUNW Sun Microsystems, Inc
tbs TBS Technologies
tcl Toby Churchill Ltd.
+technexion TechNexion
technologic Technologic Systems
thine THine Electronics, Inc.
ti Texas Instruments
truly Truly Semiconductors Limited
tyan Tyan Computer Corporation
upisemi uPI Semiconductor Corp.
+uniwest United Western Technologies Corp (UniWest)
urt United Radiant Technology Corporation
usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor
start_comm = "swapper/2\000\000\000\000\000\000"
}
- o Dig into a radix tree data structure, such as the IRQ descriptors:
- (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
- $6 = {
- irq_common_data = {
- state_use_accessors = 67584,
- handler_data = 0x0 <__vectors_start>,
- msi_desc = 0x0 <__vectors_start>,
- affinity = {{
- bits = {65535}
- }}
- },
- irq_data = {
- mask = 0,
- irq = 18,
- hwirq = 27,
- common = 0xee803d80,
- chip = 0xc0eb0854 <gic_data>,
- domain = 0xee808000,
- parent_data = 0x0 <__vectors_start>,
- chip_data = 0xc0eb0854 <gic_data>
- } <... trimmed ...>
List of commands and functions
------------------------------
Drivers that require multiple interrupt handlers can't use the managed
IRQ registration functions. In that case IRQs must be registered and
unregistered manually (usually with the :c:func:`request_irq()` and
-:c:func:`free_irq()` functions, or their devm_\* equivalent).
+:c:func:`free_irq()` functions, or their :c:func:`devm_request_irq()` and
+:c:func:`devm_free_irq()` equivalents).
When manually registering IRQs, drivers must not set the
DRIVER_HAVE_IRQ driver feature flag, and must not provide the
Open and Close
--------------
-int (\*firstopen) (struct drm_device \*); void (\*lastclose) (struct
-drm_device \*); int (\*open) (struct drm_device \*, struct drm_file
-\*); void (\*preclose) (struct drm_device \*, struct drm_file \*);
-void (\*postclose) (struct drm_device \*, struct drm_file \*);
- Open and close handlers. None of those methods are mandatory.
+Open and close handlers. None of those methods are mandatory::
+
+ int (*firstopen) (struct drm_device *);
+ void (*lastclose) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *);
+ void (*postclose) (struct drm_device *, struct drm_file *);
The firstopen method is called by the DRM core for legacy UMS (User Mode
Setting) drivers only when an application opens a device that has no
The lastclose method should restore CRTC and plane properties to default
value, so that a subsequent open of the device will not inherit state
from the previous user. It can also be used to execute delayed power
-switching state changes, e.g. in conjunction with the vga_switcheroo
-infrastructure (see ?). Beyond that KMS drivers should not do any
+switching state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
+infrastructure. Beyond that KMS drivers should not do any
further cleanup. Only legacy UMS drivers might need to clean up device
state so that the vga console or an independent fbdev driver could take
over.
DRM Format Handling
-------------------
-.. kernel-doc:: include/drm/drm_fourcc.h
- :internal:
-
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_irq.c
:export:
-.. kernel-doc:: include/drm/drmP.h
- :functions: drm_crtc_vblank_waitqueue
+.. kernel-doc:: include/drm/drm_irq.h
+ :internal:
+.. _vga_switcheroo:
+
==============
VGA Switcheroo
==============
.. kernel-doc:: include/linux/apple-gmux.h
:internal:
-
-.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/vga/vga_switcheroo.c
-
-.. WARNING: DOCPROC directive not supported: !Cinclude/linux/vga_switcheroo.h
-
-.. WARNING: DOCPROC directive not supported: !Cdrivers/platform/x86/apple-gmux.c
however, it is better to use the API function led_blink_set(), as it
will check and implement software fallback if necessary.
-To turn off blinking again, use the API function led_brightness_set()
-as that will not just set the LED brightness but also stop any software
+To turn off blinking, use the API function led_brightness_set()
+with brightness value LED_OFF, which should stop any software
timers that may have been required for blinking.
The blink_set() function should choose a user friendly blinking value
3. scmd recovered
ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- - shost->host_failed--
- clear scmd->eh_eflags
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
LOCKING: none
+ CONCURRENCY: at most one thread per separate eh_work_q to
+ keep queue manipulation lockless
4. EH completes
ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
- layer of failure.
+ layer of failure. May be called concurrently but must have
+ a no more than one thread per separate eh_work_q to
+ manipulate the queue locklessly
- scmd is removed from eh_done_q and scmd->eh_entry is cleared
- if retry is necessary, scmd is requeued using
scsi_queue_insert()
- otherwise, scsi_finish_command() is invoked for scmd
+ - zero shost->host_failed
LOCKING: queue or finish function performs appropriate locking
MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
succeed, and notes the location of the bounds directory. Userspace is
- expected to keep the bounds directory at that locationWe note it
+ expected to keep the bounds directory at that location. We note it
instead of reading it each time because the 'xsave' operation needed
to access the bounds directory register is an expensive operation.
4) If the application needs to spill bounds out of the 4 registers, it
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:
87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
88 struct {
This is allowed architecturally. See more information "Intel(R) Architecture
Instruction Set Extensions Programming Reference" (9.3.4).
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
in-use bounds table since it does not recognize sharing.
from areas other than the one we are trying to flush will be
destroyed and must be refilled later, at some cost.
2. Use the invlpg instruction to invalidate a single page at a
- time. This could potentialy cost many more instructions, but
+ time. This could potentially cost many more instructions, but
it is a much more precise operation, causing no collateral
damage to other TLB entries.
work.
3. The size of the TLB. The larger the TLB, the more collateral
damage we do with a full flush. So, the larger the TLB, the
- more attrative an individual flush looks. Data and
+ more attractive an individual flush looks. Data and
instructions have separate TLBs, as do different page sizes.
4. The microarchitecture. The TLB has become a multi-level
cache on modern CPUs, and the global flushes have become more
check_interval
How often to poll for corrected machine check errors, in seconds
- (Note output is hexademical). Default 5 minutes. When the poller
+ (Note output is hexadecimal). Default 5 minutes. When the poller
finds MCEs it triggers an exponential speedup (poll more often) on
the polling interval. When the poller stops finding MCEs, it
triggers an exponential backoff (poll less often) on the polling
L: linux-alpha@vger.kernel.org
F: arch/alpha/
+ALPS PS/2 TOUCHPAD DRIVER
+R: Pali Rohár <pali.rohar@gmail.com>
+F: drivers/input/mouse/alps.*
+
ALTERA MAILBOX DRIVER
M: Ley Foon Tan <lftan@altera.com>
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
M: Sascha Hauer <kernel@pengutronix.de>
+R: Fabio Estevam <fabio.estevam@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: drivers/edac/altera_edac.
ARM/STI ARCHITECTURE
-M: Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
-M: Maxime Coquelin <maxime.coquelin@st.com>
M: Patrice Chotard <patrice.chotard@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kernel@stlinux.com
ARM/STM32 ARCHITECTURE
M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+M: Alexandre Torgue <alexandre.torgue@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
F: net/ax25/
AZ6007 DVB DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: fs/btrfs/
BTTV VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: net/caif/
CALGARY x86-64 IOMMU
-M: Muli Ben-Yehuda <muli@il.ibm.com>
-M: "Jon D. Mason" <jdmason@kudzu.us>
-L: discuss@x86-64.org
+M: Muli Ben-Yehuda <mulix@mulix.org>
+M: Jon Mason <jdmason@kudzu.us>
+L: iommu@lists.linux-foundation.org
S: Maintained
F: arch/x86/kernel/pci-calgary_64.c
F: arch/x86/kernel/tce_64.c
L: linux-clk@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
+F: Documentation/devicetree/bindings/clock/
F: drivers/clk/
X: drivers/clk/clkdev.c
F: include/linux/clk-pr*
F: drivers/media/dvb-frontends/cx24120*
CX88 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: Documentation/dma-buf-sharing.txt
T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+SYNC FILE FRAMEWORK
+M: Sumit Semwal <sumit.semwal@linaro.org>
+R: Gustavo Padovan <gustavo@padovan.org>
+S: Maintained
+L: linux-media@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+F: drivers/dma-buf/sync_file.c
+F: include/linux/sync_file.h
+F: Documentation/sync_file.txt
+T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/
F: include/linux/dmaengine.h
+F: Documentation/devicetree/bindings/dma/
F: Documentation/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com>
M: Borislav Petkov <bp@alien8.de>
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
F: drivers/edac/e7xxx_edac.c
EDAC-GHES
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ghes_edac.c
F: drivers/edac/i5000_edac.c
EDAC-I5400
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i5400_edac.c
EDAC-I7300
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i7300_edac.c
EDAC-I7CORE
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i7core_edac.c
F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M: Douglas Miller <dougmill@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ibm/ehea/
EM28XX VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <shuahkh@osg.samsung.com>
+M: Shuah Khan <shuah@kernel.org>
L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
S: Maintained
F: drivers/media/pci/netup_unidvb/*
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
P: LinuxTV.org Project
L: linux-media@vger.kernel.org
W: https://linuxtv.org
F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en)
-M: Eugenia Emantayev <eugenia@mellanox.com>
+M: Tariq Toukan <tariqt@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
T: git git://git.infradead.org/linux-mtd.git
T: git git://git.infradead.org/l2-mtd.git
S: Maintained
+F: Documentation/devicetree/bindings/mtd/
F: drivers/mtd/
F: include/linux/mtd/
F: include/uapi/mtd/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
S: Maintained
+F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
-M: Grant Likely <grant.likely@linaro.org>
L: devicetree@vger.kernel.org
W: http://www.devicetree.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
S: Maintained
F: drivers/of/
F: include/linux/of*.h
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
-M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
-M: Ian Campbell <ijc+devicetree@hellion.org.uk>
-M: Kumar Gala <galak@codeaurora.org>
L: devicetree@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
+Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
S: Maintained
F: Documentation/devicetree/
F: arch/*/boot/dts/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/
+F: Documentation/pinctrl.txt
F: drivers/pinctrl/
F: include/linux/pinctrl/
F: drivers/media/i2c/saa6588*
SAA7134 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/si4713/radio-usb-si4713.c
SIANO DVB DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/tda9840*
TEA5761 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/tuners/tea5761.*
TEA5767 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: mm/shmem.c
TM6000 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
USB OVER IP DRIVER
M: Valentina Manea <valentina.manea.m@gmail.com>
-M: Shuah Khan <shuah.kh@samsung.com>
+M: Shuah Khan <shuahkh@osg.samsung.com>
+M: Shuah Khan <shuah@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/usb/usbip_protocol.txt
W: http://www.linux-usb.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
S: Supported
+F: Documentation/devicetree/bindings/usb/
F: Documentation/usb/
F: drivers/usb/
F: include/linux/usb.h
M: "Michael S. Tsirkin" <mst@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
+F: Documentation/devicetree/bindings/virtio/
F: drivers/virtio/
F: tools/virtio/
F: drivers/net/virtio_net.c
F: arch/x86/entry/vdso/
XC2028/3028 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
VERSION = 4
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION =
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void $(CF)
+NOSTDINC_FLAGS =
CFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
+LDFLAGS_vmlinux =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_KCOV = -fsanitize-coverage=trace-pc
config ARCH_TASK_STRUCT_ALLOCATOR
bool
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
bool
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
file which provides platform-specific implementations of some
functions in <linux/hash.h> or fs/namei.c.
+config ISA_BUS_API
+ def_bool ISA
+
#
# ABI hall of shame
#
static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return ret;
}
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
def_bool y
config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
+ def_bool n
config ARCH_FLATMEM_ENABLE
def_bool y
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_HAS_REENTRANT_IRQ_LV2
- def_bool n
-
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
- bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+ bool "Setup Timer IRQ as high Priority"
default n
- # Timer HAS to be high priority, for any other high priority config
- select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
- depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
- bool
-
-config ARC_IRQ5_LV2
- bool
-
-config ARC_IRQ6_LV2
- bool
-
-endif #ARC_COMPACT_IRQ_LEVELS
+ depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
default y
depends on !ARC_CANT_LLSC
-config ARC_STAR_9000923308
- bool "Workaround for llock/scond livelock"
- default n
- depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
config HIGHMEM
bool "High Memory Support"
- select DISCONTIGMEM
+ select ARCH_DISCONTIGMEM_ENABLE
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE
endif
-cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
-
# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
ifeq ($(atleast_gcc48),y)
cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
boot := arch/arc/boot
-#default target for make without any arguements.
+#default target for make without any arguments.
KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)
/ {
- clock-frequency = <500000000>; /* 500 MHZ */
-
soc100 {
bus-frequency = <166666666>;
/ {
- clock-frequency = <500000000>; /* 500 MHZ */
-
soc100 {
bus-frequency = <166666666>;
/ {
compatible = "snps,arc";
- clock-frequency = <750000000>; /* 750 MHZ */
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "snps,arc";
- clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "snps,arc";
- clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "ezchip,arc-nps";
- clock-frequency = <83333333>; /* 83.333333 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
/ {
compatible = "snps,nsim";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,nsimosci";
- clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,nsimosci_hs";
- clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,nsimosci_hs";
- clock-frequency = <5000000>; /* 5 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
/ {
compatible = "snps,arc";
- clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };
/ {
compatible = "snps,arc";
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
/ {
compatible = "snps,arc";
- clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay = 1, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " bz 4f \n" \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- "4: ; --- success --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
-
-#else /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM \
- " bnz 1b \n" \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whaco (some small value which looks like
* USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack
local_flush_tlb_all();
/*
- * Above checke for rollover of 8 bit ASID in 32 bit container.
+ * Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
__get_order_pte());
return pte;
pgtable_t pte_pg;
struct page *page;
- pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
if (!pte_pg)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* seperate PD0 and PD1, which combined forms a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
#ifdef CONFIG_ARC_HAS_LLSC
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
smp_mb();
}
-#else /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- " \n" \
- "4: ; --- done --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
-
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * zero means writer holds the lock exclusively, deny Reader.
- * Otherwise grant lock to first/subseq reader
- *
- * if (rw->counter > 0) {
- * rw->counter--;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
- " sub %[val], %[val], 1 \n" /* reader lock */
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
- " sub %[val], %[val], 1 \n" /* counter-- */
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
- * deny writer. Otherwise if unlocked grant to writer
- * Hence the claim that Linux rwlocks are unfair to writers.
- * (can be starved for an indefinite time by readers).
- *
- * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
- * rw->counter = 0;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n"
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter++;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " add %[val], %[val], 1 \n"
- " scond %[val], [%[rwlock]] \n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter))
- : "memory", "cc");
-
- smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " scond %[UNLOCKED], [%[rwlock]]\n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
* syscall, so all that reamins to be tested is _TIF_WORK_MASK
*/
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.
__tmp ^ __in; \
})
-#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR handle_interrupt_level2
-#else
-VECTOR handle_interrupt_level1
-#endif
-
-VECTOR handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR handle_interrupt_level2
-#else
-VECTOR handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
-.rept 25
+.rept 28
VECTOR handle_interrupt_level1 ; Other devices
.endr
{
int level_mask = 0;
- /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
- level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+ /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+ level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
/*
* Write to register, even if no LV2 IRQs configured to reset it
int64_t delta = new_raw_count - prev_raw_count;
/*
- * We don't afaraid of hwc->prev_count changing beneath our feet
+ * We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
- * Appent to embedded DT cmdline.
+ * append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permssions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
- * This was cauing TLB entries to be overwritten on unrelated indexes
+ * This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,
* prelogue is setup (callee regs saved and then fp set and not other
* way around
*/
- pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;
#endif
return 0;
}
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
* ------------------
* This ver of MMU supports variable page sizes (1k-16k): although Linux will
* only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
* Two new registers IC_PTAG and DC_PTAG inttoduced.
/*
* This is technically for MMU v4, using the MMU v3 programming model
- * Special work for HS38 aliasing I-cache configuratino with PAE40
+ * Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop
ic->ver, CONFIG_ARC_MMU_VER);
/*
- * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+ * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)
* DMA Coherent API Notes
*
* I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
* Cache bit off in the TLB entry.
*
* The default DMA address == Phy address which is 0x8000_0000 based.
sun7i-a20-olimex-som-evb.dtb \
sun7i-a20-olinuxino-lime.dtb \
sun7i-a20-olinuxino-lime2.dtb \
+ sun7i-a20-olinuxino-lime2-emmc.dtb \
sun7i-a20-olinuxino-micro.dtb \
sun7i-a20-orangepi.dtb \
sun7i-a20-orangepi-mini.dtb \
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
tps@24 {
compatible = "ti,tps65218";
tps659038_pmic {
compatible = "ti,tps659038-pmic";
+
+ smps12-in-supply = <&vmain>;
+ smps3-in-supply = <&vmain>;
+ smps45-in-supply = <&vmain>;
+ smps6-in-supply = <&vmain>;
+ smps7-in-supply = <&vmain>;
+ smps8-in-supply = <&vmain>;
+ smps9-in-supply = <&vmain>;
+ ldo1-in-supply = <&vmain>;
+ ldo2-in-supply = <&vmain>;
+ ldo3-in-supply = <&vmain>;
+ ldo4-in-supply = <&vmain>;
+ ldo9-in-supply = <&vmain>;
+ ldoln-in-supply = <&vmain>;
+ ldousb-in-supply = <&vmain>;
+ ldortc-in-supply = <&vmain>;
+
regulators {
smps12_reg: smps12 {
/* VDD_MPU */
- vin-supply = <&vmain>;
regulator-name = "smps12";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
smps3_reg: smps3 {
/* VDD_DDR EMIF1 EMIF2 */
- vin-supply = <&vmain>;
regulator-name = "smps3";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
smps45_reg: smps45 {
/* VDD_DSPEVE on AM572 */
/* VDD_IVA + VDD_DSP on AM571 */
- vin-supply = <&vmain>;
regulator-name = "smps45";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
smps6_reg: smps6 {
/* VDD_GPU */
- vin-supply = <&vmain>;
regulator-name = "smps6";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1250000>;
smps7_reg: smps7 {
/* VDD_CORE */
- vin-supply = <&vmain>;
regulator-name = "smps7";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1150000>;
smps8_reg: smps8 {
/* 5728 - VDD_IVAHD */
/* 5718 - N.C. test point */
- vin-supply = <&vmain>;
regulator-name = "smps8";
};
smps9_reg: smps9 {
/* VDD_3_3D */
- vin-supply = <&vmain>;
regulator-name = "smps9";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
ldo1_reg: ldo1 {
/* VDDSHV8 - VSDMMC */
/* NOTE: on rev 1.3a, data supply */
- vin-supply = <&vmain>;
regulator-name = "ldo1";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
ldo2_reg: ldo2 {
/* VDDSH18V */
- vin-supply = <&vmain>;
regulator-name = "ldo2";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldo3_reg: ldo3 {
/* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
- vin-supply = <&vmain>;
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldo4_reg: ldo4 {
/* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
- vin-supply = <&vmain>;
regulator-name = "ldo4";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldo9_reg: ldo9 {
/* VDD_RTC */
- vin-supply = <&vmain>;
regulator-name = "ldo9";
regulator-min-microvolt = <840000>;
regulator-max-microvolt = <1160000>;
ldoln_reg: ldoln {
/* VDDA_1V8_PLL */
- vin-supply = <&vmain>;
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
ldousb_reg: ldousb {
/* VDDA_3V_USB: VDDA_USBHS33 */
- vin-supply = <&vmain>;
regulator-name = "ldousb";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
ldortc_reg: ldortc {
/* VDDA_RTC */
- vin-supply = <&vmain>;
regulator-name = "ldortc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
internal-regs {
};
};
+&mmc1 {
+ status = "disabled";
+};
+
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&sd1_pins>;
cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
};
+&mmc3 {
+ status = "disabled";
+};
+
&pincntl {
sd1_pins: pinmux_sd1_pins {
pinctrl-single,pins = <
phy-mode = "rgmii";
};
+&mmc1 {
+ status = "disabled";
+};
+
+&mmc2 {
+ status = "disabled";
+};
+
&mmc3 {
pinctrl-names = "default";
pinctrl-0 = <&sd2_pins>;
dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */
&edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */
dma-names = "tx", "rx";
+ non-removable;
};
&pincntl {
ti,hwmods = "gpmc";
reg = <0x50000000 0x37c>; /* device IO registers */
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma_xbar 4 0>;
+ dma-names = "rxtx";
gpmc,num-cs = <8>;
gpmc,num-waitpins = <2>;
#address-cells = <2>;
reg = <0x58000000 0x80>,
<0x58004054 0x4>,
<0x58004300 0x20>,
- <0x58005054 0x4>,
- <0x58005300 0x20>;
+ <0x58009054 0x4>,
+ <0x58009300 0x20>;
reg-names = "dss", "pll1_clkctrl", "pll1",
"pll2_clkctrl", "pll2";
hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
ports {
- port0 {
+ port {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
edid-emulation = <5>;
ports {
- port0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
- port1 {
+ port@1 {
+ reg = <1>;
+
bridge_in: endpoint {
remote-endpoint = <&dp_out>;
};
hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
ports {
- port0 {
+ port {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
use-external-pwm;
ports {
- port0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
- port1 {
+ port@1 {
+ reg = <1>;
+
bridge_in: endpoint {
remote-endpoint = <&dp_out>;
};
OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
- OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+ OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
>;
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
bus-width = <4>;
+ cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
};
&mmc3 {
OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
>;
};
+
+ mmc1_wp_pins: pinmux_mmc1_cd_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4) /* etk_d15.gpio_29 */
+ >;
+ };
};
&i2c3 {
};
};
};
+
+&mmc1 {
+ pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+ wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
+};
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
modem_pins: pinmux_modem {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */
- OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+ OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* gpio 72 => ape_rst_rq */
OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */
OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */
OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */
OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */
OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */
OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */
- OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+ OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */
OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */
OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */
modem_pins1: pinmux_modem_core1_pins {
pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+ OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */
OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */
>;
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
- OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+ OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
>;
};
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
- OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+ OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
>;
};
pinctrl-single,pins = <
OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
- OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+ OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
>;
display0 = &hdmi0;
};
+ vmain: fixedregulator-vmain {
+ compatible = "regulator-fixed";
+ regulator-name = "vmain";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vsys_cobra: fixedregulator-vsys_cobra {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_cobra";
+ vin-supply = <&vmain>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vdds_1v8_main: fixedregulator-vdds_1v8_main {
+ compatible = "regulator-fixed";
+ regulator-name = "vdds_1v8_main";
+ vin-supply = <&smps7_reg>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
vmmcsd_fixed: fixedregulator-mmcsd {
compatible = "regulator-fixed";
regulator-name = "vmmcsd_fixed";
wlcore_irq_pin: pinmux_wlcore_irq_pin {
pinctrl-single,pins = <
- OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
+ OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
>;
};
};
ti,ldo6-vibrator;
+ smps123-in-supply = <&vsys_cobra>;
+ smps45-in-supply = <&vsys_cobra>;
+ smps6-in-supply = <&vsys_cobra>;
+ smps7-in-supply = <&vsys_cobra>;
+ smps8-in-supply = <&vsys_cobra>;
+ smps9-in-supply = <&vsys_cobra>;
+ smps10_out2-in-supply = <&vsys_cobra>;
+ smps10_out1-in-supply = <&vsys_cobra>;
+ ldo1-in-supply = <&vsys_cobra>;
+ ldo2-in-supply = <&vsys_cobra>;
+ ldo3-in-supply = <&vdds_1v8_main>;
+ ldo4-in-supply = <&vdds_1v8_main>;
+ ldo5-in-supply = <&vsys_cobra>;
+ ldo6-in-supply = <&vdds_1v8_main>;
+ ldo7-in-supply = <&vsys_cobra>;
+ ldo8-in-supply = <&vsys_cobra>;
+ ldo9-in-supply = <&vmmcsd_fixed>;
+ ldoln-in-supply = <&vsys_cobra>;
+ ldousb-in-supply = <&vsys_cobra>;
+
regulators {
smps123_reg: smps123 {
/* VDD_OPP_MPU */
pinctrl-0 = <&twl6040_pins>;
interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
- ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+
+ /* audpwron gpio defined in the board specific dts */
vio-supply = <&smps7_reg>;
v2v1-supply = <&smps9_reg>;
};
};
+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+ status = "okay";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
&omap5_pmx_core {
i2c4_pins: pinmux_i2c4_pins {
pinctrl-single,pins = <
<&gpio7 3 0>; /* 195, SDA */
};
+&twl6040 {
+ ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>; /* gpio line 144 */
+};
+
+&twl6040_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_144 */
+ OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */
+ >;
+};
<&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */
<&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */
};
+
+&twl6040 {
+ ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+};
+
+&twl6040_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
+ >;
+};
&gmac1 {
status = "okay";
phy-mode = "rgmii";
+ phy-handle = <&phy1>;
snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
compatible = "shared-dma-pool";
reg = <0x40000000 0x01000000>;
no-map;
+ status = "disabled";
};
gp1_reserved: rproc@41000000 {
compatible = "shared-dma-pool";
reg = <0x41000000 0x01000000>;
no-map;
+ status = "disabled";
};
audio_reserved: rproc@42000000 {
compatible = "shared-dma-pool";
reg = <0x42000000 0x01000000>;
no-map;
+ status = "disabled";
};
dmu_reserved: rproc@43000000 {
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&ahb_gates 46>,
<&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
- <&ahb_gates 46>, <&dram_gates 25>,
- <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&ahb_gates 46>,
+ <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
- clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+ <&ahb_gates 36>, <&ahb_gates 44>,
+ <&ahb_gates 46>,
<&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
- <&ahb_gates 44>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+ <&ahb_gates 36>, <&ahb_gates 44>;
status = "disabled";
};
};
/ {
model = "NextThing C.H.I.P.";
- compatible = "nextthing,chip", "allwinner,sun5i-r8";
+ compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
aliases {
i2c0 = &i2c0;
};
®_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
regulator-name = "vcc-lcd";
};
®_dc1sw {
regulator-name = "vcc-lcd-usb2";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
};
®_dc5ldo {
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 43>, <&ahb_gates 44>,
+ <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
- <&dram_gates 26>;
+ clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&dram_gates 26>;
status = "disabled";
};
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll5 1>,
+ clocks = <&pll3>, <&pll5 1>,
<&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
<&dram_gates 5>, <&dram_gates 26>;
status = "disabled";
pll3x2: pll3x2_clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
+ clocks = <&pll3>;
clock-div = <1>;
clock-mult = <2>;
clock-output-names = "pll3-2x";
pll7x2: pll7x2_clk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
+ clocks = <&pll7>;
clock-div = <1>;
clock-mult = <2>;
clock-output-names = "pll7-2x";
ldo5_reg: ldo5 {
regulator-name = "vddio_sdmmc,avdd_vdac";
- regulator-min-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
sdhci@78000000 {
status = "okay";
+ vqmmc-supply = <&ldo5_reg>;
cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio TEGRA_GPIO(T, 3) GPIO_ACTIVE_HIGH>;
power-gpios = <&gpio TEGRA_GPIO(D, 7) GPIO_ACTIVE_HIGH>;
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MAX77693_HAPTIC=y
CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_KEYBOARD_SPEAR=y
CONFIG_KEYBOARD_ST_KEYSCAN=y
CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_CYAPA=m
CONFIG_MOUSE_ELAN_I2C=y
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_present(pmd) (pmd_val(pmd))
#define copy_pmd(pmdpd,pmdps) \
do { \
: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
+#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
- return __pmd(0);
+ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise(target, ipi_types[ipinr]);
+ trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
kvm_timer_vcpu_terminate(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
+ kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
select CPU_EXYNOS4210
select GIC_NON_BANKED
- select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
select MIGHT_HAVE_CACHE_L2X0
help
Samsung EXYNOS4 (Cortex-A9) SoC based systems
static void __init imx6ul_enet_phy_init(void)
{
if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
ksz8081_phy_fixup);
}
obj-$(CONFIG_MACH_MVEBU_ANY) += system-controller.o mvebu-soc-id.o
ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o pm.o pm-board.o
+obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM) += pm.o pm-board.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
endif
obj-$(CONFIG_MACH_DOVE) += dove.o
-obj-$(CONFIG_MACH_KIRKWOOD) += kirkwood.o kirkwood-pm.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y += kirkwood.o
+obj-$(CONFIG_PM) += kirkwood-pm.o
+endif
}
/*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
*/
static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
- struct resource pcie_mem;
-
- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
- mtype = MT_UNCACHED;
-
+ mtype = MT_UNCACHED;
return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}
struct device_node *cache_dn;
coherency_cpu_base = of_iomap(np, 0);
- arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+ arch_ioremap_caller = armada_wa_ioremap_caller;
+ pci_ioremap_set_mem_type(MT_UNCACHED);
/*
* We should switch the PL310 to I/O coherency mode only if
#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
/* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1)
/* Driver buffer byte offsets */
#define BUF_MASK (FIQ_MASK * 4)
mov r8, #2 @ reset FIQ agreement
str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
- cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt?
+ cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt?
beq gpio @ yes - process it
mov r8, #1
* Since no set_type() method is provided by OMAP irq chip,
* switch to edge triggered interrupt type manually.
*/
- offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+ offset = IRQ_ILR0_REG_OFFSET +
+ ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
/*
* Redirect GPIO interrupts to FIQ
*/
- offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+ offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
val = omap_readl(OMAP_IH1_BASE + offset) | 1;
omap_writel(val, OMAP_IH1_BASE + offset);
}
#ifndef __AMS_DELTA_FIQ_H
#define __AMS_DELTA_FIQ_H
+#include <mach/irqs.h>
+
/*
* Interrupt number used for passing control from FIQ to IRQ.
* IRQ12, described as reserved, has been selected.
select PM_OPP if PM
select PM if CPU_IDLE
select SOC_HAS_OMAP2_SDRC
+ select ARM_ERRATA_430973
config ARCH_OMAP4
bool "TI OMAP4"
select PM if CPU_IDLE
select ARM_ERRATA_754322
select ARM_ERRATA_775420
+ select OMAP_INTERCONNECT
config SOC_OMAP5
bool "TI OMAP5"
select HAVE_ARM_SCU
select GENERIC_CLOCKEVENTS_BROADCAST
select HAVE_ARM_TWD
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
config SOC_DRA7XX
bool "TI DRA7XX"
endif
+config OMAP5_ERRATA_801819
+ bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+ depends on SOC_OMAP5 || SOC_DRA7XX
+ help
+ A livelock can occur in the L2 cache arbitration that might prevent
+ a snoop from completing. Under certain conditions this can cause the
+ system to deadlock.
+
endmenu
#define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109
#define OMAP5_MON_AMBA_IF_INDEX 0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
/* Secure PPA(Primary Protected Application) APIs */
#define OMAP4_PPA_L2_POR_INDEX 0x23
return scu_base;
}
+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+ u32 acr, revidr;
+ u32 acr_mask;
+
+ /* REVIDR[3] indicates erratum fix available on silicon */
+ asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+ if (revidr & (0x1 << 3))
+ return;
+
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+ /*
+ * BIT(27) - Disables streaming. All write-allocate lines allocate in
+ * the L1 or L2 cache.
+ * BIT(25) - Disables streaming. All write-allocate lines allocate in
+ * the L1 cache.
+ */
+ acr_mask = (0x3 << 25) | (0x3 << 27);
+ /* do we already have it done.. if yes, skip expensive smc */
+ if ((acr & acr_mask) == acr_mask)
+ return;
+
+ acr |= acr_mask;
+ omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+ pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+ __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
static void omap4_secondary_init(unsigned int cpu)
{
/*
omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
4, 0, 0, 0, 0, 0);
- /*
- * Configure the CNTFRQ register for the secondary cpu's which
- * indicates the frequency of the cpu local timers.
- */
- if (soc_is_omap54xx() || soc_is_dra7xx())
+ if (soc_is_omap54xx() || soc_is_dra7xx()) {
+ /*
+ * Configure the CNTFRQ register for the secondary cpu's which
+ * indicates the frequency of the cpu local timers.
+ */
set_cntfreq();
+ /* Configure ACR to disable streaming WA for 801819 */
+ omap5_erratum_workaround_801819();
+ }
/*
* Synchronise with the boot thread.
if (cpu_is_omap446x())
startup_addr = omap4460_secondary_startup;
+ if (soc_is_dra74x() || soc_is_omap54xx())
+ omap5_erratum_workaround_801819();
/*
* Write the address of secondary startup routine into the
trace_state = (PWRDM_TRACE_STATES_FLAG |
((next & OMAP_POWERSTATE_MASK) << 8) |
((prev & OMAP_POWERSTATE_MASK) << 0));
- trace_power_domain_target(pwrdm->name, trace_state,
- smp_processor_id());
+ trace_power_domain_target_rcuidle(pwrdm->name,
+ trace_state,
+ smp_processor_id());
}
break;
default:
if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
/* Trace the pwrdm desired target state */
- trace_power_domain_target(pwrdm->name, pwrst,
- smp_processor_id());
+ trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+ smp_processor_id());
/* Program the pwrdm desired target state */
ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
}
.prcm_offs = DRA7XX_PRM_IVA_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 4,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* hwa_mem */
- [1] = PWRSTS_OFF_RET, /* sl2_mem */
- [2] = PWRSTS_OFF_RET, /* tcm1_mem */
- [3] = PWRSTS_OFF_RET, /* tcm2_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* hwa_mem */
[1] = PWRSTS_ON, /* sl2_mem */
.prcm_offs = DRA7XX_PRM_IPU_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 2,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* aessmem */
- [1] = PWRSTS_OFF_RET, /* periphmem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* aessmem */
[1] = PWRSTS_ON, /* periphmem */
.prcm_offs = DRA7XX_PRM_DSS_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dss_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dss_mem */
},
.name = "l4per_pwrdm",
.prcm_offs = DRA7XX_PRM_L4PER_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
- .pwrsts = PWRSTS_RET_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
+ .pwrsts = PWRSTS_ON,
.banks = 2,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* nonretained_bank */
- [1] = PWRSTS_OFF_RET, /* retained_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* nonretained_bank */
[1] = PWRSTS_ON, /* retained_bank */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* gpu_mem */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gpu_mem */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* wkup_bank */
},
.prcm_offs = DRA7XX_PRM_CORE_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
.banks = 5,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* core_nret_bank */
- [1] = PWRSTS_OFF_RET, /* core_ocmram */
- [2] = PWRSTS_OFF_RET, /* core_other_bank */
- [3] = PWRSTS_OFF_RET, /* ipu_l2ram */
- [4] = PWRSTS_OFF_RET, /* ipu_unicache */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* core_nret_bank */
[1] = PWRSTS_ON, /* core_ocmram */
.prcm_offs = DRA7XX_PRM_VPE_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
- .pwrsts_logic_ret = PWRSTS_OFF,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* vpe_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* vpe_bank */
},
.name = "l3init_pwrdm",
.prcm_offs = DRA7XX_PRM_L3INIT_INST,
.prcm_partition = DRA7XX_PRM_PARTITION,
- .pwrsts = PWRSTS_RET_ON,
- .pwrsts_logic_ret = PWRSTS_RET,
+ .pwrsts = PWRSTS_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* gmac_bank */
- [1] = PWRSTS_OFF_RET, /* l3init_bank1 */
- [2] = PWRSTS_OFF_RET, /* l3init_bank2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gmac_bank */
[1] = PWRSTS_ON, /* l3init_bank1 */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve3_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve3_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* emu_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* emu_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dsp2_edma */
- [1] = PWRSTS_OFF_RET, /* dsp2_l1 */
- [2] = PWRSTS_OFF_RET, /* dsp2_l2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dsp2_edma */
[1] = PWRSTS_ON, /* dsp2_l1 */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 3,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* dsp1_edma */
- [1] = PWRSTS_OFF_RET, /* dsp1_l1 */
- [2] = PWRSTS_OFF_RET, /* dsp1_l2 */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dsp1_edma */
[1] = PWRSTS_ON, /* dsp1_l1 */
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* vip_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* vip_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve4_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve4_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve2_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve2_bank */
},
.prcm_partition = DRA7XX_PRM_PARTITION,
.pwrsts = PWRSTS_OFF_ON,
.banks = 1,
- .pwrsts_mem_ret = {
- [0] = PWRSTS_OFF_RET, /* eve1_bank */
- },
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* eve1_bank */
},
__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
2, "timer_sys_ck", NULL, false);
- if (of_have_populated_dt())
- clocksource_probe();
+ clocksource_probe();
}
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
{
__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
2, "timer_sys_ck", NULL, false);
+
+ clocksource_probe();
}
#endif /* CONFIG_ARCH_OMAP3 */
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
1, "timer_sys_ck", "ti,timer-alwon", true);
+
+ clocksource_probe();
}
#endif
init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
- init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &spc->hw);
#include <linux/platform_data/asoc-s3c.h>
#include <linux/platform_data/spi-s3c64xx.h>
-static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
/* AC97 */
#ifdef CONFIG_CPU_S3C2440
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-Image.%: vmlinux
+Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zinstall install:
#size-cells = <1>;
#interrupts-cells = <3>;
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
interrupt-parent = <&gic>;
ranges;
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
ranges;
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define BRCM_CPU_PART_VULCAN 0x516
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#ifndef __ASSEMBLY__
#endif /* !__ASSEMBLY__ */
/*
- * gdb is expecting the following registers layout.
+ * gdb remote procotol (well most versions of it) expects the following
+ * register layout.
*
* General purpose regs:
* r0-r30: 64 bit
* sp,pc : 64 bit
- * pstate : 64 bit
- * Total: 34
+ * pstate : 32 bit
+ * Total: 33 + 1
* FPU regs:
* f0-f31: 128 bit
- * Total: 32
- * Extra regs
* fpsr & fpcr: 32 bit
- * Total: 2
+ * Total: 32 + 2
*
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register".
+ * and, as a result, allocated only 32-bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
*/
-#define _GP_REGS 34
+#define _GP_REGS 33
#define _FP_REGS 32
-#define _EXTRA_REGS 2
+#define _EXTRA_REGS 3
/*
* general purpose registers size in bytes.
* pstate is only 4 bytes. subtract 4 bytes
#define check_pgt_cache() do { } while (0)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
};
u64 orig_x0;
u64 syscallno;
+ u64 orig_addr_limit;
+ u64 unused; // maintain 16 byte alignment
};
#define arch_has_single_step() (1)
cpu_park_loop();
}
+/*
+ * If a secondary CPU enters the kernel but fails to come online,
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiesecent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifndef __ASM_SMP_H */
{
unsigned int tmp;
arch_spinlock_t lockval;
+ u32 owner;
+
+ /*
+ * Ensure prior spin_lock operations to other locks have completed
+ * on this CPU before we test whether "lock" is locked.
+ */
+ smp_mb();
+ owner = READ_ONCE(lock->owner) << 16;
asm volatile(
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %2\n"
+ /* Is the lock free? */
" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 1b\n"
+" cbz %w1, 3f\n"
+ /* Lock taken -- has there been a subsequent unlock->lock transition? */
+" eor %w1, %w3, %w0, lsl #16\n"
+" cbz %w1, 1b\n"
+ /*
+ * The owner has been updated, so there was an unlock->lock
+ * transition that we missed. That means we can rely on the
+ * store-release of the unlock operation paired with the
+ * load-acquire of the lock operation to publish any of our
+ * previous stores to the new lock owner and therefore don't
+ * need to bother with the writeback below.
+ */
+" b 4f\n"
+"3:\n"
+ /*
+ * Serialise against any concurrent lockers by writing back the
+ * unlocked lock value
+ */
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" stxr %w1, %w0, %2\n"
-" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
- /* LSE atomics */
" nop\n"
-" nop\n")
+" nop\n",
+ /* LSE atomics */
+" mov %w1, %w0\n"
+" cas %w0, %w0, %2\n"
+" eor %w1, %w1, %w0\n")
+ /* Somebody else wrote to the lock, GOTO 10 and reload the value */
+" cbnz %w1, 2b\n"
+"4:"
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- :
+ : "r" (owner)
: "memory");
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
+ smp_mb(); /* See arch_spin_unlock_wait */
return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+ DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 1),
},
+ {
+ /* Cavium ThunderX, T81 pass 1.0 */
+ .desc = "Cavium erratum 27456",
+ .capability = ARM64_WORKAROUND_CAVIUM_27456,
+ MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+ },
#endif
{
}
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
- .endif
+ get_thread_info tsk
+ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ ldr x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [sp, #S_ORIG_ADDR_LIMIT]
+ mov x20, #TASK_SIZE_64
+ str x20, [tsk, #TI_ADDR_LIMIT]
+ ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+ .endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
.endm
.macro kernel_exit, el
+ .if \el != 0
+ /* Restore the task's original addr_limit. */
+ ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+ str x20, [tsk, #TI_ADDR_LIMIT]
+
+ /* No need to restore UAO, it will be restored from SPSR_EL1 */
+ .endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
bl trace_hardirqs_off
#endif
- get_thread_info tsk
irq_handler
#ifdef CONFIG_PREEMPT
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
+#include <asm/smp.h>
#include <asm/suspend.h>
#include <asm/virt.h>
unsigned long flags;
struct sleep_stack_data state;
+ if (cpus_are_stuck_in_kernel()) {
+ pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+ return -EBUSY;
+ }
+
local_dbg_save(flags);
if (__cpu_suspend_enter(&state)) {
{ "x30", 8, offsetof(struct pt_regs, regs[30])},
{ "sp", 8, offsetof(struct pt_regs, sp)},
{ "pc", 8, offsetof(struct pt_regs, pc)},
- { "pstate", 8, offsetof(struct pt_regs, pstate)},
+ /*
+ * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
+ * protocol disagrees. Therefore we must extract only the lower
+ * 32-bits. Look for the big comment in asm/kgdb.h for more
+ * detail.
+ */
+ { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ + 4
+#endif
+ },
{ "v0", 16, -1 },
{ "v1", 16, -1 },
{ "v2", 16, -1 },
memset((char *)gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+ /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
+ dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
return -EINVAL;
}
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int any_cpu = raw_smp_processor_id();
+
+ if (cpu_ops[any_cpu]->cpu_die)
+ return true;
+#endif
+ return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+ bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+ return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
/*
* We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * to safely read from kernel space.
*/
fs = get_fs();
set_fs(KERNEL_DS);
print_ip_sym(where);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
for (i = -4; i < 1; i++) {
unsigned int val, bad;
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ if (!user_mode(regs)) {
+ mm_segment_t fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
&asid_generation);
flush_context(cpu);
- /* We have at least 1 ASID per CPU, so this will always succeed */
+ /* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
set_asid:
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
- /* If we end up with more CPUs than ASIDs, expect things to crash */
- WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+ */
+ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
GFP_KERNEL);
* PTE_RDONLY is cleared by default in the asm below, so set it in
* back if necessary (read-only or clean PTE).
*/
- if (!pte_write(entry) || !dirty)
+ if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;
/*
}
if (permission_fault(esr) && (addr < USER_DS)) {
- if (get_fs() == KERNEL_DS)
+ /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+ if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
if (!search_exception_tables(regs->pc))
return 1;
}
-static struct fault_info {
+static const struct fault_info {
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
int sig;
int code;
{
struct page *page = pte_page(pte);
- /* no flushing needed for anonymous pages */
- if (!page_mapping(page))
- return;
-
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+ return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
void *pg;
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
if (!pg)
return NULL;
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
return pte;
struct page *page;
#ifdef CONFIG_HIGHPTE
- page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+ page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
- page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ page = alloc_pages(GFP_KERNEL, 0);
#endif
if (!page)
return NULL;
{
struct page *pte;
- pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
}
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
- select ARCH_THREAD_INFO_ALLOCATOR
+ select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL_OLD
select SYSCTL_ARCH_UNALIGN_NO_WARN
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node) \
- ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
-#define free_thread_info(ti) /* nothing */
+#define free_thread_stack(ti) /* nothing */
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
* handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
+#define init_stack init_task_mem.stack
union {
struct {
#include <asm/processor.h>
-static void putc(char c);
+static void m32r_putc(char c);
static int puts(const char *s)
{
char c;
- while ((c = *s++)) putc(c);
+ while ((c = *s++))
+ m32r_putc(c);
return 0;
}
#define BOOT_SIO0TXB PLD_ESIO0TXB
#endif
-static void putc(char c)
+static void m32r_putc(char c)
{
while ((*BOOT_SIO0STS & 0x3) != 0x3)
cpu_relax();
#define SIO0TXB (volatile unsigned short *)(0x00efd000 + 30)
#endif
-static void putc(char c)
+static void m32r_putc(char c)
{
while ((*SIO0STS & 0x1) == 0)
cpu_relax();
#endif
/*
- * Assember start up done, start code proper.
+ * Assembler start up done, start code proper.
*/
jsr start_kernel /* start Linux kernel */
/***************************************************************************/
/*
- * Some 5272 based boards have the FEC ethernet diectly connected to
+ * Some 5272 based boards have the FEC ethernet directly connected to
* an ethernet switch. In this case we need to use the fixed phy type,
* and we need to declare it early in boot.
*/
/*
* We need to be carefull probing on bus 0 (directly connected to host
- * bridge). We should only acccess the well defined possible devices in
+ * bridge). We should only access the well defined possible devices in
* use, ignore aliases and the like.
*/
static unsigned char mcf_host_slot2sid[32] = {
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+CONFIG_GTP=m
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
xdnrm_sd:
mov.l %a1,-(%sp)
tst.b LOCAL_EX(%a0) # is denorm pos or neg?
- smi.b %d1 # set d0 accodingly
+ smi.b %d1 # set d0 accordingly
bsr.l unf_sub
mov.l (%sp)+,%a1
xdnrm_exit:
# routines where an instruction is selected by an index into
# a large jump table corresponding to a given instruction which
# has been decoded. Flow continues here where we now decode
-# further accoding to the source operand type.
+# further according to the source operand type.
#
global fsinh
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
-# 2. Check M16 and the digits in lwords 2 and 3 in decending order.
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
-# 2. Check the digits in lwords 3 and 2 in decending order.
+# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
-# 2. Check M16 and the digits in lwords 2 and 3 in decending order.
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
-# 2. Check the digits in lwords 3 and 2 in decending order.
+# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
* AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
* Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
*
* APR/18/2002 : added proper support for MCF5272 DMA controller.
/*
* I2C module.
*/
-#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base addreess I2C0 */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base address I2C0 */
#define MCFI2C_SIZE0 0x20 /* Register set size */
-#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base addreess I2C1 */
+#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base address I2C1 */
#define MCFI2C_SIZE1 0x20 /* Register set size */
/*
extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+ unsigned long page = __get_free_page(GFP_DMA);
if (!page)
return NULL;
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+ struct page *page = alloc_pages(GFP_DMA, 0);
pte_t *pte;
if (!page)
/*
* MMU Operation register.
*/
-#define MMUOR_UAA 0x00000001 /* Update allocatiom address */
+#define MMUOR_UAA 0x00000001 /* Update allocation address */
#define MMUOR_ACC 0x00000002 /* TLB access */
#define MMUOR_RD 0x00000004 /* TLB access read */
#define MMUOR_WR 0x00000000 /* TLB access write */
{
pte_t *pte;
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (pte) {
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
struct page *page;
pte_t *pte;
- page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
if (!pgtable_page_ctor(page)) {
/*
* Q40 master Chip Control
- * RTC stuff merged for compactnes..
+ * RTC stuff merged for compactness.
*/
#ifndef _Q40_MASTER_H
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ unsigned long page = __get_free_page(GFP_KERNEL);
if (!page)
return NULL;
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ struct page *page = alloc_pages(GFP_KERNEL, 0);
if (page == NULL)
return NULL;
*
* The host talks to the IOPs using a rather simple message-passing scheme via
* a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
- * channel is conneced to a specific software driver on the IOP. For example
+ * channel is connected to a specific software driver on the IOP. For example
* on the SCC IOP there is one channel for each serial port. Each channel has
* an incoming and and outgoing message queue with a depth of one.
*
bfextu %d2{#13,#3},%d0
.endm
-| decode the 8bit diplacement from the brief extension word
+| decode the 8bit displacement from the brief extension word
.macro fp_decode_disp8
move.b %d2,%d0
ext.w %d0
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
- __GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
return pte;
}
unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ int flags = GFP_KERNEL | __GFP_HIGHMEM;
#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
+ int flags = GFP_KERNEL;
#endif
ptepage = alloc_pages(flags, 0);
{
pte_t *pte;
if (mem_init_done) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL |
- __GFP_REPEAT | __GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
} else {
pte = (pte_t *)early_get_page();
if (pte)
#define KVM_GUEST_KUSEG 0x00000000UL
#define KVM_GUEST_KSEG0 0x40000000UL
#define KVM_GUEST_KSEG23 0x60000000UL
-#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
{
pmd_t *pmd;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+ pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
return pmd;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
- _CACHE_CACHABLE_NONCOHERENT)
+ _page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
- pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}
if (index < 0) {
vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
vcpu);
preempt_enable();
* invalid exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
+ vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
run, vcpu);
preempt_enable();
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
preempt_enable();
- goto dont_update_pc;
+ goto done;
}
kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
- preempt_enable();
- goto dont_update_pc;
}
preempt_enable();
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
dont_update_pc:
- /* Rollback PC */
- vcpu->arch.pc = curr_pc;
-done:
+ /*
+ * This is for exceptions whose emulation updates the PC, so do not
+ * overwrite the PC under any circumstances
+ */
+
return er;
}
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
+extern char __kvm_mips_vcpu_run_end[];
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
/* Jump to guest */
eret
+EXPORT(__kvm_mips_vcpu_run_end)
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);
+#ifdef MODULE
+ offset += mips32_GuestExceptionEnd - mips32_GuestException;
+ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+ vcpu->arch.vcpu_run = gebase + offset;
+#else
+ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
/* Disable hardware page table walking while in guest */
htw_stop();
- r = __kvm_mips_vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *ti = (void *)stack;
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
if (pte)
clear_page(pte);
return pte;
struct page *pte;
#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL, 0);
#endif
if (!pte)
return NULL;
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
- PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (pte) {
if (!pgtable_page_ctor(pte)) {
__free_page(pte);
unsigned long address)
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte = alloc_pages(GFP_KERNEL, 0);
if (!pte)
return NULL;
clear_page(page_address(pte));
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
} else {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
#if 0
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
- PMD_ORDER);
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
return pmd;
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_CBPF_JIT
+ select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- tlb_flush_pgtable(tlb, address);
pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
}
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
#define HPTE_R_G ASM_CONST(0x0000000000000008)
#define HPTE_R_M ASM_CONST(0x0000000000000010)
pgtable_cache[(shift) - 1]; \
})
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
return (pgd_t *)__get_free_page(PGALLOC_GFP);
#else
struct page *page;
- page = alloc_pages(PGALLOC_GFP, 4);
+ page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
if (!page)
return NULL;
return (pgd_t *) page_address(page);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
+ /*
+ * By now all the pud entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
+ /*
+ * By now all the pud entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- tlb_flush_pgtable(tlb, address);
+ /*
+ * By now all the pud entries should be none entries. So go
+ * ahead and flush the page walk cache
+ */
+ flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, table, 0);
}
#define KERN_VIRT_SIZE __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */
#include <asm/book3s/64/hash.h>
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+ unsigned long rts_field;
+ /*
+ * we support 52 bits, hence 52-31 = 21, 0b10101
+ * RTS encoding details
+ * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+ * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+ */
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
+ return rts_field;
+}
#endif /* __ASSEMBLY__ */
#endif
extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__tlb_flush(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
extern void radix__flush_tlb_mm(struct mm_struct *mm);
extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
#else
#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
#endif
#endif
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+ /*
+ * Flush the page table walk cache on freeing a page table. We already
+ * have marked the upper/higher level page table entry none by now.
+ * So it is safe to flush PWC here.
+ */
+ if (!radix_enabled())
+ return;
+ radix__flush_tlb_pwc(tlb, address);
+}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
#include <linux/mm.h>
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
- unsigned long address)
-{
-
-}
#ifdef CONFIG_PPC64
#include <asm/book3s/64/pgalloc.h>
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- pte_fragment_fre((unsigned long *)pte, 1);
+ pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
} else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
}
} else if (frozen_bus) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
}
/*
*/
edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
- if (pe->type & EEH_PE_VF)
+ if (pe->type & EEH_PE_VF) {
eeh_add_virt_device(edev, NULL);
- else
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_hp_add_devices(bus);
+ }
} else if (frozen_bus && rmv_data->removed) {
pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
ssleep(5);
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
-BEGIN_MMU_FTR_SECTION
- b 2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
beq- 2f
+FTR_SECTION_ELSE
+ b 2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
.machine push
.machine "power4"
printk(KERN_INFO "PCI: Probing PCI hardware\n");
- pci_io_base = ISA_IO_BASE;
/* For now, override phys_mem_access_prot. If we need it,g
* later, we may move that initialization to each ppc_md
*/
current->thread.regs = regs - 1;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Clear any transactional state, we're exec()ing. The cause is
+ * not important as there will never be a recheckpoint so it's not
+ * user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
+#endif
+
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 125
+#define IBM_ARCH_VEC_NRCORES_OFFSET 133
W(NR_CPUS), /* number of cores supported */
0,
0,
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32][0]));
+ offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32][0]));
+ offsetof(struct thread_fp_state, fpr[32]));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
- /* We need to setup MSR for VSX register save instructions. Here we
- * also clear the MSR RI since when we do the treclaim, we won't have a
- * valid kernel pointer for a while. We clear RI here as it avoids
- * adding another mtmsr closer to the treclaim. This makes the region
- * maked as non-recoverable wider than it needs to be but it saves on
- * inserting another mtmsrd later.
- */
+ /* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
- li r16, MSR_RI
+ li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
- /* The moment we treclaim, ALL of our GPRs will switch
+ /* Clear MSR RI since we are about to change r1, EE is already off. */
+ li r4, 0
+ mtmsrd r4, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ *
+ * The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+
+ /* Reset MSR RI so we can take SLB faults again */
+ li r11, MSR_RI
+ mtmsrd r11, 1
+
mfspr r11, SPRN_PPR
HMT_MEDIUM
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)
- /* Clear the MSR RI since we are about to change R1. EE is already off
- */
- li r4, 0
- mtmsrd r4, 1
-
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
ld r6, _CCR(r7)
mtcr r6
- REST_GPR(1, r7) /* GPR1 */
- REST_GPR(5, r7) /* GPR5-7 */
REST_GPR(6, r7)
- ld r7, GPR7(r7)
+
+ /*
+ * Store r1 and r5 on the stack so that we can access them
+ * after we clear MSR RI.
+ */
+
+ REST_GPR(5, r7)
+ std r5, -8(r1)
+ ld r5, GPR1(r7)
+ std r5, -16(r1)
+
+ REST_GPR(7, r7)
+
+ /* Clear MSR RI since we are about to change r1. EE is already off */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /*
+ * BE CAREFUL HERE:
+ * At this point we can't take an SLB miss since we have MSR_RI
+ * off. Load only to/from the stack/paca which are in SLB bolted regions
+ * until we turn MSR RI back on.
+ */
+
+ ld r5, -8(r1)
+ ld r1, -16(r1)
/* Commit register state as checkpointed state: */
TRECHKPT
DBG_LOW(" -> hit\n");
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N |
+ ~(HPTE_R_PPP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PPP | HPTE_R_N |
HPTE_R_C)));
}
native_unlock_hpte(hptep);
/* Update the HPTE */
hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N)));
+ ~(HPTE_R_PPP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PPP | HPTE_R_N)));
/*
* Ensure it is out of the tlb too. Bolted entries base and
* actual page size will be same.
}
}
/* This works for all page sizes, and for 256M and 1T segments */
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+ else
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
shift = mmu_psize_defs[size].shift;
avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
/*
* We can't allow hardware to update hpte bits. Hence always
* set 'R' bit and set 'C' if it is a write fault
- * Memory coherence is always enabled
*/
- rflags |= HPTE_R_R | HPTE_R_M;
+ rflags |= HPTE_R_R;
if (pteflags & _PAGE_DIRTY)
rflags |= HPTE_R_C;
if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
rflags |= HPTE_R_I;
- if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
- if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
- rflags |= (HPTE_R_I | HPTE_R_W);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+ else
+ /*
+ * Add memory coherence if cache inhibited is not set
+ */
+ rflags |= HPTE_R_M;
return rflags;
}
vmemmap = (struct page *)H_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+#ifdef CONFIG_PCI
+ pci_io_base = ISA_IO_BASE;
+#endif
+
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
cachep = PGT_CACHE(pdshift - pshift);
#endif
- new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+ new = kmem_cache_zalloc(cachep, GFP_KERNEL);
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
/*
* set the process table entry,
*/
- rts_field = 3ull << PPC_BITLSHIFT(2);
+ rts_field = radix__get_tree_size();
process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
return 0;
}
process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
/*
* Fill in the process table.
- * we support 52 bits, hence 52-28 = 24, 11000
*/
- rts_field = 3ull << PPC_BITLSHIFT(2);
+ rts_field = radix__get_tree_size();
process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
/*
* Fill in the partition table. We are suppose to use effective address
static void __init radix_init_partition_table(void)
{
unsigned long rts_field;
- /*
- * we support 52 bits, hence 52-28 = 24, 11000
- */
- rts_field = 3ull << PPC_BITLSHIFT(2);
+
+ rts_field = radix__get_tree_size();
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
__vmalloc_end = RADIX_VMALLOC_END;
vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+ pci_io_base = ISA_IO_BASE;
+#endif
+
/*
* For now radix also use the same frag size
*/
pte_t *pte;
if (slab_is_available()) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
} else {
pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
{
struct page *ptepage;
- gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
ptepage = alloc_pages(flags, 0);
if (!ptepage)
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
void *ret = NULL;
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (!page)
return NULL;
if (!kernel && !pgtable_page_ctor(page)) {
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+ unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */
rb |= set << PPC_BITLSHIFT(51);
rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
- __tlbiel_pid(pid, set);
+ __tlbiel_pid(pid, set, ric);
}
return;
}
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
}
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap)
+ unsigned long ap, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap)
+ unsigned long ap, unsigned long ric)
{
- unsigned long rb,rs,ric,prs,r;
+ unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */
- ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
*/
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm->context.id;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_pid(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ unsigned long pid;
+ struct mm_struct *mm = tlb->mm;
+
+ preempt_disable();
+
+ pid = mm->context.id;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_va(vmaddr, pid, ap);
+ _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
preempt_enable();
}
void radix__flush_tlb_mm(struct mm_struct *mm)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm->context.id;
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_pid(pid);
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} else
- _tlbiel_pid(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ unsigned long pid;
+ struct mm_struct *mm = tlb->mm;
+
+ preempt_disable();
+
+ pid = mm->context.id;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+
+ if (!mm_is_core_local(mm)) {
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+ raw_spin_lock(&native_tlbie_lock);
+ _tlbie_pid(pid, RIC_FLUSH_PWC);
+ if (lock_tlbie)
+ raw_spin_unlock(&native_tlbie_lock);
+ } else
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+ preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
unsigned long ap, int nid)
{
- unsigned int pid;
+ unsigned long pid;
preempt_disable();
pid = mm ? mm->context.id : 0;
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_va(vmaddr, pid, ap);
+ _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} else
- _tlbiel_va(vmaddr, pid, ap);
+ _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
preempt_enable();
}
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
- _tlbie_pid(0);
+ _tlbie_pid(0, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
}
/* convenience wrappers around the common clk API */
static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mpc512x_clk_factor(
if (rc < 0)
goto out;
- skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+ skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
out:
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
- cfg_addr = (pdn->busno << 8) | pdn->devfn;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
- cfg_addr = (pdn->busno << 8) | pdn->devfn;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
" la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (orig_fpc)
+ : "=d" (rc), "=&d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
u32 exit_stop_request;
u32 exit_validity;
u32 exit_instruction;
+ u32 exit_pei;
u32 halt_successful_poll;
u32 halt_attempted_poll;
u32 halt_poll_invalid;
S390_lowcore.program_new_psw.addr =
(unsigned long) s390_base_pgm_handler;
- /*
- * Clear subchannel ID and number to signal new kernel that no CCW or
- * SCSI IPL has been done (for kexec and kdump)
- */
- S390_lowcore.subchannel_id = 0;
- S390_lowcore.subchannel_nr = 0;
-
do_reset_calls();
}
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
+ .task_ctx_nr = perf_sw_context,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
.pmu_enable = cpumf_pmu_enable,
.pmu_disable = cpumf_pmu_disable,
.event_init = cpumf_pmu_event_init,
goto out;
}
- /* The CPU measurement counter facility does not have overflow
- * interrupts to do sampling. Sampling must be provided by
- * external means, for example, by timers.
- */
- cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
+ vcpu->stat.exit_pei++;
+
if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
return handle_mvpg_pei(vcpu);
if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
{ "exit_external_request", VCPU_STAT(exit_external_request) },
{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
{ "exit_instruction", VCPU_STAT(exit_instruction) },
+ { "exit_pei", VCPU_STAT(exit_pei) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
kvm->arch.model.cpuid = proc->cpuid;
lowest_ibc = sclp.ibc >> 16 & 0xfff;
unblocked_ibc = sclp.ibc & 0xfff;
- if (lowest_ibc) {
+ if (lowest_ibc && proc->ibc) {
if (proc->ibc > unblocked_ibc)
kvm->arch.model.ibc = unblocked_ibc;
else if (proc->ibc < lowest_ibc)
return table;
}
/* Allocate a fresh page */
- page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ page = alloc_page(GFP_KERNEL);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
pgste = pgste_get_lock(ptep);
pgstev = pgste_val(pgste);
pte = *ptep;
- if (pte_swap(pte) &&
+ if (!reset && pte_swap(pte) &&
((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
(pgstev & _PGSTE_GPS_ZERO))) {
ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
- PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
void *pg;
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+ pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
if (!pg)
return NULL;
page = virt_to_page(pg);
#include <linux/mm.h>
#include <linux/slab.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache,
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache,
- GFP_KERNEL|__GFP_REPEAT);
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pte_t *pte = NULL;
if (page)
pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
- __GFP_REPEAT | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
#ifndef __ASSEMBLY__
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
/*
* Release a thread_info structure
*/
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *info = (void *)stack;
struct single_step_state *step_state = info->step_state;
if (step_state) {
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
int order)
{
- gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+ gfp_t flags = GFP_KERNEL|__GFP_ZERO;
struct page *p;
int i;
{
pte_t *pte;
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
}
{
struct page *pte;
- pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!pte)
return NULL;
if (!pgtable_page_ctor(pte)) {
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/*
* Allocate one PTE table.
source "drivers/pci/Kconfig"
+config ISA_BUS
+ bool "ISA-style bus support on modern systems" if EXPERT
+ select ISA_BUS_API
+ help
+ Enables ISA-style drivers on modern systems. This is necessary to
+ support PC/104 devices on X86_64 platforms.
+
+ If unsure, say N.
+
# x86_64 have no ISA slots, but can have ISA-style DMA.
config ISA_DMA_API
bool "ISA-style DMA support" if (X86_64 && EXPERT)
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+ fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
msr_fail:
pr_cont("Broken PMU hardware detected, using software events only.\n");
- pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+ printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
reg, val_new);
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
- const void __user *fp;
+ const unsigned long __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
return;
- fp = (void __user *)regs->bp;
+ fp = (unsigned long __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;
+
frame.next_frame = NULL;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, 16))
+ if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
break;
- bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
break;
- bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+ bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
if (bytes != 0)
break;
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
-intel-rapl-objs := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
+intel-rapl-perf-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
+
+ /*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
+ /*
+ * When HT is off these events can only run on the bottom 4 counters
+ * When HT is on, they are impacted by the HT bug and require EXCL access
+ */
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+ /*
+ * when HT is off, these can only run on the bottom 4 counters
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
EVENT_CONSTRAINT_END
};
int i;
for (i = 0; i < rapl_pmus->maxpkg; i++)
- kfree(rapl_pmus->pmus + i);
+ kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
.format_group = &hswep_uncore_cbox_format_group,
};
-static struct intel_uncore_type bdx_uncore_sbox = {
- .name = "sbox",
- .num_counters = 4,
- .num_boxes = 4,
- .perf_ctr_bits = 48,
- .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
- .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
- .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
- .msr_offset = HSWEP_SBOX_MSR_OFFSET,
- .ops = &hswep_uncore_sbox_msr_ops,
- .format_group = &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX 3
-
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
- &bdx_uncore_sbox,
NULL,
};
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
-
- /* BDX-DE doesn't have SBOX */
- if (boot_cpu_data.x86_model == 86)
- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
*/
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
+#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#endif /* _ASM_X86_CPUFEATURES_H */
--- /dev/null
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them. There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH 0x0E
+#define INTEL_FAM6_CORE2_MEROM 0x0F
+#define INTEL_FAM6_CORE2_MEROM_L 0x16
+#define INTEL_FAM6_CORE2_PENRYN 0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
+
+#define INTEL_FAM6_NEHALEM 0x1E
+#define INTEL_FAM6_NEHALEM_EP 0x1A
+#define INTEL_FAM6_NEHALEM_EX 0x2E
+#define INTEL_FAM6_WESTMERE 0x25
+#define INTEL_FAM6_WESTMERE2 0x1F
+#define INTEL_FAM6_WESTMERE_EP 0x2C
+#define INTEL_FAM6_WESTMERE_EX 0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE 0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
+#define INTEL_FAM6_IVYBRIDGE 0x3A
+#define INTEL_FAM6_IVYBRIDGE_X 0x3E
+
+#define INTEL_FAM6_HASWELL_CORE 0x3C
+#define INTEL_FAM6_HASWELL_X 0x3F
+#define INTEL_FAM6_HASWELL_ULT 0x45
+#define INTEL_FAM6_HASWELL_GT3E 0x46
+
+#define INTEL_FAM6_BROADWELL_CORE 0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D 0x56
+#define INTEL_FAM6_BROADWELL_GT3E 0x47
+#define INTEL_FAM6_BROADWELL_X 0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
+#define INTEL_FAM6_SKYLAKE_X 0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
+#define INTEL_FAM6_ATOM_LINCROFT 0x26
+#define INTEL_FAM6_ATOM_PENWELL 0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
+#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) \
- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+ (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR) \
+ (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
+ MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
#define flush_insn_slot(p) do { } while (0)
#include <linux/irqbypass.h>
#include <linux/hyperv.h>
+#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ return __default_cpu_present_to_apicid(mps_cpu);
+#else
+ WARN_ON_ONCE(1);
+ return BAD_APICID;
+#endif
+}
+
#endif /* _ASM_X86_KVM_HOST_H */
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
- if (msr_tracepoint_active(__tracepoint_read_msr))
+ if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
- page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
if (!page)
return NULL;
if (!pgtable_pmd_page_ctor(page)) {
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ return (pud_t *)get_zeroed_page(GFP_KERNEL);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
return product;
}
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
- u64 delta = rdtsc_ordered() - src->tsc_timestamp;
- return pvclock_scale_delta(delta, src->tsc_to_system_mul,
- src->tsc_shift);
-}
-
static __always_inline
unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
cycle_t *cycles, u8 *flags)
{
unsigned version;
- cycle_t ret, offset;
- u8 ret_flags;
+ cycle_t offset;
+ u64 delta;
version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
- offset = pvclock_get_nsec_offset(src);
- ret = src->system_time + offset;
- ret_flags = src->flags;
-
- *cycles = ret;
- *flags = ret_flags;
+ delta = rdtsc_ordered() - src->tsc_timestamp;
+ offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+ *cycles = src->system_time + offset;
+ *flags = src->flags;
return version;
}
struct thread_info;
struct stacktrace_ops;
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
unsigned long *stack,
unsigned long bp,
const struct stacktrace_ops *ops,
int *graph);
extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;
- if (i == 0)
- return 0;
+ if (!i)
+ return -ENODEV;
nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
+ ioapics[i].iomem_res = &res[num];
num++;
- ioapics[i].iomem_res = res;
}
ioapic_resources = res;
u64 value;
/* re-enable TopologyExtensions if switched off by BIOS */
- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+ if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
- pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{
- struct task_struct *task;
unsigned long ret_addr;
int index;
if (addr != (unsigned long)return_to_handler)
return;
- task = tinfo->task;
index = task->curr_ret_stack;
if (!task->ret_stack || index < *graph)
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{ }
#endif
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
void *p, unsigned int size, void *end)
{
- void *t = tinfo;
+ void *t = task_stack_page(task);
if (end) {
if (p < end && p >= (end-THREAD_SIZE))
return 1;
}
unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
{
struct stack_frame *frame = (struct stack_frame *)bp;
- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+ while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
unsigned long addr;
addr = *stack;
} else {
ops->address(data, addr, 0);
}
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
stack++;
}
EXPORT_SYMBOL_GPL(print_context_stack);
unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
struct stack_frame *frame = (struct stack_frame *)bp;
unsigned long *ret_addr = &frame->return_address;
- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+ while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
unsigned long addr = *ret_addr;
if (!__kernel_text_address(addr))
break;
frame = frame->next_frame;
ret_addr = &frame->return_address;
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
return (unsigned long)frame;
bp = stack_frame(task, regs);
for (;;) {
- struct thread_info *context;
void *end_stack;
end_stack = is_hardirq_stack(stack, cpu);
if (!end_stack)
end_stack = is_softirq_stack(stack, cpu);
- context = task_thread_info(task);
- bp = ops->walk_stack(context, stack, bp, ops, data,
+ bp = ops->walk_stack(task, stack, bp, ops, data,
end_stack, &graph);
/* Stop if not on irq stack */
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
- struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
unsigned used = 0;
* current stack address. If the stacks consist of nested
* exceptions
*/
- tinfo = task_thread_info(task);
while (!done) {
unsigned long *stack_end;
enum stack_type stype;
if (ops->stack(data, id) < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp, ops,
+ bp = ops->walk_stack(task, stack, bp, ops,
data, stack_end, &graph);
ops->stack(data, "<EOE>");
/*
if (ops->stack(data, "IRQ") < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp,
+ bp = ops->walk_stack(task, stack, bp,
ops, data, stack_end, &graph);
/*
* We link to the next stack (which would be
/*
* This handles the process stack:
*/
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+ bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
put_cpu();
}
EXPORT_SYMBOL(dump_trace);
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
static void __init fix_hypertransport_config(int num, int slot, int func)
{
{
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Only applies to Nvidia root ports (bus 0) and not to
+ * Nvidia graphics cards with PCI ports on secondary buses.
+ */
+ if (num)
+ return;
+
/*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
#endif
}
+#define BCM4331_MMIO_SIZE 16384
+#define BCM4331_PM_CAP 0x40
+#define bcma_aread32(reg) ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+ void __iomem *mmio;
+ u16 pmcsr;
+ u64 addr;
+ int i;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+ return;
+
+ /* Card may have been put into PCI_D3hot by grub quirk */
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+ mdelay(10);
+
+ pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+ dev_err("Cannot power up Apple AirPort card\n");
+ return;
+ }
+ }
+
+ addr = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+ addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+ addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+ if (!mmio) {
+ dev_err("Cannot iomap Apple AirPort card\n");
+ return;
+ }
+
+ pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+ for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+ udelay(10);
+
+ bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(1);
+
+ bcma_awrite32(BCMA_RESET_CTL, 0);
+ bcma_aread32(BCMA_RESET_CTL);
+ udelay(10);
+
+ early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
void (*f)(int num, int slot, int func);
};
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
*/
{ PCI_VENDOR_ID_INTEL, 0x0f00,
PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_BROADCOM, 0x4331,
+ PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
{}
};
+static void __init early_pci_scan_bus(int bus);
+
/**
* check_dev_quirk - apply early quirks to a given PCI device
* @num: bus number
*
* Check the vendor & device ID against the early quirks table.
*
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
* poke at this device again.
*/
static int __init check_dev_quirk(int num, int slot, int func)
u16 vendor;
u16 device;
u8 type;
+ u8 sec;
int i;
class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
type = read_pci_config_byte(num, slot, func,
PCI_HEADER_TYPE);
+
+ if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+ sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+ if (sec > num)
+ early_pci_scan_bus(sec);
+ }
+
if (!(type & 0x80))
return -1;
return 0;
}
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
{
int slot, func;
- if (!early_pci_allowed())
- return;
-
/* Poor man's PCI discovery */
- /* Only scan the root bus */
for (slot = 0; slot < 32; slot++)
for (func = 0; func < 8; func++) {
/* Only probe function 0 on single fn devices */
- if (check_dev_quirk(0, slot, func))
+ if (check_dev_quirk(bus, slot, func))
break;
}
}
+
+void __init early_quirks(void)
+{
+ if (!early_pci_allowed())
+ return;
+
+ early_pci_scan_bus(0);
+}
# error "Need more than one PGD for the ESPFIX hack"
#endif
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
void do_softirq_own_stack(void)
{
- struct thread_info *curstk;
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- curstk = current_stack();
irqstk = __this_cpu_read(softirq_stack);
/* build the stack frame on the softirq stack */
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
+ /*
+ * Trap flag (TF) has been set here because this fault
+ * happened where the single stepping will be done.
+ * So clear it by resetting the current kprobe:
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+
+ /*
+ * If the TF flag was set before the kprobe hit,
+ * don't touch it:
+ */
regs->flags |= kcb->kprobe_old_flags;
+
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
{
unsigned version;
- cycle_t ret;
u8 flags;
do {
- version = __pvclock_read_cycles(src, &ret, &flags);
+ version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
+
+ flags = src->flags;
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
return flags & valid_flags;
do {
version = __pvclock_read_cycles(src, &ret, &flags);
+ /* Make sure that the version double-check is last. */
+ smp_rmb();
} while ((src->version & 1) || version != src->version);
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
local_irq_disable();
}
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
void ist_enter(struct pt_regs *regs)
{
if (user_mode(regs)) {
rcu_nmi_enter();
}
- /*
- * We are atomic because we're on the IST stack; or we're on
- * x86_32, in which case we still shouldn't schedule; or we're
- * on x86_64 and entered from user mode, in which case we're
- * still atomic unless ist_begin_non_atomic is called.
- */
- preempt_count_add(HARDIRQ_OFFSET);
+ preempt_disable();
/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
void ist_exit(struct pt_regs *regs)
{
- preempt_count_sub(HARDIRQ_OFFSET);
+ preempt_enable_no_resched();
if (!user_mode(regs))
rcu_nmi_exit();
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);
- preempt_count_sub(HARDIRQ_OFFSET);
+ preempt_enable_no_resched();
}
/**
*/
void ist_end_non_atomic(void)
{
- preempt_count_add(HARDIRQ_OFFSET);
+ preempt_disable();
}
static nokprobe_inline int
/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
if (guest_tsc < tsc_deadline)
- __delay(tsc_deadline - guest_tsc);
+ __delay(min(tsc_deadline - guest_tsc,
+ nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}
static void start_apic_timer(struct kvm_lapic *apic)
/* enable / disable AVIC */
static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
+#endif
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
} else
kvm_disable_tdp();
- if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
- avic = false;
-
- if (avic)
- pr_info("AVIC enabled\n");
+ if (avic) {
+ if (!npt_enabled ||
+ !boot_cpu_has(X86_FEATURE_AVIC) ||
+ !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+ avic = false;
+ else
+ pr_info("AVIC enabled\n");
+ }
return 0;
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
u64 entry;
- int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+ int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
{
u64 entry;
/* ID = 0xff (broadcast), ID > 0xff (reserved) */
- int h_physical_id = __default_cpu_present_to_apicid(cpu);
+ int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
if (avic_vcpu_is_running(vcpu))
wrmsrl(SVM_AVIC_DOORBELL,
- __default_cpu_present_to_apicid(vcpu->cpu));
+ kvm_cpu_get_apicid(vcpu->cpu));
else
kvm_vcpu_wake_up(vcpu);
}
unsigned int dest;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
do {
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
/* Set SN when the vCPU is preempted */
/* Checks for #GP/#SS exceptions. */
exn = false;
- if (is_protmode(vcpu)) {
+ if (is_long_mode(vcpu)) {
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret);
+ } else if (is_protmode(vcpu)) {
/* Protected mode: apply checks for segment validity in the
* following order:
* - segment type check (#GP(0) may be thrown)
* execute-only code segment
*/
exn = ((s.type & 0xa) == 8);
- }
- if (exn) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- if (is_long_mode(vcpu)) {
- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
- * non-canonical form. This is an only check for long mode.
- */
- exn = is_noncanonical_address(*ret);
- } else if (is_protmode(vcpu)) {
+ if (exn) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return 0;
vcpu->pre_pcpu = vcpu->cpu;
unsigned long flags;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
return;
do {
int idx, ret = -EINVAL;
if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP))
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(kvm->vcpus[0]))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
- return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
- vcpu->arch.virtual_tsc_shift);
-}
-
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
u64 v = (u64)khz * (1000000 + ppm);
#define ARCH_X86_KVM_X86_H
#include <linux/kvm_host.h>
+#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
extern struct static_key kvm_no_apic_vcpu;
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+ return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+ vcpu->arch.virtual_tsc_shift);
+}
+
/* Same "calling convention" as do_div:
* - divide (n << 32) by base
* - put result in n
void *data)
{
if (val == DIE_GPF) {
- pr_emerg("CONFIG_KASAN_INLINE enabled");
- pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+ pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+ pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
}
return NOTIFY_OK;
}
#include <asm/fixmap.h>
#include <asm/mtrr.h>
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
return -ENODEV;
printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+ acpi_irq_penalty_init();
pcibios_enable_irq = acpi_pci_irq_enable;
pcibios_disable_irq = acpi_pci_irq_disable;
x86_init.pci.init_irq = x86_init_noop;
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+ gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
if (!efi_pgd)
return -ENOMEM;
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
+#include <asm/tlbflush.h>
/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);
* kernel's text (this value is passed in the image header).
*/
unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
/*
* Value of the cr3 register from before the hibernation (this value is passed
pgd_t *temp_level4_pgt __visible;
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+ pmd_t *pmd;
+ pud_t *pud;
+
+ /*
+ * The new mapping only has to cover the page containing the image
+ * kernel's entry point (jump_address_phys), because the switch over to
+ * it is carried out by relocated code running from a page allocated
+ * specifically for this purpose and covered by the identity mapping, so
+ * the temporary kernel text mapping is only needed for the final jump.
+ * Moreover, in that mapping the virtual address of the image kernel's
+ * entry point must be the same as its virtual address in the image
+ * kernel (restore_jump_address), so the image kernel's
+ * restore_registers() code doesn't find itself in a different area of
+ * the virtual address space after switching over to the original page
+ * tables used by the image kernel.
+ */
+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!pud)
+ return -ENOMEM;
+
+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd)
+ return -ENOMEM;
+
+ set_pmd(pmd + pmd_index(restore_jump_address),
+ __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+ set_pud(pud + pud_index(restore_jump_address),
+ __pud(__pa(pmd) | _KERNPG_TABLE));
+ set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+ __pgd(__pa(pud) | _KERNPG_TABLE));
+
+ return 0;
+}
static void *alloc_pgt_page(void *context)
{
if (!temp_level4_pgt)
return -ENOMEM;
- /* It is safe to reuse the original kernel mapping */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+ /* Prepare a temporary mapping for the kernel text */
+ result = set_up_temporary_text_mapping();
+ if (result)
+ return result;
/* Set up the direct mapping from scratch */
for (i = 0; i < nr_pfn_mapped; i++) {
return 0;
}
+static int relocate_restore_code(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ relocated_restore_code = get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+
+ memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+ /* Make the page containing the relocated code executable */
+ pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+ pud = pud_offset(pgd, relocated_restore_code);
+ if (pud_large(*pud)) {
+ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+ } else {
+ pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+ if (pmd_large(*pmd)) {
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+ } else {
+ pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+ }
+ }
+ __flush_tlb_all();
+
+ return 0;
+}
+
int swsusp_arch_resume(void)
{
int error;
/* We have got enough memory and from now on we cannot recover */
- if ((error = set_up_temporary_mappings()))
+ error = set_up_temporary_mappings();
+ if (error)
return error;
- relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
- if (!relocated_restore_code)
- return -ENOMEM;
- memcpy(relocated_restore_code, &core_restore_code,
- &restore_registers - &core_restore_code);
+ error = relocate_restore_code();
+ if (error)
+ return error;
restore_image();
return 0;
struct restore_data_record {
unsigned long jump_address;
+ unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
};
-#define RESTORE_MAGIC 0x0123456789ABCDEFUL
+#define RESTORE_MAGIC 0x123456789ABCDEF0UL
/**
* arch_hibernation_header_save - populate the architecture specific part
if (max_size < sizeof(struct restore_data_record))
return -EOVERFLOW;
- rdr->jump_address = restore_jump_address;
+ rdr->jump_address = (unsigned long)&restore_registers;
+ rdr->jump_address_phys = __pa_symbol(&restore_registers);
rdr->cr3 = restore_cr3;
rdr->magic = RESTORE_MAGIC;
return 0;
struct restore_data_record *rdr = addr;
restore_jump_address = rdr->jump_address;
+ jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
pushfq
popq pt_regs_flags(%rax)
- /* save the address of restore_registers */
- movq $restore_registers, %rax
- movq %rax, restore_jump_address(%rip)
/* save cr3 */
movq %cr3, %rax
movq %rax, restore_cr3(%rip)
ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
- /* switch to temporary page tables */
- movq $__PAGE_OFFSET, %rdx
- movq temp_level4_pgt(%rip), %rax
- subq %rdx, %rax
- movq %rax, %cr3
- /* Flush TLB */
- movq mmu_cr4_features(%rip), %rax
- movq %rax, %rdx
- andq $~(X86_CR4_PGE), %rdx
- movq %rdx, %cr4; # turn off PGE
- movq %cr3, %rcx; # flush TLB
- movq %rcx, %cr3;
- movq %rax, %cr4; # turn PGE back on
-
/* prepare to jump to the image kernel */
- movq restore_jump_address(%rip), %rax
- movq restore_cr3(%rip), %rbx
+ movq restore_jump_address(%rip), %r8
+ movq restore_cr3(%rip), %r9
+
+ /* prepare to switch to temporary page tables */
+ movq temp_level4_pgt(%rip), %rax
+ movq mmu_cr4_features(%rip), %rbx
/* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
+
+ /* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
+ /* switch to temporary page tables */
+ movq $__PAGE_OFFSET, %rcx
+ subq %rcx, %rax
+ movq %rax, %cr3
+ /* flush TLB */
+ movq %rbx, %rcx
+ andq $~(X86_CR4_PGE), %rcx
+ movq %rcx, %cr4; # turn off PGE
+ movq %cr3, %rcx; # flush TLB
+ movq %rcx, %cr3;
+ movq %rbx, %cr4; # turn PGE back on
.Lloop:
testq %rdx, %rdx
jz .Ldone
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
jmp .Lloop
+
.Ldone:
/* jump to the restore_registers address from the image header */
- jmpq *%rax
- /*
- * NOTE: This assumes that the boot kernel's text mapping covers the
- * image kernel's page containing restore_registers and the address of
- * this page is the same as in the image kernel's text mapping (it
- * should always be true, because the text mapping is linear, starting
- * from 0, and is supposed to cover the entire kernel text for every
- * kernel).
- *
- * code below belongs to the image kernel
- */
+ jmpq *%r8
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
ENTRY(restore_registers)
FRAME_BEGIN
/* go back to the original page tables */
- movq %rbx, %cr3
+ movq %r9, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
* We include the PMD passed in on _both_ boundaries. */
- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
#endif
}
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
- /* If there's an existing pte, then don't allow _PAGE_RW to be set */
- if (pte_val_ma(*ptep) & _PAGE_PRESENT)
- pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
- pte_val_ma(pte));
-
- return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
- unsigned long pfn;
-
- if (xen_feature(XENFEAT_writable_page_tables) ||
- xen_feature(XENFEAT_auto_translated_physmap) ||
- xen_start_info->mfn_list >= __START_KERNEL_map)
- return pte;
-
- /*
- * Pages belonging to the initial p2m list mapped outside the default
- * address range must be mapped read-only. This region contains the
- * page tables for mapping the p2m list, too, and page tables MUST be
- * mapped read-only.
- */
- pfn = pte_pfn(pte);
- if (pfn >= xen_start_info->first_p2m_pfn &&
- pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
- pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
- return pte;
-}
-#endif /* CONFIG_X86_64 */
-
/*
* Init-time set_pte while constructing initial pagetables, which
* doesn't allow RO page table pages to be remapped RW.
* so always write the PTE directly and rely on Xen trapping and
* emulating any updates as necessary.
*/
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
{
- if (pte_mfn(pte) != INVALID_P2M_ENTRY)
- pte = mask_rw_pte(ptep, pte);
- else
- pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+ unsigned long pfn;
+
+ /*
+ * Pages belonging to the initial p2m list mapped outside the default
+ * address range must be mapped read-only. This region contains the
+ * page tables for mapping the p2m list, too, and page tables MUST be
+ * mapped read-only.
+ */
+ pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+ if (xen_start_info->mfn_list < __START_KERNEL_map &&
+ pfn >= xen_start_info->first_p2m_pfn &&
+ pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+ pte &= ~_PAGE_RW;
+#endif
+ pte = pte_pfn_to_mfn(pte);
+ return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+ /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+ if (pte_mfn(pte) != INVALID_P2M_ENTRY
+ && pte_val_ma(*ptep) & _PAGE_PRESENT)
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+#endif
native_set_pte(ptep, pte);
}
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
+ pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
.pte_val = PV_CALLEE_SAVE(xen_pte_val),
.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
- .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+ .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
#ifdef CONFIG_X86_PAE
if (unlikely(!slab_is_available()))
return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
- return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ return (void *)__get_free_page(GFP_KERNEL);
}
static void __ref free_p2m_page(void *p)
pte_t *ptep;
int i;
- ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ ptep = (pte_t *)__get_free_page(GFP_KERNEL);
if (!ptep)
return NULL;
for (i = 0; i < 1024; i++)
ret = submit_bio_wait(type, bio);
if (ret == -EOPNOTSUPP)
ret = 0;
+ bio_put(bio);
}
blk_finish_plug(&plug);
}
}
- if (bio)
+ if (bio) {
ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ bio_put(bio);
+ }
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
}
}
- if (bio)
- return submit_bio_wait(WRITE, bio);
+ if (bio) {
+ ret = submit_bio_wait(WRITE, bio);
+ bio_put(bio);
+ return ret;
+ }
return 0;
}
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q)) {
- if (blk_attempt_plug_merge(q, bio, &request_count,
- &same_queue_rq))
- return BLK_QC_T_NONE;
- } else
- request_count = blk_plug_queued_count(q);
+ if (!is_flush_fua && !blk_queue_nomerges(q) &&
+ blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+ return BLK_QC_T_NONE;
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
blk_queue_split(q, &bio, q->bio_split);
- if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return BLK_QC_T_NONE;
+ if (!is_flush_fua && !blk_queue_nomerges(q)) {
+ if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+ return BLK_QC_T_NONE;
+ } else
+ request_count = blk_plug_queued_count(q);
rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+ task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
+ task_unlock(p);
out:
return ret;
}
struct pefile_context *ctx = context;
ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
- return ctx->digest ? 0 : -ENOMEM;
+ if (!ctx->digest)
+ return -ENOMEM;
+
+ ctx->digest_len = vlen;
+
+ return 0;
}
if (asymmetric_key_id_same(p->id, auth))
goto found_issuer_check_skid;
}
- } else {
+ } else if (sig->auth_ids[1]) {
auth = sig->auth_ids[1];
pr_debug("- want %*phN\n", auth->len, auth->data);
for (p = pkcs7->certs; p; p = p->next) {
sig = payload->data[asym_auth];
if (!sig->auth_ids[0] && !sig->auth_ids[1])
- return 0;
+ return -ENOKEY;
if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
return -EPERM;
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+ [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
};
};
struct pkcs1pad_request {
- struct akcipher_request child_req;
-
struct scatterlist in_sg[3], out_sg[2];
uint8_t *in_buf, *out_buf;
+
+ struct akcipher_request child_req;
};
static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+ acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
return ret;
}
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+ acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
return n;
}
acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
{
u64 address;
- u8 access_width;
- u32 bit_width;
- u8 bit_offset;
- u64 value64;
- u32 new_value32, old_value32;
- u8 index;
acpi_status status;
ACPI_FUNCTION_NAME(hw_write);
return (status);
}
- /* Convert access_width into number of bits based */
-
- access_width = acpi_hw_get_access_bit_width(reg, 32);
- bit_width = reg->bit_offset + reg->bit_width;
- bit_offset = reg->bit_offset;
-
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- index = 0;
- while (bit_width) {
- /*
- * Use offset style bit reads because "Index * AccessWidth" is
- * ensured to be less than 32-bits by acpi_hw_validate_register().
- */
- new_value32 = ACPI_GET_BITS(&value, index * access_width,
- ACPI_MASK_BITS_ABOVE_32
- (access_width));
-
- if (bit_offset >= access_width) {
- bit_offset -= access_width;
- } else {
- /*
- * Use offset style bit masks because access_width is ensured
- * to be less than 32-bits by acpi_hw_validate_register() and
- * bit_offset/bit_width is less than access_width here.
- */
- if (bit_offset) {
- new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
- }
- if (bit_width < access_width) {
- new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
- }
-
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- if (bit_offset || bit_width < access_width) {
- /*
- * Read old values in order not to modify the bits that
- * are beyond the register bit_width/bit_offset setting.
- */
- status =
- acpi_os_read_memory((acpi_physical_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- &value64,
- access_width);
- old_value32 = (u32)value64;
-
- /*
- * Use offset style bit masks because access_width is
- * ensured to be less than 32-bits by
- * acpi_hw_validate_register() and bit_offset/bit_width is
- * less than access_width here.
- */
- if (bit_offset) {
- old_value32 &=
- ACPI_MASK_BITS_ABOVE
- (bit_offset);
- bit_offset = 0;
- }
- if (bit_width < access_width) {
- old_value32 &=
- ACPI_MASK_BITS_BELOW
- (bit_width);
- }
-
- new_value32 |= old_value32;
- }
-
- value64 = (u64)new_value32;
- status =
- acpi_os_write_memory((acpi_physical_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- value64, access_width);
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- if (bit_offset || bit_width < access_width) {
- /*
- * Read old values in order not to modify the bits that
- * are beyond the register bit_width/bit_offset setting.
- */
- status =
- acpi_hw_read_port((acpi_io_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- &old_value32,
- access_width);
-
- /*
- * Use offset style bit masks because access_width is
- * ensured to be less than 32-bits by
- * acpi_hw_validate_register() and bit_offset/bit_width is
- * less than access_width here.
- */
- if (bit_offset) {
- old_value32 &=
- ACPI_MASK_BITS_ABOVE
- (bit_offset);
- bit_offset = 0;
- }
- if (bit_width < access_width) {
- old_value32 &=
- ACPI_MASK_BITS_BELOW
- (bit_width);
- }
-
- new_value32 |= old_value32;
- }
-
- status = acpi_hw_write_port((acpi_io_address)
- address +
- index *
- ACPI_DIV_8
- (access_width),
- new_value32,
- access_width);
- }
- }
-
- /*
- * Index * access_width is ensured to be less than 32-bits by
- * acpi_hw_validate_register().
- */
- bit_width -=
- bit_width > access_width ? access_width : bit_width;
- index++;
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ address, (u64)value,
+ reg->bit_width);
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_write_port((acpi_io_address)
+ address, value, reg->bit_width);
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, access_width, ACPI_FORMAT_UINT64(address),
+ value, reg->bit_width, ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
*/
- acpi_boot_ec_enable();
+ acpi_ec_dsdt_probe();
printk(KERN_INFO PREFIX "Interpreter enabled\n");
static void ec_remove_handlers(struct acpi_ec *ec)
{
- acpi_ec_stop(ec, false);
-
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
+ /*
+ * Stops handling the EC transactions after removing the operation
+ * region handler. This is required because _REG(DISCONNECT)
+ * invoked during the removal can result in new EC transactions.
+ *
+ * Flushes the EC requests and thus disables the GPE before
+ * removing the GPE handler. This is required by the current ACPICA
+ * GPE core. ACPICA GPE core will automatically disable a GPE when
+ * it is indicated but there is no way to handle it. So the drivers
+ * must disable the GPEs prior to removing the GPE handlers.
+ */
+ acpi_ec_stop(ec, false);
+
if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
return AE_OK;
}
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+ {"PNP0C09", 0},
+ {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
{
- if (!boot_ec)
+ acpi_status status;
+
+ if (boot_ec)
return 0;
+
+ /*
+ * Finding EC from DSDT if there is no ECDT EC available. When this
+ * function is invoked, ACPI tables have been fully loaded, we can
+ * walk namespace now.
+ */
+ boot_ec = make_acpi_ec();
+ if (!boot_ec)
+ return -ENOMEM;
+ status = acpi_get_devices(ec_device_ids[0].id,
+ ec_parse_device, boot_ec, NULL);
+ if (ACPI_FAILURE(status) || !boot_ec->handle)
+ return -ENODEV;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
return 0;
return -EFAULT;
}
-static const struct acpi_device_id ec_device_ids[] = {
- {"PNP0C09", 0},
- {"", 0},
-};
-
#if 0
/*
* Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
+ return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
continue;
if (nfit_dcr->dcr->code == dcr->code)
continue;
- rc = sprintf(buf, "%#x\n",
- be16_to_cpu(nfit_dcr->dcr->code));
+ rc = sprintf(buf, "0x%04x\n",
+ le16_to_cpu(nfit_dcr->dcr->code));
break;
}
if (rc != ENXIO)
if (disable_vendor_specific)
dsm_mask &= ~(1 << 8);
} else {
- dev_err(dev, "unknown dimm command family\n");
+ dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
- return force_enable_dimms ? 0 : -ENODEV;
+ /* DSMs are optional, continue loading the driver... */
+ return 0;
}
uuid = to_nfit_uuid(nfit_mem->family);
};
/*
- * Region format interface codes are stored as an array of bytes in the
- * NFIT DIMM Control Region structure
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
*/
-#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
-#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
-#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
enum {
NFIT_BLK_READ_FLUSH = 1,
{
struct acpi_pci_link *link;
int penalty = 0;
+ int i;
list_for_each_entry(link, &acpi_link_list, list) {
/*
*/
if (link->irq.active && link->irq.active == irq)
penalty += PIRQ_PENALTY_PCI_USING;
- else {
- int i;
-
- /*
- * If a link is inactive, penalize the IRQs it
- * might use, but not as severely.
- */
- for (i = 0; i < link->irq.possible_count; i++)
- if (link->irq.possible[i] == irq)
- penalty += PIRQ_PENALTY_PCI_POSSIBLE /
- link->irq.possible_count;
- }
+
+ /*
+ * penalize the IRQs PCI might use, but not as severely.
+ */
+ for (i = 0; i < link->irq.possible_count; i++)
+ if (link->irq.possible[i] == irq)
+ penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
}
return penalty;
{
int penalty = 0;
- if (irq < ACPI_MAX_ISA_IRQS)
- penalty += acpi_isa_irq_penalty[irq];
-
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
penalty += PIRQ_PENALTY_PCI_USING;
}
+ if (irq < ACPI_MAX_ISA_IRQS)
+ return penalty + acpi_isa_irq_penalty[irq];
+
penalty += acpi_irq_pci_sharing_penalty(irq);
return penalty;
}
+int __init acpi_irq_penalty_init(void)
+{
+ struct acpi_pci_link *link;
+ int i;
+
+ /*
+ * Update penalties to facilitate IRQ balancing.
+ */
+ list_for_each_entry(link, &acpi_link_list, list) {
+
+ /*
+ * reflect the possible and active irqs in the penalty table --
+ * useful for breaking ties.
+ */
+ if (link->irq.possible_count) {
+ int penalty =
+ PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
+
+ for (i = 0; i < link->irq.possible_count; i++) {
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.
+ possible[i]] +=
+ penalty;
+ }
+
+ } else if (link->irq.active &&
+ (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
+ }
+ }
+
+ return 0;
+}
+
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
- active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING;
+ (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
bool acpi_isa_irq_available(int irq)
u32 val;
plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
- if (IS_ERR(plat_data))
+ if (!plat_data)
return &ahci_port_info;
plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
ata_scsi_port_error_handler(host, ap);
/* finish or retry handled scmd's and clean up */
- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+ WARN_ON(!list_empty(&eh_work_q));
DPRINTK("EXIT\n");
}
* Looks like a lot of fuss, but it avoids an unnecessary
* +1 usec read-after-write delay for unaffected registers.
*/
- laddr = (long)addr & 0xffff;
+ laddr = (unsigned long)addr & 0xffff;
if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f;
if (laddr == 0x4 || laddr == 0xc) {
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA) += isa.o
+obj-$(CONFIG_ISA_BUS_API) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
return error;
}
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
static void module_create_drivers_dir(struct module_kobject *mk)
{
- if (!mk || mk->drivers_dir)
- return;
+ static DEFINE_MUTEX(drivers_dir_mutex);
- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_lock(&drivers_dir_mutex);
+ if (mk && !mk->drivers_dir)
+ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_unlock(&drivers_dir_mutex);
}
void module_add_driver(struct module *mod, struct device_driver *drv)
}
/* Mark opp-table as multiple CPUs are sharing it now */
- opp_table->shared_opp = true;
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
}
unlock:
mutex_unlock(&opp_table_lock);
*
* This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
*
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
+ * table's status is access-unknown.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
goto unlock;
}
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
cpumask_clear(cpumask);
- if (opp_table->shared_opp) {
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
cpumask_set_cpu(opp_dev->dev->id, cpumask);
} else {
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- return opp_table->shared_opp ? opp_table : NULL;
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+ return opp_table;
+
+ return NULL;
}
}
}
opp_table->np = opp_np;
- opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ if (of_property_read_bool(opp_np, "opp-shared"))
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+ else
+ opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
mutex_unlock(&opp_table_lock);
#endif
};
+enum opp_table_access {
+ OPP_TABLE_ACCESS_UNKNOWN = 0,
+ OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+ OPP_TABLE_ACCESS_SHARED = 2,
+};
+
/**
* struct opp_table - Device opp structure
* @node: table node - contains the devices with OPPs that
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool shared_opp;
+ enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
unsigned int *supported_hw;
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
-#define BCMA_CORE_SIZE 0x1000
-
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
int ret;
/* get_zeroed_page returns page with ref count 1 */
- p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ p = (void *) get_zeroed_page(GFP_KERNEL);
if (!p)
return -ENOMEM;
empty_page = virt_to_page(p);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
- debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
}
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ /* Save uncomplete reqs and bios for migration. */
+ struct list_head requests;
+ struct bio_list bio_list;
};
static unsigned int nr_minors;
const struct blk_mq_queue_data *qd)
{
unsigned long flags;
- struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+ int qid = hctx->queue_num;
+ struct blkfront_info *info = hctx->queue->queuedata;
+ struct blkfront_ring_info *rinfo = NULL;
+ BUG_ON(info->nr_rings <= qid);
+ rinfo = &info->rinfo[qid];
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
return BLK_MQ_RQ_QUEUE_BUSY;
}
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int index)
-{
- struct blkfront_info *info = (struct blkfront_info *)data;
-
- BUG_ON(info->nr_rings <= index);
- hctx->driver_data = &info->rinfo[index];
- return 0;
-}
-
static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
.map_queue = blk_mq_map_queue,
- .init_hctx = blk_mq_init_hctx,
};
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return PTR_ERR(rq);
}
+ rq->queuedata = info;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
if (info->feature_discard) {
{
unsigned int i, r_index;
struct request *req, *n;
- struct blk_shadow *copy;
int rc;
struct bio *bio, *cloned_bio;
- struct bio_list bio_list, merge_bio;
unsigned int segs, offset;
int pending, size;
struct split_bio *split_bio;
- struct list_head requests;
blkfront_gather_backend_features(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs);
- bio_list_init(&bio_list);
- INIT_LIST_HEAD(&requests);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
- /* Stage 1: Make a safe copy of the shadow state. */
- copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
- GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
- if (!copy)
- return -ENOMEM;
-
- /* Stage 2: Set up free list. */
- memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
- for (i = 0; i < BLK_RING_SIZE(info); i++)
- rinfo->shadow[i].req.u.rw.id = i+1;
- rinfo->shadow_free = rinfo->ring.req_prod_pvt;
- rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+ struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
rc = blkfront_setup_indirect(rinfo);
- if (rc) {
- kfree(copy);
+ if (rc)
return rc;
- }
-
- for (i = 0; i < BLK_RING_SIZE(info); i++) {
- /* Not in use? */
- if (!copy[i].request)
- continue;
-
- /*
- * Get the bios in the request so we can re-queue them.
- */
- if (copy[i].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
- /*
- * Flush operations don't contain bios, so
- * we need to requeue the whole request
- */
- list_add(©[i].request->queuelist, &requests);
- continue;
- }
- merge_bio.head = copy[i].request->bio;
- merge_bio.tail = copy[i].request->biotail;
- bio_list_merge(&bio_list, &merge_bio);
- copy[i].request->bio = NULL;
- blk_end_request_all(copy[i].request, 0);
- }
-
- kfree(copy);
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
kick_pending_request_queues(rinfo);
}
- list_for_each_entry_safe(req, n, &requests, queuelist) {
+ list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
}
blk_mq_kick_requeue_list(info->rq);
- while ((bio = bio_list_pop(&bio_list)) != NULL) {
+ while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
if (bio_segments(bio) > segs) {
/*
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
+ unsigned int i, j;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+ bio_list_init(&info->bio_list);
+ INIT_LIST_HEAD(&info->requests);
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct bio_list merge_bio;
+ struct blk_shadow *shadow = rinfo->shadow;
+
+ for (j = 0; j < BLK_RING_SIZE(info); j++) {
+ /* Not in use? */
+ if (!shadow[j].request)
+ continue;
+
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+ if (shadow[j].request->cmd_flags &
+ (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ /*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ */
+ list_add(&shadow[j].request->queuelist, &info->requests);
+ continue;
+ }
+ merge_bio.head = shadow[j].request->bio;
+ merge_bio.tail = shadow[j].request->biotail;
+ bio_list_merge(&info->bio_list, &merge_bio);
+ shadow[j].request->bio = NULL;
+ blk_mq_end_request(shadow[j].request, 0);
+ }
+ }
+
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = negotiate_mq(info);
return err;
err = talk_to_blkback(dev, info);
+ if (!err)
+ blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
/*
* We have to wait for the backend to switch to
break;
case XenbusStateConnected:
- if (dev->state != XenbusStateInitialised) {
+ /*
+ * talk_to_blkback sets state to XenbusStateInitialised
+ * and blkfront_connect sets it to XenbusStateConnected
+ * (if connection went OK).
+ *
+ * If the backend (or toolstack) decides to poke at backend
+ * state (and re-trigger the watch by setting the state repeatedly
+ * to XenbusStateConnected (4)) we need to deal with this.
+ * This is allowed as this is used to communicate to the guest
+ * that the size of disk has changed!
+ */
+ if ((dev->state != XenbusStateInitialised) &&
+ (dev->state != XenbusStateConnected)) {
if (talk_to_blkback(dev, info))
break;
}
+
blkfront_connect(info);
break;
while (!list_empty(&intf->waiting_rcv_msgs)) {
smi_msg = list_entry(intf->waiting_rcv_msgs.next,
struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
flags);
if (rv > 0) {
/*
* To preserve message order, quit if we
- * can't handle a message.
+ * can't handle a message. Add the message
+ * back at the head, this is safe because this
+ * tasklet is the only thing that pulls the
+ * messages.
*/
+ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
break;
} else {
- list_del(&smi_msg->link);
if (rv == 0)
/* Message handled */
ipmi_free_smi_msg(smi_msg);
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
+ select MFD_SYSCON if ARCH_LPC18XX
---help---
Support for clock providers on NXP platforms.
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned int mask = layout->css_mask;
- unsigned int pckr = 0;
+ unsigned int pckr = index;
if (layout->have_slck_mck)
mask |= AT91_PMC_CSSMCK_MCK;
return -ENOMEM;
regmap = syscon_node_to_regmap(of_get_parent(np));
- if (!regmap) {
+ if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
- return -EINVAL;
+ return PTR_ERR(regmap);
}
for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
/* register fixed rate clocks */
clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
- CLK_IS_ROOT, 24000000);
+ 0, 24000000);
clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
- CLK_IS_ROOT, 8000000);
+ 0, 8000000);
clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
- CLK_IS_ROOT, 8000000);
+ 0, 8000000);
clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
- CLK_IS_ROOT, 32000);
+ 0, 32000);
clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
- CLK_IS_ROOT, 24000000);
+ 0, 24000000);
/* fixed rate (optional) clock */
if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
pr_info("pic32-clk: dt requests SOSC.\n");
}
cclk = clk_register(NULL, &cpuclk->hw);
- if (IS_ERR(clk)) {
+ if (IS_ERR(cclk)) {
pr_err("%s: could not register cpuclk %s\n", __func__, name);
- ret = PTR_ERR(clk);
+ ret = PTR_ERR(cclk);
goto free_rate_table;
}
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
-#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
-#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
#define PSECS_PER_SEC 1000000000000LL
return ERR_PTR(-ENOMEM);
init.name = name;
+ init.flags = 0;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
mmc_clock->reg = reg;
mmc_clock->shift = shift;
- /*
- * Assert init_state to soft reset the CLKGEN
- * for mmc tuning phase and degree
- */
- if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
- writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
- ROCKCHIP_MMC_INIT_STATE_RESET,
- mmc_clock->shift), mmc_clock->reg);
-
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
kfree(mmc_clock);
RK3399_CLKGATE_CON(13), 1, GFLAGS),
/* perihp */
- GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+ GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
+ GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 1, GFLAGS),
COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
static const char *const rk3399_cru_critical_clocks[] __initconst = {
"aclk_cci_pre",
+ "aclk_gic",
+ "aclk_gic_noc",
"pclk_perilp0",
"pclk_perilp0",
"hclk_perilp0",
ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
return;
}
ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ iounmap(reg_base);
return;
}
u8 width_div;
u8 width_mux;
+
+ u32 flags;
};
struct reset_data {
data->has_div ? &div->hw : NULL,
data->has_div ? &clk_divider_ops : NULL,
&gate->hw, &clk_gate_ops,
- 0);
+ data->flags);
if (IS_ERR(clk)) {
pr_err("%s: Couldn't register the clock\n", clk_name);
goto free_div;
.offset_rst = 29,
.offset_mux = 24,
.width_mux = 2,
+ .flags = CLK_SET_RATE_PARENT,
};
static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
static u8 tcon_ch1_get_parent(struct clk_hw *hw)
{
struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 reg;
reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
- if (reg >= num_parents)
- return -EINVAL;
-
return reg;
}
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
-static inline void get_seq(__u32 *ts, int *cpu)
+static inline void send_msg(struct cn_msg *msg)
{
preempt_disable();
- *ts = __this_cpu_inc_return(proc_event_counts) - 1;
- *cpu = smp_processor_id();
+
+ msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+ ((struct proc_event *)msg->data)->cpu = smp_processor_id();
+
+ /*
+ * Preemption remains disabled during send to ensure the messages are
+ * ordered according to their sequence numbers.
+ *
+ * If cn_netlink_send() fails, the data is not sent.
+ */
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
+
preempt_enable();
}
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- /* If cn_netlink_send() failed, the data is not sent */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_exec_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_id_connector(struct task_struct *task, int which_id)
return;
}
rcu_read_unlock();
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_sid_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_comm_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_coredump_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_exit_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
/*
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
/**
static int __init cpufreq_dt_platdev_init(void)
{
struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
if (!np)
return -ENODEV;
- if (!of_match_node(machines, np))
+ match = of_match_node(machines, np);
+ of_node_put(np);
+ if (!match)
return -ENODEV;
- of_node_put(of_root);
-
return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
NULL, 0));
}
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ if (cpufreq_suspended) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
new_policy.cur = cpufreq_update_current_freq(policy);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;
return acpi_ppc;
}
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
- return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
- int turbo_pss_ctl;
int ret;
int i;
* max frequency, which will cause a reduced performance as
* this driver uses real max turbo frequency as the max
* frequency. So correct this frequency in _PSS table to
- * correct max turbo frequency based on the turbo ratio.
+ * correct max turbo frequency based on the turbo state.
* Also need to convert to MHz as _PSS freq is in MHz.
*/
- turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
- if (turbo_pss_ctl > cpu->pstate.max_pstate)
+ if (!limits->turbo_disabled)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
{
struct cpudata *cpu = all_cpu_data[cpu_num];
+ if (cpu->update_util_set)
+ return;
+
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- intel_pstate_clear_update_util_hook(policy->cpu);
+ pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
cpu = all_cpu_data[0];
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
- limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
limits->min_perf = div_fp(limits->min_perf_pct, 100);
limits->max_perf = div_fp(limits->max_perf_pct, 100);
+ limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
intel_pstate_set_update_util_hook(policy->cpu);
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
- policy->cpuinfo.max_freq =
- cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ update_turbo_state();
+ policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
doorbell.space_id = reg_resource->space_id;
doorbell.bit_width = reg_resource->bit_width;
doorbell.bit_offset = reg_resource->bit_offset;
- doorbell.access_width = 64;
+ doorbell.access_width = 4;
doorbell.address = reg_resource->address;
pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
- u64 time_start, time_end;
+ ktime_t time_start, time_end;
s64 diff;
/*
sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
- time_start = local_clock();
+ time_start = ns_to_ktime(local_clock());
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();
- time_end = local_clock();
+ time_end = ns_to_ktime(local_clock());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
- /*
- * local_clock() returns the time in nanosecond, let's shift
- * by 10 (divide by 1024) to have microsecond based time.
- */
- diff = (time_end - time_start) >> 10;
+ diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
$(obj)/qat_rsapubkey-asn1.h
$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
$(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
&device_data->state);
memmove(req_ctx->state.buffer,
device_data->state.buffer,
- HASH_BLOCK_SIZE / sizeof(u32));
+ HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev,
"%s: hash_resume_state() failed!\n",
memmove(device_data->state.buffer,
req_ctx->state.buffer,
- HASH_BLOCK_SIZE / sizeof(u32));
+ HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
__func__);
.cra_name = "cbc(aes)",
.cra_driver_name = "p8_aes_cbc",
.cra_module = THIS_MODULE,
- .cra_priority = 1000,
+ .cra_priority = 2000,
.cra_type = &crypto_blkcipher_type,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
.cra_alignmask = 0,
.cra_name = "ctr(aes)",
.cra_driver_name = "p8_aes_ctr",
.cra_module = THIS_MODULE,
- .cra_priority = 1000,
+ .cra_priority = 2000,
.cra_type = &crypto_blkcipher_type,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
.cra_alignmask = 0,
# Some ABIs specify vrsave, special-purpose register #256, as reserved
# for system use.
-my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
my $mtspr = sub {
my ($f,$idx,$ra) = @_;
if ($idx == 256 && $no_vrsave) {
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
- if (err)
+ if (err) {
+ freqs.new = cur_freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
return err;
+ }
freqs.new = freq;
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
+ devfreq->last_status.current_frequency = profile->initial_freq;
devfreq->data = data;
devfreq->nb.notifier_call = devfreq_notifier_call;
mutex_lock(&devfreq->lock);
}
- devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
- devfreq->profile->max_state *
- devfreq->profile->max_state,
- GFP_KERNEL);
- devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
- devfreq->profile->max_state,
- GFP_KERNEL);
- devfreq->last_stat_updated = jiffies;
-
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
- put_device(&devfreq->dev);
mutex_unlock(&devfreq->lock);
goto err_out;
}
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+ devfreq->profile->max_state *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->last_stat_updated = jiffies;
+
srcu_init_notifier_head(&devfreq->transition_notifier_list);
mutex_unlock(&devfreq->lock);
err_init:
list_del(&devfreq->node);
device_unregister(&devfreq->dev);
- kfree(devfreq);
err_out:
return ERR_PTR(err);
}
return -EINVAL;
device_unregister(&devfreq->dev);
- put_device(&devfreq->dev);
return 0;
}
/* Maps the memory mapped IO to control nocp register */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
menu "DMABUF options"
config SYNC_FILE
- bool "sync_file support for fences"
+ bool "Explicit Synchronization Framework"
default n
select ANON_INODES
select DMA_SHARED_BUFFER
---help---
- This option enables the fence framework synchronization to export
- sync_files to userspace that can represent one or more fences.
+ The Sync File Framework adds explicit syncronization via
+ userspace. It enables send/receive 'struct fence' objects to/from
+ userspace via Sync File fds for synchronization between drivers via
+ userspace components. It has been ported from Android.
+
+ The first and main user for this is graphics in which a fence is
+ associated with a buffer. When a job is submitted to the GPU a fence
+ is attached to the buffer and is transferred via userspace, using Sync
+ Files fds, to the DRM driver for example. More details at
+ Documentation/sync_file.txt.
+
endmenu
struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
+ int ret;
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
- module_put(exp_info->owner);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_module;
}
dmabuf->priv = exp_info->priv;
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
exp_info->flags);
if (IS_ERR(file)) {
- kfree(dmabuf);
- return ERR_CAST(file);
+ ret = PTR_ERR(file);
+ goto err_dmabuf;
}
file->f_mode |= FMODE_LSEEK;
mutex_unlock(&db_list.lock);
return dmabuf;
+
+err_dmabuf:
+ kfree(dmabuf);
+err_module:
+ module_put(exp_info->owner);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
u32 mbr_dus; /* Destination Microblock Stride Register */
};
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
struct at_xdmac_lld lld;
enum dma_transfer_direction direction;
unsigned int xfer_size;
struct list_head descs_list;
struct list_head xfer_node;
-};
+} __aligned(sizeof(u64));
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
+ bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
residue = desc->xfer_size;
/*
* Flush FIFO: only relevant when the transfer is source peripheral
- * synchronized.
+ * synchronized. Flush is needed before reading CUBC because data in
+ * the FIFO are not reported by CUBC. Reporting a residue of the
+ * transfer length while we have data in FIFO can cause issue.
+ * Usecase: atmel USART has a timeout which means I have received
+ * characters but there is no more character received for a while. On
+ * timeout, it requests the residue. If the data are in the DMA FIFO,
+ * we will return a residue of the transfer length. It means no data
+ * received. If an application is waiting for these data, it will hang
+ * since we won't have another USART timeout without receiving new
+ * data.
*/
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
}
/*
- * When processing the residue, we need to read two registers but we
- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
- * we stand in the descriptor list and AT_XDMAC_CUBC is used
- * to know how many data are remaining for the current descriptor.
- * Since the dma channel is not paused to not loose data, between the
- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
- * descriptor.
- * For that reason, after reading AT_XDMAC_CUBC, we check if we are
- * still using the same descriptor by reading a second time
- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
- * read again AT_XDMAC_CUBC.
+ * The easiest way to compute the residue should be to pause the DMA
+ * but doing this can lead to miss some data as some devices don't
+ * have FIFO.
+ * We need to read several registers because:
+ * - DMA is running therefore a descriptor change is possible while
+ * reading these registers
+ * - When the block transfer is done, the value of the CUBC register
+ * is set to its initial value until the fetch of the next descriptor.
+ * This value will corrupt the residue calculation so we have to skip
+ * it.
+ *
+ * INITD -------- ------------
+ * |____________________|
+ * _______________________ _______________
+ * NDA @desc2 \/ @desc3
+ * _______________________/\_______________
+ * __________ ___________ _______________
+ * CUBC 0 \/ MAX desc1 \/ MAX desc2
+ * __________/\___________/\_______________
+ *
+ * Since descriptors are aligned on 64 bits, we can assume that
+ * the update of NDA and CUBC is atomic.
* Memory barriers are used to ensure the read order of the registers.
- * A max number of retries is set because unlikely it can never ends if
- * we are transferring a lot of data with small buffers.
+ * A max number of retries is set because unlikely it could never ends.
*/
- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
- rmb();
- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
- rmb();
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
- if (likely(cur_nda == check_nda))
- break;
-
- cur_nda = check_nda;
+ rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ rmb();
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+
+ if ((check_nda == cur_nda) && initd)
+ break;
}
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
goto spin_unlock;
}
+ /*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized. Another flush is needed here because CUBC is updated
+ * when the controller sends the data write command. It can lead to
+ * report data that are not written in the memory or the device. The
+ * FIFO flush ensures that data are really written.
+ */
+ if ((desc->lld.mbr_cfg & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
goto free_resources;
}
- src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
- PAGE_SIZE, DMA_TO_DEVICE);
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+ (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+ DMA_TO_DEVICE);
unmap->addr[0] = src_dma;
ret = dma_mapping_error(dma_chan->device->dev, src_dma);
}
unmap->to_cnt = 1;
- dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+ (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+ DMA_FROM_DEVICE);
unmap->addr[1] = dest_dma;
ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mod_work(&mci->work, value);
+ if (mci->op_state == OP_RUNNING_POLL)
+ edac_mod_work(&mci->work, value);
}
mutex_unlock(&mem_ctls_mutex);
}
{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
/* Device 16, functions 2-7 */
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs;
+ enum type type;
};
struct sbridge_dev {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) { \
+ .descr = A, \
+ .n_devs = ARRAY_SIZE(A), \
+ .type = T \
+}
+
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
{0,} /* 0 terminated list. */
};
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
{0,}
};
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
{0,} /* 0 terminated list. */
};
pci_read_config_dword(pvt->pci_tad[i],
rir_offset[j][k],
®);
- tmp_mb = RIR_OFFSET(reg) << 6;
+ tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
+ (u32)RIR_RNK_TGT(pvt->info.type, reg),
reg);
}
}
pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
rir_offset[n_rir][idx],
®);
- *rank = RIR_RNK_TGT(reg);
+ *rank = RIR_RNK_TGT(pvt->info.type, reg);
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
* @num_mc: pointer to the memory controllers count, to be incremented in case
* of success.
* @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- * (as implemented, this isn't expected to work correctly in the
- * multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- * memory controllers.
*
* returns 0 in case of success or error code
*/
-static int sbridge_get_all_devices_full(u8 *num_mc,
- const struct pci_id_table *table,
- int allow_dups,
- int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+ const struct pci_id_table *table)
{
int i, rc;
struct pci_dev *pdev = NULL;
+ int allow_dups = 0;
+ int multi_bus = 0;
+ if (table->type == KNIGHTS_LANDING)
+ allow_dups = multi_bus = 1;
while (table && table->descr) {
for (i = 0; i < table->n_devs; i++) {
if (!allow_dups || i == 0 ||
return 0;
}
-#define sbridge_get_all_devices(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
#define ICPU(model, table) \
{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
-/* Order here must match "enum type" */
static const struct x86_cpu_id sbridge_cpuids[] = {
ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
+ ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
{ }
};
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+ rc = sbridge_register_mci(sbridge_dev, ptable->type);
if (unlikely(rc < 0))
goto fail1;
}
palmas_enable_irq(palmas_usb);
/* perform initial detection */
+ if (palmas_usb->enable_gpio_vbus_detection)
+ palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
device_set_wakeup_capable(&pdev->dev, true);
return 0;
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
+ int resv;
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
paddr = md->phys_addr;
npages = md->num_pages;
+ resv = is_reserve_region(md);
if (efi_enabled(EFI_DBG)) {
char buf[64];
- pr_info(" 0x%012llx-0x%012llx %s",
+ pr_info(" 0x%012llx-0x%012llx %s%s\n",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
- efi_md_typeattr_format(buf, sizeof(buf), md));
+ efi_md_typeattr_format(buf, sizeof(buf), md),
+ resv ? "*" : "");
}
memrange_efi_to_native(&paddr, &npages);
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
- if (is_reserve_region(md)) {
+ if (resv)
memblock_mark_nomap(paddr, size);
- if (efi_enabled(EFI_DBG))
- pr_cont("*");
- }
- if (efi_enabled(EFI_DBG))
- pr_cont("\n");
}
set_bit(EFI_MEMMAP, &efi.flags);
menuconfig GPIOLIB
bool "GPIO Support"
+ select ANON_INODES
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
config OF_GPIO
def_bool y
- depends on OF || COMPILE_TEST
+ depends on OF
config GPIO_ACPI
def_bool y
select OF_GPIO
config GPIO_TEGRA
- bool
- default y
+ bool "NVIDIA Tegra GPIO support"
+ default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on OF
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
config GPIO_104_IDI_48
tristate "ACCES 104-IDI-48 GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
- depends on ISA
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the WinSystems WS16C48. The base port
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
const unsigned io_port = offset / 8;
- const unsigned control_port = io_port / 2;
+ const unsigned int control_port = io_port / 3;
const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
unsigned long flags;
unsigned control;
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
const unsigned io_port = offset / 8;
- const unsigned control_port = io_port / 2;
+ const unsigned int control_port = io_port / 3;
const unsigned mask = BIT(offset % 8);
const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
idi48gpio->irq = irq[id];
spin_lock_init(&idi48gpio->lock);
+ spin_lock_init(&idi48gpio->ack_lock);
dev_set_drvdata(dev, idi48gpio);
/* disable interrupts and clear status */
for (i = 0; i < kona_gpio->num_bank; i++) {
/* Unlock the entire bank first */
- bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
+ bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
/* Now re-lock the bank */
- bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
+ bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
}
}
return gpio % 8;
}
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
{
- struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
return reg_val;
}
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
int val)
{
- struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+ sch_gpio_reg_set(sch, gpio_num, GIO, 1);
spin_unlock(&sch->lock);
return 0;
}
static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
{
- return sch_gpio_reg_get(gc, gpio_num, GLV);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ return sch_gpio_reg_get(sch, gpio_num, GLV);
}
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GLV, val);
+ sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock(&sch->lock);
}
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+ sch_gpio_reg_set(sch, gpio_num, GIO, 0);
spin_unlock(&sch->lock);
/*
* GPIO7 is configured by the CMC as SLPIOVR
* Enable GPIO[9:8] core powered gpios explicitly
*/
- sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
- sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+ sch_gpio_reg_set(sch, 8, GEN, 1);
+ sch_gpio_reg_set(sch, 9, GEN, 1);
/*
* SUS_GPIO[2:0] enabled by default
* Enable SUS_GPIO3 resume powered gpio explicitly
*/
- sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+ sch_gpio_reg_set(sch, 13, GEN, 1);
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
const struct tegra_gpio_soc_config *soc;
struct gpio_chip gc;
struct irq_chip ic;
- struct lock_class_key lock_class;
u32 bank_count;
};
SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different category
+ * than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
static int tegra_gpio_probe(struct platform_device *pdev)
{
const struct tegra_gpio_soc_config *config;
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &tgi->lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(gpio->clk);
}
+ ret = clk_prepare_enable(gpio->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
pm_runtime_put(&pdev->dev);
err_pm_dis:
pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(gpio->clk);
return ret;
}
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;
+ err = gpiod_request(desc, label);
+ if (err)
+ return err;
+
if (flags & GPIOF_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- err = gpiod_request(desc, label);
- if (err)
- return err;
-
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/io-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
{
struct gpio_device *gdev = dev_get_drvdata(dev);
- cdev_del(&gdev->chrdev);
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
kfree(gdev->label);
/* From this point, the .release() function cleans up gpio_device */
gdev->dev.release = gpiodevice_release;
- get_device(&gdev->dev);
pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
__func__, gdev->base, gdev->base + gdev->ngpio - 1,
dev_name(&gdev->dev), gdev->chip->label ? : "generic");
* be removed, else it will be dangling until the last user is
* gone.
*/
+ cdev_del(&gdev->chrdev);
+ device_del(&gdev->dev);
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list)
- if (match(gdev->chip, data))
+ if (gdev->chip && match(gdev->chip, data))
break;
/* No match? */
spin_lock_irqsave(&gpio_lock, flags);
}
done:
- if (status < 0) {
- /* Clear flags that might have been set by the caller before
- * requesting the GPIO.
- */
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
- }
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
#define VALIDATE_DESC(desc) do { \
if (!desc) \
return 0; \
+ if (IS_ERR(desc)) { \
+ pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+ return PTR_ERR(desc); \
+ } \
if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO\n", __func__); \
+ pr_warn("%s: invalid GPIO (no device)\n", __func__); \
return -EINVAL; \
} \
if ( !desc->gdev->chip ) { \
#define VALIDATE_DESC_VOID(desc) do { \
if (!desc) \
return; \
+ if (IS_ERR(desc)) { \
+ pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+ return; \
+ } \
if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO\n", __func__); \
+ pr_warn("%s: invalid GPIO (no device)\n", __func__); \
return; \
} \
if (!desc->gdev->chip) { \
struct gpio_chip *chip;
int offset;
- VALIDATE_DESC(desc);
+ /*
+ * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
+ * requires this function to not return zero on an invalid descriptor
+ * but rather a negative error number.
+ */
+ if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+ return -EINVAL;
+
chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
if (chip->to_irq) {
}
EXPORT_SYMBOL_GPL(gpiod_get_optional);
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc: gpio to be setup
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
- if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
/**
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
* occurred while trying to acquire the GPIO.
*/
static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
- enum gpiod_flags dflags)
+ unsigned long lflags, enum gpiod_flags dflags)
{
int status;
+ if (lflags & GPIO_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIO_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIO_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
pr_debug("no flags found for %s\n", con_id);
return desc;
}
- gpiod_parse_flags(desc, lookupflags);
-
status = gpiod_request(desc, con_id);
if (status < 0)
return ERR_PTR(status);
- status = gpiod_configure_flags(desc, con_id, flags);
+ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
if (IS_ERR(desc))
return desc;
+ ret = gpiod_request(desc, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (active_low)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
}
- ret = gpiod_request(desc, NULL);
- if (ret)
- return ERR_PTR(ret);
-
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
- gpiod_parse_flags(desc, lflags);
-
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
if (IS_ERR(local_desc)) {
status = PTR_ERR(local_desc);
return status;
}
- status = gpiod_configure_flags(desc, name, dflags);
+ status = gpiod_configure_flags(desc, name, lflags, dflags);
if (status < 0) {
pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, chip->label, hwnum, status);
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
struct amdgpu_ip_block_version {
enum amd_ip_block_type type;
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
+void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
- u64 next_rptr_gpu_addr;
- volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
u32 doorbell_index;
bool use_doorbell;
unsigned wptr_offs;
- unsigned next_rptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
enum amdgpu_ring_type type;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
};
/*
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
+ uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
- struct amdgpu_ring *last_user;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
+ uint32_t current_gpu_reset_count;
+
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
uint32_t max_pfn;
/* vram base address for page table entry */
u64 vram_base_offset;
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint32_t bitmap[4][4];
};
+struct amdgpu_gfx_funcs {
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+ void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config;
/* ce ram size*/
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
+ const struct amdgpu_gfx_funcs *funcs;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
uint32_t num_ibs;
void *owner;
uint64_t ctx;
+ bool vm_needs_flush;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t oa_base, oa_size;
/* user fence handling */
- struct amdgpu_bo *uf_bo;
- uint32_t uf_offset;
+ uint64_t uf_addr;
uint64_t uf_sequence;
};
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
};
struct amdgpu_dpm {
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
/*
* amdgpu smumgr functions
*/
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
int (*reset)(struct amdgpu_device *adev);
- /* wait for mc_idle */
- int (*wait_for_mc_idle)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
- /* get the gpu clock counter */
- uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+ /* query virtual capabilities */
+ u32 (*get_virtual_caps)(struct amdgpu_device *adev);
};
/*
/* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
struct amdgpu_virtualization {
bool supports_sr_iov;
+ bool is_virtual;
+ u32 caps;
};
/*
spinlock_t didt_idx_lock;
amdgpu_rreg_t didt_rreg;
amdgpu_wreg_t didt_wreg;
+ /* protects concurrent gc_cac register access */
+ spinlock_t gc_cac_idx_lock;
+ amdgpu_rreg_t gc_cac_rreg;
+ amdgpu_wreg_t gc_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
atomic64_t vram_vis_usage;
atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
+ atomic64_t num_evictions;
atomic_t gpu_reset_counter;
/* display */
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
*/
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
#define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
#endif
/*
{
struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
- if (rdev->asic_funcs->get_gpu_clock_counter)
- return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+ if (rdev->gfx.funcs->get_gpu_clock_counter)
+ return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
return 0;
}
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "amd_acpi.h"
struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
+ bool is_hybrid;
};
static struct amdgpu_atpx_priv {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+ return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+
+ /* some bioses set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+#if 1
+ /* This is a temporary hack until the D3 cold support
+ * makes it upstream. The ATPX power_control method seems
+ * to still work on even if the system should be using
+ * the new standardized hybrid D3 cold ACPI interface.
+ */
+ atpx->functions.power_cntl = true;
+#else
+ atpx->functions.power_cntl = false;
+#endif
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
int r;
+ unsigned long total_size = 0;
array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
if (!array)
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
+ total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
list->array = array;
list->num_entries = num_entries;
+ trace_amdgpu_cs_bo_status(list->num_entries, total_size);
return 0;
error_free:
return RREG32_UVD_CTX(index);
case CGS_IND_REG__DIDT:
return RREG32_DIDT(index);
+ case CGS_IND_REG_GC_CAC:
+ return RREG32_GC_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
return WREG32_UVD_CTX(index, value);
case CGS_IND_REG__DIDT:
return WREG32_DIDT(index, value);
+ case CGS_IND_REG_GC_CAC:
+ return WREG32_GC_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
return result;
}
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+ release_firmware(adev->pm.fw);
+ return 0;
+ }
+ /* cannot release other firmware because they are not created by cgs */
+ return -EINVAL;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
case CGS_SYSTEM_INFO_GFX_CU_INFO:
sys_info->value = adev->gfx.cu_info.number;
break;
+ case CGS_SYSTEM_INFO_GFX_SE_INFO:
+ sys_info->value = adev->gfx.config.max_shader_engines;
+ break;
default:
return -ENODEV;
}
struct cgs_acpi_method_argument *argument = NULL;
uint32_t i, count;
acpi_status status;
- int result;
+ int result = 0;
uint32_t func_no = 0xFFFFFFFF;
handle = ACPI_HANDLE(&adev->pdev->dev);
params->integer.value = argument->value;
break;
case ACPI_TYPE_STRING:
- params->string.length = argument->method_length;
+ params->string.length = argument->data_length;
params->string.pointer = argument->pointer;
break;
case ACPI_TYPE_BUFFER:
- params->buffer.length = argument->method_length;
+ params->buffer.length = argument->data_length;
params->buffer.pointer = argument->pointer;
break;
default:
struct cgs_acpi_method_info info = {0};
acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
- acpi_input[0].method_length = sizeof(uint32_t);
acpi_input[0].data_length = sizeof(uint32_t);
acpi_input[0].value = acpi_function;
acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
- acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_input[1].data_length = input_size;
acpi_input[1].pointer = pinput;
acpi_output.type = CGS_ACPI_TYPE_BUFFER;
- acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_output.data_length = output_size;
acpi_output.pointer = poutput;
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
+ amdgpu_cgs_rel_firmware,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
if (ret)
goto free_all_kdata;
- if (p->uf_entry.robj) {
- p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
- p->job->uf_offset = uf_offset;
- }
-
+ if (p->uf_entry.robj)
+ p->job->uf_addr = uf_offset;
kfree(chunk_array);
return 0;
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
if (r)
goto error_validate;
+ fpriv->vm.last_eviction_counter =
+ atomic64_read(&p->adev->num_evictions);
+
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
}
}
+ if (p->uf_entry.robj)
+ p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
error_validate:
if (r) {
amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
+ p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- }
+ } else {
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
- r = amdgpu_bo_vm_update_pte(p, vm);
- if (!r)
- amdgpu_cs_sync_rings(p);
+ r = amdgpu_bo_vm_update_pte(p, vm);
+ if (r)
+ return r;
+ }
- return r;
+ return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
}
/* UVD & VCE fw doesn't support user fences */
- if (parser->job->uf_bo && (
+ if (parser->job->uf_addr && (
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
- struct fence *fence;
struct amdgpu_job *job;
int r;
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func,
- p->filp, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
return r;
job->owner = p->filp;
job->ctx = entity->fence_context;
- p->fence = fence_get(fence);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+ p->fence = fence_get(&job->base.s_fence->finished);
+ cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
+ amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
bool always_indirect)
{
+ uint32_t ret;
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
else {
unsigned long flags;
- uint32_t ret;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- return ret;
}
+ trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
*/
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
- if (adev->mode_info.atom_context)
+ if (adev->mode_info.atom_context) {
kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
kfree(adev->mode_info.atom_context);
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
}
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type) {
+ r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (r)
+ return r;
+ break;
+ }
+ }
+ return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type)
+ return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ }
+ return true;
+
+}
+
const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
struct amdgpu_device *adev,
enum amd_ip_block_type type)
}
}
+ adev->cg_flags &= amdgpu_cg_mask;
+ adev->pg_flags &= amdgpu_pg_mask;
+
return 0;
}
adev->ip_block_status[i].valid = false;
}
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (adev->ip_blocks[i].funcs->late_fini)
+ adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ }
+
return 0;
}
return 0;
}
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
/**
* amdgpu_device_init - initialize the driver
*
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
adev->didt_wreg = &amdgpu_invalid_wreg;
+ adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+ adev->gc_cac_wreg = &amdgpu_invalid_wreg;
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
+
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
spin_lock_init(&adev->pcie_idx_lock);
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
+ spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
/* Read BIOS */
- if (!amdgpu_get_bios(adev))
- return -EINVAL;
+ if (!amdgpu_get_bios(adev)) {
+ r = -EINVAL;
+ goto failed;
+ }
/* Must be an ATOMBIOS */
if (!adev->is_atom_bios) {
dev_err(adev->dev, "Expecting atombios for GPU\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- return r;
+ goto failed;
}
/* See if the asic supports SR-IOV */
adev->virtualization.supports_sr_iov =
amdgpu_atombios_has_gpu_virtualization_table(adev);
+ /* Check if we are executing in a virtualized environment */
+ adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+ adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
/* Post card if necessary */
if (!amdgpu_card_posted(adev) ||
- adev->virtualization.supports_sr_iov) {
+ (adev->virtualization.is_virtual &&
+ !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
DRM_INFO("GPU not posted. posting now...\n");
amdgpu_atom_asic_init(adev->mode_info.atom_context);
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- return r;
+ goto failed;
}
/* init the mode config */
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_fini(adev);
- return r;
+ goto failed;
}
adev->accel_working = true;
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- return r;
+ goto failed;
}
r = amdgpu_ib_ring_tests(adev);
DRM_ERROR("registering register debugfs failed (%d).\n", r);
}
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r) {
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ return r;
+ }
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- return r;
+ goto failed;
}
return 0;
+
+failed:
+ if (runtime)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ return r;
}
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
+ drm_crtc_force_disable_all(adev->ddev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_status);
kfree(adev->bios);
adev->bios = NULL;
vga_switcheroo_unregister_client(adev->pdev);
+ if (adev->flags & AMD_IS_PX)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
pci_iounmap(adev->pdev, adev->rio_mem);
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
- unsigned ring_sizes[AMDGPU_MAX_RINGS];
- uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
- bool saved = false;
-
int i, r;
int resched;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- r = amdgpu_suspend(adev);
-
+ /* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring)
continue;
-
- ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
- if (ring_sizes[i]) {
- saved = true;
- dev_info(adev->dev, "Saved %d dwords of commands "
- "on ring %d.\n", ring_sizes[i], i);
- }
+ kthread_park(ring->sched.thread);
+ amd_sched_hw_job_reset(&ring->sched);
}
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(adev);
+
+ /* save scratch */
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_suspend(adev);
retry:
+ /* Disable fb access */
+ if (adev->mode_info.num_crtc) {
+ struct amdgpu_mode_mc_save save;
+ amdgpu_display_stop_mc_access(adev, &save);
+ amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ }
+
r = amdgpu_asic_reset(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
}
-
+ /* restore scratch */
+ amdgpu_atombios_scratch_regs_restore(adev);
if (!r) {
+ r = amdgpu_ib_ring_tests(adev);
+ if (r) {
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_suspend(adev);
+ goto retry;
+ }
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
-
- amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
- ring_sizes[i] = 0;
- ring_data[i] = NULL;
- }
-
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- if (saved) {
- saved = false;
- r = amdgpu_suspend(adev);
- goto retry;
- }
+ amd_sched_job_recovery(&ring->sched);
+ kthread_unpark(ring->sched.thread);
}
} else {
- amdgpu_fence_driver_force_completion(adev);
+ dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i])
- kfree(ring_data[i]);
+ if (adev->rings[i]) {
+ kthread_unpark(adev->rings[i]->sched.thread);
+ }
}
}
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
+ amdgpu_irq_gpu_reset_resume_helper(adev);
return r;
}
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+ use_bank = 1;
+ *pos &= 0xFFFFFF;
+ } else {
+ use_bank = 0;
+ }
+
+ if (use_bank) {
+ if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+ se_bank >= adev->gfx.config.max_shader_engines)
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
while (size) {
uint32_t value;
if (*pos > adev->rmmio_size)
- return result;
+ goto end;
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto end;
+ }
result += 4;
buf += 4;
size -= 4;
}
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
return result;
}
return result;
}
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 0;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
};
static const char *debugfs_regs_names[] = {
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
+ "amdgpu_gca_config",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto cleanup;
+ goto unreserve;
}
r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
DRM_ERROR("failed to get fences for buffer\n");
- goto cleanup;
+ goto unpin;
}
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
+unpin:
if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
+unreserve:
amdgpu_bo_unreserve(new_rbo);
cleanup:
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
#endif
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (amdgpu_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (amdgpu_is_atpx_hybrid() ||
+ !amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
.dev_priv_size = 0,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
- driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
}
}
}
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+ unsigned se, sh, cu;
+ const char *p;
+
+ memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+ if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+ return;
+
+ p = amdgpu_disable_cu;
+ for (;;) {
+ char *next;
+ int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+ if (ret < 3) {
+ DRM_ERROR("amdgpu: could not parse disable_cu\n");
+ return;
+ }
+
+ if (se < max_se && sh < max_sh && cu < 16) {
+ DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+ mask[se * max_sh + sh] |= 1u << cu;
+ } else {
+ DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+ se, sh, cu);
+ }
+
+ next = strchr(p, ',');
+ if (!next)
+ break;
+ p = next + 1;
+ }
+}
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
#endif
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
- r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
+ r = amdgpu_vm_flush(ring, job);
if (r) {
amdgpu_ring_undo(ring);
return r;
}
/* wrap the last IB with fence */
- if (job && job->uf_bo) {
- uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
- addr += job->uf_offset;
- amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
return r;
}
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+ int i, j;
+ for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+ struct amdgpu_irq_src *src = adev->irq.sources[i];
+ if (!src)
+ continue;
+ for (j = 0; j < src->num_types; j++)
+ amdgpu_irq_update(adev, src, j);
+ }
+}
+
/**
* amdgpu_irq_get - enable interrupt
*
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
- amd_sched_job_put(&job->base);
-}
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
- struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
-
- amd_sched_job_put(&job->base);
+ job->base.sched->name,
+ atomic_read(&job->ring->fence_drv.last_seq),
+ job->ring->fence_drv.sync_seq);
+ amdgpu_gpu_reset(job->adev);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
- INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
return r;
}
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- unsigned i;
struct fence *f;
+ unsigned i;
+
/* use sched fence if available */
- f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+ f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
- fence_put(job->fence);
+ amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
- amdgpu_bo_unref(&job->uf_bo);
- amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- if (!job->base.use_sched)
- kfree(job);
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ kfree(job);
}
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
{
- struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+ amdgpu_job_free_resources(job);
+
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
kfree(job);
}
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
- struct fence *fence;
int r;
job->ring = ring;
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func, owner, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
job->owner = owner;
job->ctx = entity->fence_context;
- *f = fence_get(fence);
+ *f = fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->base,
- &job->vm_id, &job->vm_pd_addr);
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
}
job = to_amdgpu_job(sched_job);
- r = amdgpu_sync_wait(&job->sync);
- if (r) {
- DRM_ERROR("failed to sync wait (%d)\n", r);
- return NULL;
- }
+ BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
}
err:
+ /* if gpu reset, hw fence will be replaced here */
+ fence_put(job->fence);
job->fence = fence;
- amdgpu_job_free(job);
return fence;
}
const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
- .begin_job = amd_sched_job_begin,
- .finish_job = amd_sched_job_finish,
+ .timedout_job = amdgpu_job_timedout,
+ .free_job = amdgpu_job_free_cb
};
if (adev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (amdgpu_device_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
amdgpu_amdkfd_device_fini(adev);
}
out:
- if (r)
+ if (r) {
+ /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+ if (adev->rmmio && amdgpu_device_is_px(dev))
+ pm_runtime_put_noidle(dev->dev);
amdgpu_driver_unload_kms(dev);
-
+ }
return r;
}
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+ struct drm_amdgpu_query_fw *query_fw,
+ struct amdgpu_device *adev)
+{
+ switch (query_fw->fw_type) {
+ case AMDGPU_INFO_FW_VCE:
+ fw_info->ver = adev->vce.fw_version;
+ fw_info->feature = adev->vce.fb_version;
+ break;
+ case AMDGPU_INFO_FW_UVD:
+ fw_info->ver = adev->uvd.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GMC:
+ fw_info->ver = adev->mc.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GFX_ME:
+ fw_info->ver = adev->gfx.me_fw_version;
+ fw_info->feature = adev->gfx.me_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_PFP:
+ fw_info->ver = adev->gfx.pfp_fw_version;
+ fw_info->feature = adev->gfx.pfp_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_CE:
+ fw_info->ver = adev->gfx.ce_fw_version;
+ fw_info->feature = adev->gfx.ce_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC:
+ fw_info->ver = adev->gfx.rlc_fw_version;
+ fw_info->feature = adev->gfx.rlc_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_MEC:
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->gfx.mec_fw_version;
+ fw_info->feature = adev->gfx.mec_feature_version;
+ } else if (query_fw->index == 1) {
+ fw_info->ver = adev->gfx.mec2_fw_version;
+ fw_info->feature = adev->gfx.mec2_feature_version;
+ } else
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_FW_SMC:
+ fw_info->ver = adev->pm.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_SDMA:
+ if (query_fw->index >= adev->sdma.num_instances)
+ return -EINVAL;
+ fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+ fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* Userspace get information ioctl
*/
return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_TIMESTAMP:
- ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+ ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_FW_VERSION: {
struct drm_amdgpu_info_firmware fw_info;
+ int ret;
/* We only support one instance of each IP block right now. */
if (info->query_fw.ip_instance != 0)
return -EINVAL;
- switch (info->query_fw.fw_type) {
- case AMDGPU_INFO_FW_VCE:
- fw_info.ver = adev->vce.fw_version;
- fw_info.feature = adev->vce.fb_version;
- break;
- case AMDGPU_INFO_FW_UVD:
- fw_info.ver = adev->uvd.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GMC:
- fw_info.ver = adev->mc.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GFX_ME:
- fw_info.ver = adev->gfx.me_fw_version;
- fw_info.feature = adev->gfx.me_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_PFP:
- fw_info.ver = adev->gfx.pfp_fw_version;
- fw_info.feature = adev->gfx.pfp_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_CE:
- fw_info.ver = adev->gfx.ce_fw_version;
- fw_info.feature = adev->gfx.ce_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_RLC:
- fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = adev->gfx.rlc_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0) {
- fw_info.ver = adev->gfx.mec_fw_version;
- fw_info.feature = adev->gfx.mec_feature_version;
- } else if (info->query_fw.index == 1) {
- fw_info.ver = adev->gfx.mec2_fw_version;
- fw_info.feature = adev->gfx.mec2_feature_version;
- } else
- return -EINVAL;
- break;
- case AMDGPU_INFO_FW_SMC:
- fw_info.ver = adev->pm.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_SDMA:
- if (info->query_fw.index >= adev->sdma.num_instances)
- return -EINVAL;
- fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
- fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
- break;
- default:
- return -EINVAL;
- }
+ ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+ if (ret)
+ return ret;
+
return copy_to_user(out, &fw_info,
min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
}
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
}
dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
- dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
+ dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines;
dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
dev_info._pad = 0;
dev_info.ids_flags = 0;
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_amdgpu_info_firmware fw_info;
+ struct drm_amdgpu_query_fw query_fw;
+ int ret, i;
+
+ /* VCE */
+ query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* UVD */
+ query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* GMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* ME */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* PFP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* CE */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+ query_fw.index = 0;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC2 */
+ if (adev->asic_type == CHIP_KAVERI ||
+ (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+ query_fw.index = 1;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+ }
+
+ /* SMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* SDMA */
+ query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, fw_info.feature, fw_info.ver);
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_firmware_info_list[] = {
+ {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
+ ARRAY_SIZE(amdgpu_firmware_info_list));
+#else
+ return 0;
+#endif
+}
struct ttm_mem_reg *new_mem)
{
struct amdgpu_bo *rbo;
+ struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
/* move_notify is called before move happens */
amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+
+ trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state = 0;
- long idx;
+ unsigned long idx;
int ret;
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
- else {
- ret = kstrtol(buf, 0, &idx);
+ else if (adev->pp_enabled) {
+ struct pp_states_info data;
- if (ret) {
+ ret = kstrtoul(buf, 0, &idx);
+ if (ret || idx >= ARRAY_SIZE(data.states)) {
count = -EINVAL;
goto fail;
}
- if (adev->pp_enabled) {
- struct pp_states_info data;
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
- adev->pp_force_state_enabled = true;
- }
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+ state = data.states[idx];
+ /* only set user selected power states */
+ if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+ state != POWER_STATE_TYPE_DEFAULT) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ adev->pp_force_state_enabled = true;
}
}
fail:
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
return size;
}
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
return size;
}
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
return size;
}
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_sclk_od(adev);
+ else if (adev->pm.funcs->get_sclk_od)
+ value = adev->pm.funcs->get_sclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_sclk_od) {
+ adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_mclk_od(adev);
+ else if (adev->pm.funcs->get_mclk_od)
+ value = adev->pm.funcs->get_mclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_mclk_od) {
+ adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
fail:
return count;
}
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
amdgpu_get_pp_dpm_pcie,
amdgpu_set_pp_dpm_pcie);
+static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_sclk_od,
+ amdgpu_set_pp_sclk_od);
+static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_mclk_od,
+ amdgpu_set_pp_mclk_od);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
DRM_ERROR("failed to create device file pp_table\n");
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
}
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_sclk_od\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_mclk_od\n");
+ return ret;
+ }
+
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
}
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
+ device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
+ pp_init->powercontainment_enabled = amdgpu_powercontainment;
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
if (ret)
return ret;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_fini(adev->powerplay.pp_handle);
- }
-#endif
-
return ret;
}
return ret;
}
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pp_enabled) {
+ amdgpu_pm_sysfs_fini(adev);
+ amd_powerplay_fini(adev->powerplay.pp_handle);
+ }
+
+ if (adev->powerplay.ip_funcs->late_fini)
+ adev->powerplay.ip_funcs->late_fini(
+ adev->powerplay.pp_handle);
+#endif
+}
+
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
+ .late_fini = amdgpu_pp_late_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
*/
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
ring->wptr = ring->wptr_old;
}
-/**
- * amdgpu_ring_backup - Back up the content of a ring
- *
- * @ring: the ring we want to back up
- *
- * Saves all unprocessed commits from a ring, returns the number of dwords saved.
- */
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data)
-{
- unsigned size, ptr, i;
-
- *data = NULL;
-
- if (ring->ring_obj == NULL)
- return 0;
-
- /* it doesn't make sense to save anything if all fences are signaled */
- if (!amdgpu_fence_count_emitted(ring))
- return 0;
-
- ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- size = ring->wptr + (ring->ring_size / 4);
- size -= ptr;
- size &= ring->ptr_mask;
- if (size == 0)
- return 0;
-
- /* and then save the content of the ring */
- *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
- if (!*data)
- return 0;
- for (i = 0; i < size; ++i) {
- (*data)[i] = ring->ring[ptr++];
- ptr &= ring->ptr_mask;
- }
-
- return size;
-}
-
-/**
- * amdgpu_ring_restore - append saved commands to the ring again
- *
- * @ring: ring to append commands to
- * @size: number of dwords we want to write
- * @data: saved commands
- *
- * Allocates space on the ring and restore the previously saved commands.
- */
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data)
-{
- int i, r;
-
- if (!size || !data)
- return 0;
-
- /* restore the saved ring content */
- r = amdgpu_ring_alloc(ring, size);
- if (r)
- return r;
-
- for (i = 0; i < size; ++i) {
- amdgpu_ring_write(ring, data[i]);
- }
-
- amdgpu_ring_commit(ring);
- kfree(data);
- return 0;
-}
-
/**
* amdgpu_ring_init - init driver ring struct.
*
return r;
}
- r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
- return r;
- }
- ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
- ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
}
r = amdgpu_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
+
+ memset((void *)ring->ring, 0, ring->ring_size);
+
amdgpu_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(adev->dev, "(%d) ring map failed\n", r);
ring->ring = NULL;
ring->ring_obj = NULL;
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
if (ring_obj) {
r = amdgpu_bo_reserve(ring_obj, false);
}
amdgpu_bo_unref(&ring_obj);
}
+ amdgpu_debugfs_ring_fini(ring);
}
/*
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+/* Layout of file is 12 bytes consisting of
+ * - rptr
+ * - wptr
+ * - driver's copy of wptr
+ *
+ * followed by n-words of ring data
+ */
+static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int roffset = (unsigned long)node->info_ent->data;
- struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
- uint32_t rptr, wptr, rptr_next;
- unsigned i;
-
- wptr = amdgpu_ring_get_wptr(ring);
- seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
-
- rptr = amdgpu_ring_get_rptr(ring);
- rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
-
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
- ring->wptr, ring->wptr);
-
- if (!ring->ready)
- return 0;
-
- /* print 8 dw before current rptr as often it's the last executed
- * packet that is the root issue
- */
- i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- while (i != rptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+ struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+ int r, i;
+ uint32_t value, result, early[3];
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ result = 0;
+
+ if (*pos < 12) {
+ early[0] = amdgpu_ring_get_rptr(ring);
+ early[1] = amdgpu_ring_get_wptr(ring);
+ early[2] = ring->wptr;
+ for (i = *pos / 4; i < 3 && size; i++) {
+ r = put_user(early[i], (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
}
- while (i != wptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
+
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t*)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
}
- return 0;
+
+ return result;
}
-static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
-static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_ring_read,
+ .llseek = default_llseek
+};
#endif
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
- unsigned i;
- struct drm_info_list *info;
- char *name;
-
- for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
- info = &amdgpu_debugfs_ring_info_list[i];
- if (!info->data)
- break;
- }
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ char name[32];
- if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
- return -ENOSPC;
-
- name = &amdgpu_debugfs_ring_names[i][0];
sprintf(name, "amdgpu_ring_%s", ring->name);
- info->name = name;
- info->show = amdgpu_debugfs_ring_info;
- info->driver_features = 0;
- info->data = (void*)(uintptr_t)offset;
- return amdgpu_debugfs_add_files(adev, info, 1);
+ ent = debugfs_create_file(name,
+ S_IFREG | S_IRUGO, root,
+ ring, &amdgpu_debugfs_ring_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ i_size_write(ent->d_inode, ring->ring_size + 12);
+ ring->ent = ent;
#endif
return 0;
}
+
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove(ring->ent);
+#endif
+}
return r;
}
r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ memset(sa_manager->cpu_ptr, 0, sa_manager->size);
amdgpu_bo_unreserve(sa_manager->bo);
return r;
}
}
/**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
*
* @sync: the sync object
+ * @ring: optional ring to use for test
*
- * Returns true if all fences in the sync object are signaled.
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
*/
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+ if (ring && s_fence) {
+ /* For fences from the same ring it is sufficient
+ * when they are scheduled.
+ */
+ if (s_fence->sched == &ring->sched) {
+ if (fence_is_signaled(&s_fence->scheduled))
+ continue;
+
+ return &s_fence->scheduled;
+ }
+ }
if (fence_is_signaled(f)) {
hash_del(&e->node);
continue;
}
- return false;
+ return f;
}
- return true;
+ return NULL;
}
/**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ * amdgpu_sync_get_fence - get the next fence from the sync object
*
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
+ * @sync: sync object to use
*
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
+ * Get and removes the next fence from the sync object not signaled yet.
*/
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence)
-{
- struct amdgpu_sync_entry *e, *newone;
- struct hlist_node *tmp;
- int i;
-
- /* Allocate the new entry before moving the old ones */
- newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
- if (!newone)
- return -ENOMEM;
-
- hash_for_each_safe(src->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
-
- hash_del(&e->node);
- if (fence_is_signaled(f)) {
- fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- if (amdgpu_sync_add_later(dst, f)) {
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- hash_add(dst->fences, &e->node, f->context);
- }
-
- hash_add(src->fences, &newone->node, fence->context);
- newone->fence = fence_get(fence);
-
- return 0;
-}
-
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
return NULL;
}
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
-{
- struct amdgpu_sync_entry *e;
- struct hlist_node *tmp;
- int i, r;
-
- hash_for_each_safe(sync->fences, i, tmp, e, node) {
- r = fence_wait(e->fence, false);
- if (r)
- return r;
-
- hash_del(&e->node);
- fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
- }
-
- return 0;
-}
-
/**
* amdgpu_sync_free - free the sync object
*
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace
+TRACE_EVENT(amdgpu_mm_rreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_mm_wreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
TRACE_EVENT(amdgpu_bo_create,
TP_PROTO(struct amdgpu_bo *bo),
TP_ARGS(bo),
TP_STRUCT__entry(
__field(struct amdgpu_bo *, bo)
__field(u32, pages)
+ __field(u32, type)
+ __field(u32, prefer)
+ __field(u32, allow)
+ __field(u32, visible)
),
TP_fast_assign(
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
+ __entry->type = bo->tbo.mem.mem_type;
+ __entry->prefer = bo->prefered_domains;
+ __entry->allow = bo->allowed_domains;
+ __entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+
+ TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
+ __entry->bo, __entry->pages, __entry->type,
+ __entry->prefer, __entry->allow, __entry->visible)
);
TRACE_EVENT(amdgpu_cs,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, list)
__field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
),
TP_fast_assign(
__entry->list = list;
__entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
),
- TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+ TP_printk("list=%p, bo=%p, bo_size = %Ld",
+ __entry->list,
+ __entry->bo,
+ __entry->bo_size)
+);
+
+TRACE_EVENT(amdgpu_cs_bo_status,
+ TP_PROTO(uint64_t total_bo, uint64_t total_size),
+ TP_ARGS(total_bo, total_size),
+ TP_STRUCT__entry(
+ __field(u64, total_bo)
+ __field(u64, total_size)
+ ),
+
+ TP_fast_assign(
+ __entry->total_bo = total_bo;
+ __entry->total_size = total_size;
+ ),
+ TP_printk("total bo size = %Ld, total bo count = %Ld",
+ __entry->total_bo, __entry->total_size)
+);
+
+TRACE_EVENT(amdgpu_ttm_bo_move,
+ TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+ TP_ARGS(bo, new_placement, old_placement),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ ),
+ TP_printk("bo=%p from:%d to %d with size = %Ld",
+ __entry->bo, __entry->old_placement,
+ __entry->new_placement, __entry->bo_size)
);
#endif
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
bo->resv, &fence);
- /* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, fence,
- evict, no_wait_gpu, new_mem);
+ if (r)
+ return r;
+
+ r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
fence_put(fence);
return r;
}
return -EINVAL;
adev = amdgpu_get_adev(bo->bdev);
+
+ /* remember the eviction */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
return 0;
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
if (r) {
return r;
}
{
int r;
- if (adev->uvd.vcpu_bo == NULL)
- return 0;
+ kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
+ if (adev->uvd.vcpu_bo) {
+ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+ if (!r) {
+ amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+ }
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ }
amdgpu_ring_fini(&adev->uvd.ring);
if (fences == 0 && handles == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
+ /* just work around for uvd clock remain high even
+ * when uvd dpm disabled on Polaris10 */
+ if (adev->asic_type == CHIP_POLARIS10)
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
/**
* amdgpu_vm_get_bos - add the vm BOs to a duplicates list
*
+ * @adev: amdgpu device pointer
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
* Add the page directory to the BO duplicates list
* for command submission.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates)
{
+ uint64_t num_evictions;
unsigned i;
+ /* We only need to validate the page tables
+ * if they aren't already valid.
+ */
+ num_evictions = atomic64_read(&adev->num_evictions);
+ if (num_evictions == vm->last_eviction_counter)
+ return;
+
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
spin_unlock(&glob->lru_lock);
}
+static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter) ? true : false;
+}
+
/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr)
+ struct amdgpu_job *job)
{
- uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
struct fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id;
- unsigned i = ring->idx;
- int r;
+ struct amdgpu_vm_id *id, *idle;
+ struct fence **fences;
+ unsigned i;
+ int r = 0;
+
+ fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
+ GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
mutex_lock(&adev->vm_manager.lock);
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find a idle VMID to use, wait till one becomes available */
+ if (&idle->list == &adev->vm_manager.ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct fence_array *array;
+ unsigned j;
+
+ for (j = 0; j < i; ++j)
+ fence_get(fences[j]);
+
+ array = fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ fence_put(fences[j]);
+ kfree(fences);
+ r = -ENOMEM;
+ goto error;
+ }
+
+
+ r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+ fence_put(&array->base);
+ if (r)
+ goto error;
+
+ mutex_unlock(&adev->vm_manager.lock);
+ return 0;
+
+ }
+ kfree(fences);
+
+ job->vm_needs_flush = true;
/* Check if we can use a VMID already assigned to this VM */
+ i = ring->idx;
do {
struct fence *flushed;
+ bool same_ring = ring->idx == i;
id = vm->ids[i++];
if (i == AMDGPU_MAX_RINGS)
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
+ if (amdgpu_vm_is_gpu_reset(adev, id))
+ continue;
if (atomic64_read(&id->owner) != vm->client_id)
continue;
- if (pd_addr != id->pd_gpu_addr)
+ if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
- if (id->last_user != ring &&
+ if (!same_ring &&
(!id->last_flush || !fence_is_signaled(id->last_flush)))
continue;
flushed = id->flushed_updates;
- if (updates && (!flushed || fence_is_later(updates, flushed)))
+ if (updates &&
+ (!flushed || fence_is_later(updates, flushed)))
continue;
- /* Good we can use this VMID */
- if (id->last_user == ring) {
- r = amdgpu_sync_fence(ring->adev, sync,
- id->first);
- if (r)
- goto error;
- }
-
- /* And remember this submission as user of the VMID */
+ /* Good we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ job->vm_needs_flush = false;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
mutex_unlock(&adev->vm_manager.lock);
return 0;
} while (i != ring->idx);
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
- if (!amdgpu_sync_is_idle(&id->active)) {
- struct list_head *head = &adev->vm_manager.ids_lru;
- struct amdgpu_vm_id *tmp;
-
- list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
- list) {
- if (amdgpu_sync_is_idle(&id->active)) {
- list_move(&id->list, head);
- head = &id->list;
- }
- }
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
- }
-
- r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
- id->pd_gpu_addr = pd_addr;
-
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
- id->last_user = ring;
atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = pd_addr;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
+static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ const struct amdgpu_ip_block_version *ip_block;
+
+ if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ /* only compute rings */
+ return false;
+
+ ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (!ip_block)
+ return false;
+
+ if (ip_block->major <= 7) {
+ /* gfx7 has no workaround */
+ return true;
+ } else if (ip_block->major == 8) {
+ if (adev->gfx.mec_fw_version >= 673)
+ /* gfx8 is fixed in MEC firmware 673 */
+ return false;
+ else
+ return true;
+ }
+ return false;
+}
+
/**
* amdgpu_vm_flush - hardware flush the vm
*
*
* Emit a VM flush when it is necessary.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != gds_base ||
- id->gds_size != gds_size ||
- id->gws_base != gws_base ||
- id->gws_size != gws_size ||
- id->oa_base != oa_base ||
- id->oa_size != oa_size);
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+ id->gws_base != job->gws_base ||
+ id->gws_size != job->gws_size ||
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size);
int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
- ring->type == AMDGPU_RING_TYPE_COMPUTE))
+ job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)))
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush &&
- pd_addr != AMDGPU_VM_NO_FLUSH) {
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
struct fence *fence;
- trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
- amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+ trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
mutex_lock(&adev->vm_manager.lock);
- if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
- r = amdgpu_fence_emit(ring, &fence);
- if (r) {
- mutex_unlock(&adev->vm_manager.lock);
- return r;
- }
- fence_put(id->last_flush);
- id->last_flush = fence;
- }
+ fence_put(id->last_flush);
+ id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
- id->gds_base = gds_base;
- id->gds_size = gds_size;
- id->gws_base = gws_base;
- id->gws_size = gws_size;
- id->oa_base = oa_base;
- id->oa_size = oa_size;
- amdgpu_ring_emit_gds_switch(ring, vm_id,
- gds_base, gds_size,
- gws_base, gws_size,
- oa_base, oa_size);
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
}
return 0;
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
- * @dst: destination address to map to
+ * @dst: destination address to map to, the next dst inside the function
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
- uint64_t addr;
+ uint64_t cur_pe_start, cur_pe_end, cur_dst;
+ uint64_t addr; /* next GPU address to be updated */
+ uint64_t pt_idx;
+ struct amdgpu_bo *pt;
+ unsigned nptes; /* next number of ptes to be updated */
+ uint64_t next_pe_start;
+
+ /* initialize the variables */
+ addr = start;
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+ cur_pe_start = amdgpu_bo_gpu_offset(pt);
+ cur_pe_start += (addr & mask) * 8;
+ cur_pe_end = cur_pe_start + 8 * nptes;
+ cur_dst = dst;
+
+ /* for next ptb*/
+ addr += nptes;
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
/* walk over the address space and update the page tables */
- for (addr = start; addr < end; ) {
- uint64_t pt_idx = addr >> amdgpu_vm_block_size;
- struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
- unsigned nptes;
- uint64_t pe_start;
+ while (addr < end) {
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
- pe_start = amdgpu_bo_gpu_offset(pt);
- pe_start += (addr & mask) * 8;
-
- if (last_pe_end != pe_start) {
+ next_pe_start = amdgpu_bo_gpu_offset(pt);
+ next_pe_start += (addr & mask) * 8;
+ if (cur_pe_end == next_pe_start) {
+ /* The next ptb is consecutive to current ptb.
+ * Don't call amdgpu_vm_frag_ptes now.
+ * Will update two ptbs together in future.
+ */
+ cur_pe_end += 8 * nptes;
+ } else {
amdgpu_vm_frag_ptes(adev, vm_update_params,
- last_pe_start, last_pe_end,
- last_dst, flags);
+ cur_pe_start, cur_pe_end,
+ cur_dst, flags);
- last_pe_start = pe_start;
- last_pe_end = pe_start + 8 * nptes;
- last_dst = dst;
- } else {
- last_pe_end += 8 * nptes;
+ cur_pe_start = next_pe_start;
+ cur_pe_end = next_pe_start + 8 * nptes;
+ cur_dst = dst;
}
+ /* for next ptb*/
addr += nptes;
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
- last_pe_end, last_dst, flags);
+ amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
+ cur_pe_end, cur_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
vm_update_params.ib = &job->ibs[0];
+ r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+ if (r)
+ goto error_free;
+
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
if (r)
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
addr += mapping->offset;
if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ return amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
- r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct fence *exclusive;
uint64_t addr;
int r;
default:
break;
}
+
+ exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
} else {
addr = 0;
+ exclusive = NULL;
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+ gtt_flags, pages_addr, vm,
mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+ r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
0, 0, NULL);
kfree(mapping);
if (r)
amdgpu_bo_unreserve(vm->page_directory);
if (r)
goto error_free_page_directory;
+ vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
return 0;
&adev->vm_manager.ids_lru);
}
+ adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
}
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+ args.ucRegIndex = offset;
+ args.lpI2CDataOut = data;
+ args.ucFlag = 1;
+ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucTransBytes = 1;
+ args.ucSlaveAddr = slave_addr;
+ args.ucLineNumber = line_number;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num);
u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev,
+ u8 slave_addr, u8 line_number, u8 offset, u8 data);
#endif
#include "gmc/gmc_7_1_sh_mask.h"
MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = ci_program_pt_config_registers(adev, didt_config_ci);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
ci_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
ci_setup_default_pcie_tables(adev);
+ /* save a copy of the default DPM table */
+ memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
+ sizeof(struct ci_dpm_table));
+
return 0;
}
switch (adev->asic_type) {
case CHIP_BONAIRE:
- chip_name = "bonaire";
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f))
+ chip_name = "bonaire_k";
+ else
+ chip_name = "bonaire";
break;
case CHIP_HAWAII:
- chip_name = "hawaii";
+ if (adev->pdev->revision == 0x80)
+ chip_name = "hawaii_k";
+ else
+ chip_name = "hawaii";
break;
case CHIP_KAVERI:
case CHIP_KABINI:
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
return 0;
}
+static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type, char *buf)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = ci_get_current_pcie_speed(adev);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (adev->pm.dpm.forced_level
+ != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!pi->sclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_MCLK:
+ if (!pi->mclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
+ if (!pi->pcie_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].sclk =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].mclk =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+ .print_clock_levels = ci_dpm_print_clock_levels,
+ .force_clock_level = ci_dpm_force_clock_level,
+ .get_sclk_od = ci_dpm_get_sclk_od,
+ .set_sclk_od = ci_dpm_set_sclk_od,
+ .get_mclk_od = ci_dpm_get_mclk_od,
+ .set_mclk_od = ci_dpm_set_mclk_od,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
struct ci_power_info {
struct ci_dpm_table dpm_table;
+ struct ci_dpm_table golden_dpm_table;
u32 voltage_control;
u32 mvdd_control;
u32 vddci_control;
return true;
}
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+ /* CIK does not support SR-IOV */
+ return 0;
+}
+
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
+ int r = -EINVAL;
dev_info(adev->dev, "GPU pci config reset\n");
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ r = 0;
break;
+ }
udelay(1);
}
/* does asic init need to be run first??? */
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
+
+ return r;
}
static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
*/
static int cik_asic_reset(struct amdgpu_device *adev)
{
+ int r;
cik_set_bios_scratch_engine_hung(adev, true);
- cik_gpu_pci_config_reset(adev);
+ r = cik_gpu_pci_config_reset(adev);
cik_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+ .get_virtual_caps = &cik_get_virtual_caps,
};
static int cik_common_early_init(void *handle)
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
unsigned vm_id, bool ctx_switch)
{
u32 extra_bits = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 4)
- next_rptr++;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1); /* number of DWs to follow */
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ cik_sdma_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
if (r)
return r;
- /* unhalt the MEs */
- cik_sdma_enable(adev, true);
+ /* halt the engine before programing */
+ cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ cik_sdma_free_microcode(adev);
return 0;
}
}
}
} else { /*pi->caps_vce_pg*/
+ pi->vce_power_gated = gate;
cz_update_vce_dpm(adev);
cz_enable_vce_dpm(adev, !gate);
}
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
- /* flip at hsync for async, default is vsync */
- /* use UPDATE_IMMEDIATE_EN instead for async? */
+ /* flip immediate for async, default is vsync */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+ GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
+#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ /*it is correct only for RGB ; black is 0*/
+ WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
+ mdelay(20);
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 tmp, frame_count;
- int i, j;
+ u32 tmp;
+ int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
}
+ mdelay(20);
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
static int fiji_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
return err;
}
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+}
+
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
* registers are instanced per SE or SH. 0xffffffff means
* broadcast to all SEs or SHs (CIK).
*/
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v7_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
-
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
- control |= ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
}
}
adev->gfx.rlc.cs_data = ci_cs_data;
- adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
src_ptr = adev->gfx.rlc.reg_list;
dws = adev->gfx.rlc.reg_list_size;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
}
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
return orig;
}
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
}
}
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
*
* Halt the RLC ME (MicroEngine) (CIK).
*/
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32(mmRLC_CNTL, 0);
WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(mmRLC_LB_PARAMS, 0x00600408);
WREG32(mmRLC_LB_CNTL, 0x80000004);
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
}
}
+static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v7_0_select_se_sh,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+ .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+ .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+};
+
static int gfx_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
gfx_v7_0_set_ring_funcs(adev);
gfx_v7_0_set_irq_funcs(adev);
gfx_v7_0_set_gds_init(adev);
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ gfx_v7_0_free_microcode(adev);
return 0;
}
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- if ((ring->me == me_id) & (ring->pipe == pipe_id))
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
amdgpu_fence_process(ring);
}
break;
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v7_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
#include "clearstate_vi.h"
#include "gmc/gmc_8_2_d.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_NUM_COMPUTE_RINGS 8
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris10_golden_common_all[] =
amdgpu_program_register_sequence(adev,
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
+ WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+ if (adev->pdev->revision == 0xc7) {
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+ }
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
return r;
}
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ))
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
buffer[count++] = cpu_to_le32(0);
}
+static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_CARRIZO)
+ max_me = 5;
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i ++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
int r;
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
adev->gfx.rlc.clear_state_obj = NULL;
}
+
+ /* jump table block */
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+ if (adev->gfx.rlc.cp_table_obj == NULL) {
+ r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.cp_table_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ return r;
+ }
+
+ cz_init_cp_jump_table(adev);
+
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ }
+
return 0;
}
gfx_v8_0_rlc_fini(adev);
- kfree(adev->gfx.rlc.register_list_format);
+ gfx_v8_0_free_microcode(adev);
return 0;
}
}
}
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
}
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
WREG32(mmRLC_SRM_CNTL, data);
}
-static void polaris11_init_power_gating(struct amdgpu_device *adev)
+static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG)) {
data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
}
}
+static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+ else
+ data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
- if (adev->asic_type == CHIP_POLARIS11)
- polaris11_init_power_gating(adev);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
+ }
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if (adev->asic_type == CHIP_POLARIS11) {
+ gfx_v8_0_init_power_gating(adev);
+ }
}
}
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
- case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
+ case CHIP_TOPAZ:
+ amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+ 0x00000000 : 0x00000002);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);
* Fetches a GPU clock counter snapshot.
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v8_0_select_se_sh,
+};
+
static int gfx_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
gfx_v8_0_set_gds_init(adev);
return 0;
}
-static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, temp;
- /* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ if (adev->asic_type == CHIP_POLARIS11)
+ /* Send msg to SMU via Powerplay */
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
- if (enable) {
- /* Enable static MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable static MGPG */
+ if (enable)
data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
+ else
data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
}
-static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, temp;
- if (enable) {
- /* Enable dynamic MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable dynamic MGPG */
+ if (enable)
data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
+ else
data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
}
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
{
uint32_t data, temp;
- if (enable) {
- /* Enable quick PG */
- temp = data = RREG32(mmRLC_PG_CNTL);
- data |= 0x100000;
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable quick PG */
+ if (enable)
+ data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
- data &= ~0x100000;
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+
+ /* Read any GFX register to wake up GFX. */
+ if (!enable)
+ data = RREG32(mmDB_RENDER_CONTROL);
+}
+
+static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+ cz_enable_gfx_cg_power_gating(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ cz_enable_gfx_pipeline_power_gating(adev, true);
+ } else {
+ cz_enable_gfx_cg_power_gating(adev, false);
+ cz_enable_gfx_pipeline_power_gating(adev, false);
}
}
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+ cz_update_gfx_cg_power_gating(adev, enable);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+ break;
case CHIP_POLARIS11:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
- polaris11_enable_gfx_static_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
- else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
- polaris11_enable_gfx_dynamic_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
+ polaris11_enable_gfx_quick_mg_power_gating(adev, true);
else
- polaris11_enable_gfx_quick_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ polaris11_enable_gfx_quick_mg_power_gating(adev, false);
break;
default:
break;
{
uint32_t data;
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+
adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
- control |= ib->length_dw | (vm_id << 24);
-
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- case CHIP_STONEY:
adev->gfx.rlc.funcs = &iceland_rlc_funcs;
break;
+ case CHIP_STONEY:
case CHIP_CARRIZO:
adev->gfx.rlc.funcs = &cz_rlc_funcs;
break;
}
}
+static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v8_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
#endif
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v7_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
}
}
-/**
- * gmc7_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
udelay(100);
}
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
amdgpu_display_set_vga_render_state(adev, false);
gmc_v7_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v7_0_mc_resume(adev, &save);
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev, &save);
- if (gmc_v7_0_wait_for_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
}
}
-/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
- SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC1_BUSY_MASK);
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
udelay(100);
}
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
amdgpu_display_set_vga_render_state(adev, false);
gmc_v8_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v8_0_mc_resume(adev, &save);
if (srbm_soft_reset) {
gmc_v8_0_mc_stop(adev, &save);
- if (gmc_v8_0_wait_for_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
static int iceland_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
}
}
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
-
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ sdma_v2_4_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
return -EINVAL;
}
- /* unhalt the MEs */
- sdma_v2_4_enable(adev, true);
+ /* halt the engine before programing */
+ sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v2_4_free_microcode(adev);
return 0;
}
}
}
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ /* unhalt the MEs */
+ sdma_v3_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v3_0_ctx_switch_enable(adev, true);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
}
}
- /* unhalt the MEs */
- sdma_v3_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v3_0_ctx_switch_enable(adev, true);
+ /* disble sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v3_0_free_microcode(adev);
return 0;
}
static int tonga_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
amdgpu_ring_write(ring, 2);
}
+/**
+ * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* uvd_v4_2_ring_test_ring - register write test
*
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
+ .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
.test_ring = uvd_v4_2_ring_test_ring,
.test_ib = uvd_v4_2_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
#include "vi.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
amdgpu_ring_write(ring, 2);
}
+/**
+ * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* uvd_v5_0_ring_test_ring - register write test
*
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v5_0_ring_test_ring,
.test_ib = uvd_v5_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
+#include "bif/bif_5_1_d.h"
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
uint32_t mp_swap_cntl;
int i, j, r;
- /*disable DPG */
- WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+ /* disable DPG */
+ WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* disable byte swapping */
lmi_swap_cntl = 0;
}
/* disable interupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+ WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
/* stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ WREG32(mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
mdelay(5);
/* initialize UVD memory controller */
- WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
+ WREG32(mmUVD_LMI_CTRL,
+ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
mdelay(5);
/* enable VCPU clock */
- WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+ WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
return r;
}
/* enable master interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+ WREG32_P(mmUVD_MASTINT_EN,
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
- WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
rb_bufsz = order_base_2(ring->ring_size);
tmp = 0;
amdgpu_ring_write(ring, 2);
}
+/**
+ * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* uvd_v6_0_ring_test_ring - register write test
*
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
- if (adev->asic_type == CHIP_FIJI)
+ if (adev->asic_type == CHIP_FIJI ||
+ adev->asic_type == CHIP_POLARIS10)
uvd_v6_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = uvd_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ r = RREG32(mmGC_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+ return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ WREG32(mmGC_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+
static const u32 tonga_mgcg_cgcg_init[] =
{
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
return true;
}
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+ u32 caps = 0;
+ u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+ caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+ caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+ return caps;
+}
+
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
{mmGB_MACROTILE_MODE7, true},
};
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
return -EINVAL;
}
-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
u32 i;
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
- break;
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ return 0;
+ }
udelay(1);
}
-
+ return -EINVAL;
}
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
*/
static int vi_asic_reset(struct amdgpu_device *adev)
{
+ int r;
+
vi_set_bios_scratch_engine_hung(adev, true);
- vi_gpu_pci_config_reset(adev);
+ r = vi_gpu_pci_config_reset(adev);
vi_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+ .get_virtual_caps = &vi_get_virtual_caps,
};
static int vi_common_early_init(void *handle)
adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
adev->didt_rreg = &vi_didt_rreg;
adev->didt_wreg = &vi_didt_wreg;
+ adev->gc_cac_rreg = &vi_gc_cac_rreg;
+ adev->gc_cac_wreg = &vi_gc_cac_wreg;
adev->asic_funcs = &vi_asic_funcs;
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
+ /* rev0 hardware doesn't support PG */
adev->pg_flags = 0;
+ if (adev->rev_id != 0x00)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_GFX_PIPELINE;
adev->external_rev_id = adev->rev_id + 0x1;
break;
case CHIP_STONEY:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
void kfd_process_create_wq(void)
{
if (!kfd_process_wq)
- kfd_process_wq = create_workqueue("kfd_process_wq");
+ kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}
void kfd_process_destroy_wq(void)
{
if (kfd_process_wq) {
- flush_workqueue(kfd_process_wq);
destroy_workqueue(kfd_process_wq);
kfd_process_wq = NULL;
}
pqm_uninit(&p->pqm);
/* Iterate over all process device data structure and check
- * if we should reset all wavefronts */
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ * if we should delete debug managers and reset all wavefronts
+ */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ if ((pdd->dev->dbgmgr) &&
+ (pdd->dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
if (pdd->reset_wavefronts) {
pr_warn("amdkfd: Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
}
+ }
mutex_unlock(&p->mutex);
synchronize_rcu();
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
+ mutex_destroy(&process->mutex);
kfd_pasid_free(process->pasid);
err_alloc_pasid:
kfree(process->queues);
idx = srcu_read_lock(&kfd_processes_srcu);
+ /*
+ * Look for the process that matches the pasid. If there is no such
+ * process, we either released it in amdkfd's own notifier, or there
+ * is a bug. Unfortunately, there is no way to tell...
+ */
hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
- if (p->pasid == pasid)
- break;
+ if (p->pasid == pasid) {
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ srcu_read_unlock(&kfd_processes_srcu, idx);
- BUG_ON(p->pasid != pasid);
+ pr_debug("Unbinding process %d from IOMMU\n", pasid);
- mutex_lock(&p->mutex);
+ mutex_lock(&p->mutex);
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
+ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(dev->dbgmgr);
- pqm_uninit(&p->pqm);
+ pqm_uninit(&p->pqm);
- pdd = kfd_get_process_device_data(dev, p);
+ pdd = kfd_get_process_device_data(dev, p);
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
- }
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ if (pdd->reset_wavefronts) {
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
- /*
- * Just mark pdd as unbound, because we still need it to call
- * amd_iommu_unbind_pasid() in when the process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ /*
+ * Just mark pdd as unbound, because we still need it
+ * to call amd_iommu_unbind_pasid() in when the
+ * process exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ pdd->bound = false;
- mutex_unlock(&p->mutex);
+ mutex_unlock(&p->mutex);
+
+ return;
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
+/* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+ | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
+/* 1/2/4/8/16 lanes */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-/*
-* Supported GPU families (aligned with amdgpu_drm.h)
-*/
-#define AMD_FAMILY_UNKNOWN 0
-#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */
-#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
-#define AMD_FAMILY_VI 130 /* Iceland, Tonga */
-#define AMD_FAMILY_CZ 135 /* Carrizo */
-
/*
* Supported ASIC types
*/
#define AMD_PG_SUPPORT_SDMA (1 << 8)
#define AMD_PG_SUPPORT_ACP (1 << 9)
#define AMD_PG_SUPPORT_SAMU (1 << 10)
+#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11)
+#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
enum amd_pm_state_type {
/* not used for dpm */
int (*hw_init)(void *handle);
/* tears down the hw state */
int (*hw_fini)(void *handle);
+ void (*late_fini)(void *handle);
/* handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
/* handles IP specific hw/sw changes for resume */
#define mmDC_EDC_CSINVOC_CNT 0x3192
#define mmDC_EDC_RESTORE_CNT 0x3193
+#define mmGC_CAC_IND_INDEX 0x129a
+#define mmGC_CAC_IND_DATA 0x129b
+
#endif /* GFX_8_0_D_H */
#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
-#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
-#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
-#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000
-#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
+#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000
+#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14
+#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15
#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
-#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
-#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
#define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
#define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
#define RLC_SRM_DEBUG__DATA__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
-#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
-#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
#define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
#endif /* GFX_8_0_SH_MASK_H */
ULONG ulReserved[12];
}ATOM_ASIC_PROFILING_INFO_V3_5;
+/* for Polars10/11 AVFS parameters */
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ULONG ulMaxVddc;
+ ULONG ulMinVddc;
+ USHORT usLkgEuseIndex;
+ UCHAR ucLkgEfuseBitLSB;
+ UCHAR ucLkgEfuseLength;
+ ULONG ulLkgEncodeLn_MaxDivMin;
+ ULONG ulLkgEncodeMax;
+ ULONG ulLkgEncodeMin;
+ EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+ ULONG ulEvvDefaultVddc;
+ ULONG ulEvvNoCalcVddc;
+ ULONG ulSpeed_Model;
+ ULONG ulSM_A0;
+ ULONG ulSM_A1;
+ ULONG ulSM_A2;
+ ULONG ulSM_A3;
+ ULONG ulSM_A4;
+ ULONG ulSM_A5;
+ ULONG ulSM_A6;
+ ULONG ulSM_A7;
+ UCHAR ucSM_A0_sign;
+ UCHAR ucSM_A1_sign;
+ UCHAR ucSM_A2_sign;
+ UCHAR ucSM_A3_sign;
+ UCHAR ucSM_A4_sign;
+ UCHAR ucSM_A5_sign;
+ UCHAR ucSM_A6_sign;
+ UCHAR ucSM_A7_sign;
+ ULONG ulMargin_RO_a;
+ ULONG ulMargin_RO_b;
+ ULONG ulMargin_RO_c;
+ ULONG ulMargin_fixed;
+ ULONG ulMargin_Fmax_mean;
+ ULONG ulMargin_plat_mean;
+ ULONG ulMargin_Fmax_sigma;
+ ULONG ulMargin_plat_sigma;
+ ULONG ulMargin_DC_sigma;
+ ULONG ulLoadLineSlop;
+ ULONG ulaTDClimitPerDPM[8];
+ ULONG ulaNoCalcVddcPerDPM[8];
+ ULONG ulAVFS_meanNsigma_Acontant0;
+ ULONG ulAVFS_meanNsigma_Acontant1;
+ ULONG ulAVFS_meanNsigma_Acontant2;
+ USHORT usAVFS_meanNsigma_DC_tol_sigma;
+ USHORT usAVFS_meanNsigma_Platform_mean;
+ USHORT usAVFS_meanNsigma_Platform_sigma;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a0;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a1;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a2;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a0;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a1;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSON_b;
+ USHORT usMaxVoltage_0_25mv;
+ UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF;
+ UCHAR ucEnableGB_VDROOP_TABLE_CKSON;
+ UCHAR ucEnableGB_FUSE_TABLE_CKSOFF;
+ UCHAR ucEnableGB_FUSE_TABLE_CKSON;
+ USHORT usPSM_Age_ComFactor;
+ UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage;
+ UCHAR ucReserved;
+}ATOM_ASIC_PROFILING_INFO_V3_6;
+
typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
ULONG ulMaxSclkFreq;
CGS_IND_REG__SMC,
CGS_IND_REG__UVD_CTX,
CGS_IND_REG__DIDT,
+ CGS_IND_REG_GC_CAC,
CGS_IND_REG__AUDIO_ENDPT
};
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
+ CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
struct cgs_acpi_method_argument {
uint32_t type;
- uint32_t method_length;
uint32_t data_length;
union{
uint32_t value;
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type);
+
typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
+ cgs_rel_firmware rel_firmware;
/* cg pg interface*/
cgs_set_powergating_state set_powergating_state;
cgs_set_clockgating_state set_clockgating_state;
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type) \
+ CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
- goto err;
+ goto err1;
pr_info("amdgpu: powerplay initialized\n");
return 0;
+err1:
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
err:
pr_err("amdgpu: powerplay initialization failed\n");
return ret;
if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+
return ret;
}
case AMD_PP_EVENT_COMPLETE_INIT:
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
+ case AMD_PP_EVENT_READJUST_POWER_STATE:
+ pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
+ ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
default:
break;
}
PP_CHECK_HW(hwmgr);
- if (hwmgr->hwmgr_func->get_pp_table == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return 0;
- }
+ if (!hwmgr->soft_pp_table)
+ return -EINVAL;
+
+ *table = (char *)hwmgr->soft_pp_table;
- return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+ return hwmgr->soft_pp_table_size;
}
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
PP_CHECK_HW(hwmgr);
- if (hwmgr->hwmgr_func->set_pp_table == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return 0;
+ if (!hwmgr->hardcode_pp_table) {
+ hwmgr->hardcode_pp_table =
+ kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+
+ if (!hwmgr->hardcode_pp_table)
+ return -ENOMEM;
+
+ /* to avoid powerplay crash when hardcode pptable is empty */
+ memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size);
}
- return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+ memcpy(hwmgr->hardcode_pp_table, buf, size);
+
+ hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
+
+ return amd_powerplay_reset(handle);
}
static int pp_dpm_force_clock_level(void *handle,
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
+static int pp_dpm_get_sclk_od(void *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+}
+
+static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+}
+
+static int pp_dpm_get_mclk_od(void *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+}
+
+static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
.set_pp_table = pp_dpm_set_pp_table,
.force_clock_level = pp_dpm_force_clock_level,
.print_clock_levels = pp_dpm_print_clock_levels,
+ .get_sclk_od = pp_dpm_get_sclk_od,
+ .set_sclk_od = pp_dpm_set_sclk_od,
+ .get_mclk_od = pp_dpm_get_mclk_od,
+ .set_mclk_od = pp_dpm_set_mclk_od,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
return 0;
}
+int amd_powerplay_reset(void *handle)
+{
+ struct pp_instance *instance = (struct pp_instance *)handle;
+ struct pp_eventmgr *eventmgr;
+ struct pem_event_data event_data = { {0} };
+ int ret;
+
+ if (instance == NULL)
+ return -EINVAL;
+
+ eventmgr = instance->eventmgr;
+ if (!eventmgr || !eventmgr->pp_eventmgr_fini)
+ return -EINVAL;
+
+ eventmgr->pp_eventmgr_fini(eventmgr);
+
+ ret = pp_sw_fini(handle);
+ if (ret)
+ return ret;
+
+ kfree(instance->hwmgr->ps);
+
+ ret = pp_sw_init(handle);
+ if (ret)
+ return ret;
+
+ hw_init_power_state_table(instance->hwmgr);
+
+ if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
+ return -EINVAL;
+
+ ret = eventmgr->pp_eventmgr_init(eventmgr);
+ if (ret)
+ return ret;
+
+ return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+}
+
/* export this function to DAL */
int amd_powerplay_display_configuration_change(void *handle,
pem_unregister_interrupts(eventmgr);
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
- if (eventmgr != NULL)
- kfree(eventmgr);
}
int eventmgr_init(struct pp_instance *handle)
int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
- /* TODO */
- return 0;
+ return phm_disable_dynamic_state_management(eventmgr->hwmgr);
}
int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
cz_enable_disable_vce_dpm(hwmgr, false);
- /* TODO: to figure out why vce can't be poweroff*/
+ cz_dpm_powerdown_vce(hwmgr);
cz_hwmgr->vce_power_gated = true;
} else {
cz_dpm_powerup_vce(hwmgr);
}
}
} else {
+ cz_hwmgr->vce_power_gated = bgate;
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
+ struct cz_hwmgr *data;
+
+ data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
result = cz_initialize_dpm_defaults(hwmgr);
if (result != 0) {
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr;
- int ret = 0;
-
- cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
- if (cz_hwmgr == NULL)
- return -ENOMEM;
-
- hwmgr->backend = cz_hwmgr;
hwmgr->hwmgr_func = &cz_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
- return ret;
+ return 0;
}
static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_hwmgr *data;
uint32_t i;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
bool stay_in_boot;
int result;
+ data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_default_on = false;
data->sram_end = SMC_RAM_END;
data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
return 0;
}
+static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
/**
* Get the location of various tables inside the FW image.
*
return 0;
}
+/**
+* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return if success then 0;
+*/
+static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
/**
* Initial switch from ARB F0->F1
*
MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
+static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return fiji_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
struct fiji_single_dpm_table *dpm_table, uint32_t count)
{
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
return 0;
}
+static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
return 0;
}
+static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
return 0;
}
+static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -1);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
+ "Failed to force MCLK DPM0!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -1);
+ }
+
+ if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
+ printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
+ return -1;
+ }
+
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_Voltage_Cntl_Disable) == 0),
+ "Failed to disable voltage DPM during DPM Stop Function!",
+ return -1);
+
+ return 0;
+}
+
static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
uint32_t sources)
{
return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
+static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
return result;
}
+static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = fiji_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = fiji_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = fiji_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = fiji_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = fiji_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = fiji_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = fiji_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
+}
+
static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
CG_FDO_CTRL2, FDO_PWM_MODE);
}
-static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
return is_update_required;
}
+static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct fiji_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct fiji_power_state *fiji_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+ fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct fiji_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct fiji_power_state *fiji_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+ fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &fiji_hwmgr_backend_fini,
.asic_setup = &fiji_setup_asic_task,
.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
+ .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
.force_dpm_level = &fiji_dpm_force_dpm_level,
.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
.get_power_state_size = &fiji_get_power_state_size,
.get_fan_control_mode = fiji_get_fan_control_mode,
.check_states_equal = fiji_check_states_equal,
.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
- .get_pp_table = fiji_get_pp_table,
- .set_pp_table = fiji_set_pp_table,
.force_clock_level = fiji_force_clock_level,
.print_clock_levels = fiji_print_clock_levels,
+ .get_sclk_od = fiji_get_sclk_od,
+ .set_sclk_od = fiji_set_sclk_od,
+ .get_mclk_od = fiji_get_mclk_od,
+ .set_mclk_od = fiji_set_mclk_od,
};
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data;
- int ret = 0;
-
- data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_fiji_thermal_initialize(hwmgr);
- return ret;
+ return 0;
}
bool pg_acp_init;
bool frtc_enabled;
bool frtc_status_changed;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
};
/* To convert to Q8.8 format for firmware */
fiji_hwmgr->dte_tj_offset = tmp;
if (!tmp) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
fiji_hwmgr->fast_watermark_threshold = 100;
- tmp = 1;
- fiji_hwmgr->enable_dte_feature = tmp ? false : true;
- fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
- fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+ if (hwmgr->powercontainment_enabled) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ tmp = 1;
+ fiji_hwmgr->enable_dte_feature = tmp ? false : true;
+ fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+ fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+ }
}
}
return result;
}
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable CAC in SMC.", result = -1);
+
+ data->cac_enabled = false;
+ }
+ return result;
+}
+
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
return result;
}
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
+ int smc_result;
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_TDCLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable TDCLimit in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_DTE) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDTE));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable DTE in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable PkgPwrTracking in SMC.",
+ result = smc_result);
+ }
+ data->power_containment_features = 0;
+ }
+
+ return result;
+}
+
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+
struct fiji_pt_config_reg {
uint32_t offset;
uint32_t mask;
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
return ret;
}
+int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
+{
+ int ret = -1;
+ bool enabled;
+
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface)) {
+ if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
+ } else {
+ ret = phm_dispatch_table(hwmgr,
+ &(hwmgr->disable_dynamic_state_management),
+ NULL, NULL);
+ }
+
+ enabled = ret == 0 ? false : true;
+
+ cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+ return ret;
+}
+
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
PHM_FUNC_CHECK(hwmgr);
{
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+ if (display_config == NULL)
return -EINVAL;
hwmgr->display_config = *display_config;
- /* to do pass other display configuration in furture */
+
+ if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+ return -EINVAL;
+
+ /* TODO: pass other display configuration in the future */
if (hwmgr->hwmgr_func->store_cc6_data)
hwmgr->hwmgr_func->store_cc6_data(hwmgr,
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
#include "cgs_common.h"
#include "power_state.h"
#include "hwmgr.h"
hwmgr->hw_revision = pp_init->rev_id;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
+ hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
switch (hwmgr->chip_family) {
- case AMD_FAMILY_CZ:
+ case AMDGPU_FAMILY_CZ:
cz_hwmgr_init(hwmgr);
break;
- case AMD_FAMILY_VI:
+ case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TONGA:
tonga_hwmgr_init(hwmgr);
if (hwmgr == NULL || hwmgr->ps == NULL)
return -EINVAL;
+ /* do hwmgr finish*/
+ kfree(hwmgr->hardcode_pp_table);
+
+ kfree(hwmgr->backend);
+
+ kfree(hwmgr->start_thermal_controller.function_list);
+
+ kfree(hwmgr->set_temperature_range.function_list);
+
kfree(hwmgr->ps);
kfree(hwmgr);
return 0;
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
int phm_find_boot_level(void *table,
uint8_t phases;
uint8_t cks_enable;
uint8_t cks_voffset;
+ uint32_t sclk_offset;
};
typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
data->uvd_power_gated = bgate;
if (bgate) {
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
polaris10_update_uvd_dpm(hwmgr, true);
polaris10_phm_powerdown_uvd(hwmgr);
} else {
polaris10_phm_powerup_uvd(hwmgr);
polaris10_update_uvd_dpm(hwmgr, false);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
return 0;
return 0;
}
+static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
/**
* Get the location of various tables inside the FW image.
*
return 0;
}
+static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
/**
* Initial switch from ARB F0->F1
*
MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
+static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return polaris10_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
table->Smio[level] |=
data->mvdd_voltage_table.entries[level].smio_low;
}
- table->SmioMask2 = data->vddci_voltage_table.mask_low;
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
}
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i].vddc -
(uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
}
mem_level->MclkFrequency = clock;
- mem_level->StutterEnable = 0;
mem_level->EnabledForThrottle = 1;
mem_level->EnabledForActivity = 0;
mem_level->UpHyst = 0;
mem_level->VoltageDownHyst = 0;
mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
mem_level->StutterEnable = false;
-
mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
data->display_timing.num_existing_displays = info.display_count;
return result;
}
- /* in order to prevent MC activity from stutter mode to push DPM up.
+ /* In order to prevent MC activity from stutter mode to push DPM up,
* the UVD change complements this by putting the MCLK in
- * a higher state by default such that we are not effected by
+ * a higher state by default such that we are not affected by
* up threshold or and MCLK DPM latency.
*/
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ levels[0].ActivityLevel = 0x1f;
CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
data->smc_state_table.MemoryDpmLevelCount =
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table", );
- } else {
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
us_mvdd = 0;
if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
table->VceLevelCount = (uint8_t)(mm_table->count);
table->VceBootLevel = 0;
table->VceLevel[count].MinVoltage = 0;
table->VceLevel[count].MinVoltage |=
(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/*retrieve divider value for VBIOS */
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
table->SamuBootLevel = 0;
table->SamuLevelCount = (uint8_t)(mm_table->count);
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/* retrieve divider value for VBIOS */
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
table->UvdLevelCount = (uint8_t)(mm_table->count);
table->UvdBootLevel = 0;
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
/* retrieve divider value for VBIOS */
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
}
+
return result;
}
static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
* if the part is SS or FF. if RO >= 1660MHz, part is FF.
*/
efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
+ ixSMU_EFUSE_0 + (67 * 4));
efuse &= 0xFF000000;
efuse = efuse >> 24;
- efuse2 &= 0xF;
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
- /* Populate Stretch amount */
- data->smc_state_table.ClockStretcherAmount = stretch_amount;
+ ro = efuse * (max -min)/255 + min;
/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
for (i = 0; i < sclk_table->count; i++) {
data->smc_state_table.Sclk_CKS_masterEn0_7 |=
sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
if (volt_without_cks >= volt_with_cks)
volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
}
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
+ data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
stretch_amount2 = 0;
return -EINVAL);
}
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
- GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
- if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
- && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (polaris10_clock_stretch_amount_conversion
- [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
- * and it's in 10Khz unit,
- * as opposed to Data table, which is in Mhz unit.
- */
- if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
- |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(
- data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
- }
-
value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
value &= 0xFFFFFFFE;
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
config = VR_SVI2_PLANE_2;
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
} else {
config = VR_STATIC_VOLTAGE;
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
return 0;
}
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, data->sram_end);
+
+ polaris10_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ data->sram_end);
+
+ result = polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, data->sram_end);
+ polaris10_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ data->sram_end);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ }
+ return result;
+}
+
+
/**
* Initializes the SMC table and uploads it
*
"Failed to populate Clock Stretcher Data Table!",
return result);
}
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
table->CurrSclkPllRange = 0xff;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
return 0;
}
+static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
return 0;
}
+static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t soft_register_value = 0;
+ uint32_t handshake_disables_offset = data->soft_regs_start
+ + offsetof(SMU74_SoftRegisters, HandshakeDisables);
/* enable SCLK dpm */
if (!data->sclk_dpm_key_disabled)
/* enable MCLK dpm */
if (0 == data->mclk_dpm_key_disabled) {
+/* Disable UVD - SMU handshake for MCLK. */
+ soft_register_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, handshake_disables_offset);
+ soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ handshake_disables_offset, soft_register_value);
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr->smumgr,
"Failed to enable MCLK DPM during DPM Start Function!",
return -1);
-
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
return 0;
}
+static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -1);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -1);
+ }
+
+ if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
+ printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
+ return -1;
+ }
+
+ return 0;
+}
+
static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
bool protection;
return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
+static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
tmp_result = polaris10_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable SCLK control!", result = tmp_result);
int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
+ int tmp_result, result = 0;
- return 0;
+ tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = polaris10_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = polaris10_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = polaris10_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = polaris10_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = polaris10_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = polaris10_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
}
int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
+ if (hwmgr->powercontainment_enabled)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
if (hwmgr->chip_id == CHIP_POLARIS11)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint16_t vv_id;
- uint16_t vddc = 0;
+ uint32_t vddc = 0;
uint16_t i, j;
uint32_t sclk = 0;
struct phm_ppt_v1_information *table_info =
}
}
+ if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+ VOLTAGE_TYPE_VDDC,
+ sclk, vv_id, &vddc) != 0) {
+ printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
+ continue;
+ }
- PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
- "Error retrieving EVV voltage value!",
- continue);
-
-
- /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+ /* need to make sure vddc is less than 2v or else, it could burn the ASIC.
+ * real voltage level in unit of 0.01mv */
+ PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
"Invalid VDDC value", result = -EINVAL;);
/* the voltage should not be zero nor equal to leakage ID */
return 0;
}
+int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ uint32_t i;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+ if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+ return 0;
+
+ for (i = 0; i < lookup_table->count; i++) {
+ if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+ dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+
int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_hwmgr *data;
struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
uint32_t temp_reg;
int result;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_default_on = false;
data->sram_end = SMC_RAM_END;
data->mclk_dpm0_activity_target = 0xa;
data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+ data->enable_tdc_limit_feature = true;
+ data->enable_pkg_pwr_tracking_feature = true;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->mclk_stutter_mode_threshold = 40000;
+
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
}
+ if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+
polaris10_set_features_platform_caps(hwmgr);
+ polaris10_patch_voltage_workaround(hwmgr);
polaris10_init_dpm_defaults(hwmgr);
/* Get leakage voltage based on leakage ID. */
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
+ PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
(ATOM_Tonga_MCLK_Dependency_Table *)
(((unsigned long)powerplay_table) +
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = mclk_dep_table->entries
[state_entry->ucMemoryClockIndexLow].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
[state_entry->ucEngineClockIndexLow].ulSclk;
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenLow);
[polaris10_power_state->performance_level_count++]);
performance_level->memory_clock = mclk_dep_table->entries
[state_entry->ucMemoryClockIndexHigh].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
+
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
[state_entry->ucEngineClockIndexHigh].ulSclk;
+
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenHigh);
performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
switch (state->classification.ui_label) {
case PP_StateUILabel_Performance:
data->use_pcie_performance_levels = true;
-
for (i = 0; i < ps->performance_level_count; i++) {
if (data->pcie_gen_performance.max <
ps->performance_levels[i].pcie_gen)
ps->performance_levels[i].pcie_lane)
data->pcie_lane_performance.max =
ps->performance_levels[i].pcie_lane;
-
if (data->pcie_lane_performance.min >
ps->performance_levels[i].pcie_lane)
data->pcie_lane_performance.min =
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
if (!bgate) {
- data->smc_state_table.SamuBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
+ data->smc_state_table.SamuBootLevel = 0;
mm_boot_level_offset = data->dpm_table_start +
offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
mm_boot_level_offset /= 4;
return 0;
}
+static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int tmp_result, result = 0;
"Failed to program memory timing parameters!",
result = tmp_result);
+ tmp_result = polaris10_notify_smc_display(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify smc display settings!",
+ result = tmp_result);
+
tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to unfreeze SCLK MCLK DPM!",
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
+
int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
polaris10_notify_smc_display_change(hwmgr, false);
- else
- polaris10_notify_smc_display_change(hwmgr, true);
return 0;
}
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
- polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
-
return 0;
}
return 0;
}
- data->need_long_memory_training = true;
+ data->need_long_memory_training = false;
/*
* PPMCME_FirmwareDescriptorEntry *pfd = NULL;
return result;
}
-static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
CG_FDO_CTRL2, FDO_PWM_MODE);
}
+static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct polaris10_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct polaris10_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
.backend_init = &polaris10_hwmgr_backend_init,
.backend_fini = &polaris10_hwmgr_backend_fini,
.check_states_equal = polaris10_check_states_equal,
.set_fan_control_mode = polaris10_set_fan_control_mode,
.get_fan_control_mode = polaris10_get_fan_control_mode,
- .get_pp_table = polaris10_get_pp_table,
- .set_pp_table = polaris10_set_pp_table,
.force_clock_level = polaris10_force_clock_level,
.print_clock_levels = polaris10_print_clock_levels,
.enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+ .get_sclk_od = polaris10_get_sclk_od,
+ .set_sclk_od = polaris10_set_sclk_od,
+ .get_mclk_od = polaris10_get_mclk_od,
+ .set_mclk_od = polaris10_set_mclk_od,
};
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data;
-
- data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_polaris10_thermal_initialize(hwmgr);
uint32_t up_hyst;
uint32_t disable_dpm_mask;
bool apply_optimized_settings;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
+ uint32_t avfs_vdroop_override_setting;
+ bool apply_avfs_cks_off_voltage;
+ uint32_t frame_time_x2;
};
/* To convert to Q8.8 format for firmware */
if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
- sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
return result;
}
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable CAC in SMC.", result = -1);
+
+ data->cac_enabled = false;
+ }
+ return result;
+}
+
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
return result;
}
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
+ int smc_result;
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_TDCLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable TDCLimit in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_DTE) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDTE));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable DTE in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable PkgPwrTracking in SMC.",
+ result = smc_result);
+ }
+ data->power_containment_features = 0;
+ }
+
+ return result;
+}
+
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
POLARIS10_CONFIGREG_MAX
};
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
int ret;
struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
return 0;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
0 : -1;
return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
}
+bool acpi_atcs_notify_pcie_device_ready(void *device)
+{
+ int32_t temp_buffer = 1;
+
+ return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
+ ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
+ &temp_buffer,
+ NULL,
+ 0,
+ sizeof(temp_buffer),
+ 0);
+}
+
+
int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
{
struct atcs_pref_req_input atcs_input;
int result;
struct cgs_system_info info = {0};
- if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
+ if( 0 != acpi_atcs_notify_pcie_device_ready(device))
return -EINVAL;
info.size = sizeof(struct cgs_system_info);
ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
&atcs_input,
&atcs_output,
- 0,
+ 1,
sizeof(atcs_input),
sizeof(atcs_output));
if (result != 0)
pin_assignment->ucGpioPinBitShift;
gpio_pin_assignment->us_gpio_pin_aindex =
le16_to_cpu(pin_assignment->usGpioPin_AIndex);
- return false;
+ return true;
}
offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
}
- return true;
+ return false;
}
/**
}
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
int result;
if (0 != result)
return result;
- *voltage = get_voltage_info_param_space.usVoltageLevel;
+ *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
return result;
}
return 0;
}
+
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+{
+ ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+ if (param == NULL)
+ return -EINVAL;
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+ NULL, NULL, NULL);
+ if (!profile)
+ return -1;
+
+ param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
+ param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
+ param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
+ param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
+ param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
+ param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
+ param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
+ param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
+ param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
+ param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
+ param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+ param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+ param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+ param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+ param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+ param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+ param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+ return 0;
+}
};
typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
+struct pp_atom_ctrl__avfs_parameters {
+ uint32_t ulAVFS_meanNsigma_Acontant0;
+ uint32_t ulAVFS_meanNsigma_Acontant1;
+ uint32_t ulAVFS_meanNsigma_Acontant2;
+ uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+ uint16_t usAVFS_meanNsigma_Platform_mean;
+ uint16_t usAVFS_meanNsigma_Platform_sigma;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a0;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a1;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b;
+ uint16_t usMaxVoltage_0_25mv;
+ uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF;
+ uint8_t ucEnableGB_VDROOP_TABLE_CKSON;
+ uint8_t ucEnableGB_FUSE_TABLE_CKSOFF;
+ uint8_t ucEnableGB_FUSE_TABLE_CKSON;
+ uint16_t usPSM_Age_ComFactor;
+ uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage;
+ uint8_t ucReserved;
+};
+
extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level);
extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+
#endif
static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
struct pp_hwmgr *hwmgr)
{
- const void *table_addr = NULL;
+ const void *table_addr = hwmgr->soft_pp_table;
uint8_t frev, crev;
uint16_t size;
- table_addr = cgs_atom_get_data_table(hwmgr->device,
- GetIndexIntoMasterTable(DATA, PowerPlayInfo),
- &size, &frev, &crev);
+ if (!table_addr) {
+ table_addr = cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+ &size, &frev, &crev);
- hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table_size = size;
+ }
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
}
const ATOM_PPLIB_VCE_State_Table *vce_table =
get_vce_state_table(hwmgr, table);
- if (vce_table > 0)
+ if (vce_table)
return vce_table->numEntries;
return 0;
static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
- hwmgr->soft_pp_table = NULL;
- }
-
if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
table->Smio[count] |=
data->mvdd_voltage_table.entries[count].smio_low;
}
- table->SmioMask2 = data->vddci_voltage_table.mask_low;
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
}
}
}
- /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
- /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
- /* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
- if (NULL != allowed_vdd_mclk_table) {
- /* Initialize Vddci DPM table based on allow Mclk values */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
- data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
- data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
- data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
- }
-
/* setup PCIE gen speed levels*/
tonga_setup_default_pcie_tables(hwmgr);
reg_value = 0;
if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
- VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) {
+ (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment))) {
table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
/* ACDC Switch GPIO */
reg_value = 0;
if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
- PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) {
+ (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment))) {
table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
}
reg_value = 0;
- if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
+ if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalOutGPIO);
int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
{
int result = 0;
SMU72_Discrete_DpmTable *table = NULL;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ tonga_hwmgr *data;
pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
phw_tonga_ulv_parm *ulv;
PP_ASSERT_WITH_CODE((NULL != hwmgr),
"Invalid Parameter!", return -1;);
+ data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_defaule_on = 0;
data->sram_end = SMC_RAM_END;
data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
* if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
* Peak Current Control feature is enabled and we should program PCC HW register
*/
- if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
CG_FDO_CTRL2, FDO_PWM_MODE);
}
-static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
return size;
}
+static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct tonga_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct tonga_power_state *tonga_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+ tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct tonga_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct tonga_power_state *tonga_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+ tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.backend_init = &tonga_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
.asic_setup = &tonga_setup_asic_task,
.dynamic_state_management_enable = &tonga_enable_dpm_tasks,
+ .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
.apply_state_adjust_rules = tonga_apply_state_adjust_rules,
.force_dpm_level = &tonga_force_dpm_level,
.power_state_set = tonga_set_power_state_tasks,
.check_states_equal = tonga_check_states_equal,
.set_fan_control_mode = tonga_set_fan_control_mode,
.get_fan_control_mode = tonga_get_fan_control_mode,
- .get_pp_table = tonga_get_pp_table,
- .set_pp_table = tonga_set_pp_table,
.force_clock_level = tonga_force_clock_level,
.print_clock_levels = tonga_print_clock_levels,
+ .get_sclk_od = tonga_get_sclk_od,
+ .set_sclk_od = tonga_set_sclk_od,
+ .get_mclk_od = tonga_get_mclk_od,
+ .set_mclk_od = tonga_set_mclk_od,
};
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- tonga_hwmgr *data;
-
- data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
- memset(data, 0x00, sizeof(tonga_hwmgr));
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_tonga_thermal_initialize(hwmgr);
bool samu_power_gated; /* 1: gated, 0:not gated */
bool acp_power_gated; /* 1: gated, 0:not gated */
bool pg_acp_init;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
};
typedef struct tonga_hwmgr tonga_hwmgr;
ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_SCLK_Dependency_Table;
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ UCHAR ucVddInd; /* Base voltage */
+ USHORT usVddcOffset; /* Offset relative to base voltage */
+ ULONG ulSclk;
+ USHORT usEdcCurrent;
+ UCHAR ucReliabilityTemperature;
+ UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+ ULONG ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+ ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
typedef struct _ATOM_Tonga_PCIE_Record {
UCHAR ucPCIEGenSpeed;
UCHAR usPCIELaneWidth;
(((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
if (0 != powerplay_table->usPPMTableOffset) {
- if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+ if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnablePlatformPowerManagement);
}
static int get_sclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
- const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+ const PPTable_Generic_SubTable_Header *sclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
- PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
- "Invalid PowerPlay Table!", return -1);
+ if (sclk_dep_table->ucRevId < 1) {
+ const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+ (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
- table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
- * sclk_dep_table->ucNumEntries;
+ PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+ "Invalid PowerPlay Table!", return -1);
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+ * tonga_table->ucNumEntries;
- if (NULL == sclk_table)
- return -ENOMEM;
+ sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
- memset(sclk_table, 0x00, table_size);
-
- sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
- for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- sclk_dep_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- sclk_dep_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- sclk_dep_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
- }
+ if (NULL == sclk_table)
+ return -ENOMEM;
+ memset(sclk_table, 0x00, table_size);
+
+ sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+ for (i = 0; i < tonga_table->ucNumEntries; i++) {
+ sclk_table->entries[i].vddInd =
+ tonga_table->entries[i].ucVddInd;
+ sclk_table->entries[i].vdd_offset =
+ tonga_table->entries[i].usVddcOffset;
+ sclk_table->entries[i].clk =
+ tonga_table->entries[i].ulSclk;
+ sclk_table->entries[i].cks_enable =
+ (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table->entries[i].cks_voffset =
+ (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ }
+ } else {
+ const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+ (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+
+ PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+ "Invalid PowerPlay Table!", return -1);
+
+ table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+ * polaris_table->ucNumEntries;
+
+ sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
+
+ if (NULL == sclk_table)
+ return -ENOMEM;
+
+ memset(sclk_table, 0x00, table_size);
+
+ sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+ for (i = 0; i < polaris_table->ucNumEntries; i++) {
+ sclk_table->entries[i].vddInd =
+ polaris_table->entries[i].ucVddInd;
+ sclk_table->entries[i].vdd_offset =
+ polaris_table->entries[i].usVddcOffset;
+ sclk_table->entries[i].clk =
+ polaris_table->entries[i].ulSclk;
+ sclk_table->entries[i].cks_enable =
+ (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table->entries[i].cks_voffset =
+ (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+ }
+ }
*pp_tonga_sclk_dep_table = sclk_table;
return 0;
const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
(const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
- const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+ const PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
- hwmgr->soft_pp_table = NULL;
- }
-
- if (NULL != pp_table_information->vdd_dep_on_sclk)
- pp_table_information->vdd_dep_on_sclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_sclk);
+ pp_table_information->vdd_dep_on_sclk = NULL;
- if (NULL != pp_table_information->vdd_dep_on_mclk)
- pp_table_information->vdd_dep_on_mclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_mclk);
+ pp_table_information->vdd_dep_on_mclk = NULL;
- if (NULL != pp_table_information->valid_mclk_values)
- pp_table_information->valid_mclk_values = NULL;
+ kfree(pp_table_information->valid_mclk_values);
+ pp_table_information->valid_mclk_values = NULL;
- if (NULL != pp_table_information->valid_sclk_values)
- pp_table_information->valid_sclk_values = NULL;
+ kfree(pp_table_information->valid_sclk_values);
+ pp_table_information->valid_sclk_values = NULL;
- if (NULL != pp_table_information->vddc_lookup_table)
- pp_table_information->vddc_lookup_table = NULL;
+ kfree(pp_table_information->vddc_lookup_table);
+ pp_table_information->vddc_lookup_table = NULL;
- if (NULL != pp_table_information->vddgfx_lookup_table)
- pp_table_information->vddgfx_lookup_table = NULL;
+ kfree(pp_table_information->vddgfx_lookup_table);
+ pp_table_information->vddgfx_lookup_table = NULL;
- if (NULL != pp_table_information->mm_dep_table)
- pp_table_information->mm_dep_table = NULL;
+ kfree(pp_table_information->mm_dep_table);
+ pp_table_information->mm_dep_table = NULL;
- if (NULL != pp_table_information->cac_dtp_table)
- pp_table_information->cac_dtp_table = NULL;
+ kfree(pp_table_information->cac_dtp_table);
+ pp_table_information->cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table)
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != pp_table_information->ppm_parameter_table)
- pp_table_information->ppm_parameter_table = NULL;
+ kfree(pp_table_information->ppm_parameter_table);
+ pp_table_information->ppm_parameter_table = NULL;
- if (NULL != pp_table_information->pcie_table)
- pp_table_information->pcie_table = NULL;
+ kfree(pp_table_information->pcie_table);
+ pp_table_information->pcie_table = NULL;
- if (NULL != hwmgr->pptable) {
- kfree(hwmgr->pptable);
- hwmgr->pptable = NULL;
- }
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
return result;
}
uint32_t chip_family;
uint32_t chip_id;
uint32_t rev_id;
+ bool powercontainment_enabled;
};
enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_None = 0,
int (*set_pp_table)(void *handle, const char *buf, size_t size);
int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
};
struct amd_powerplay {
int amd_powerplay_fini(void *handle);
+int amd_powerplay_reset(void *handle);
+
int amd_powerplay_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *input);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
+extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
int (*dynamic_state_management_enable)(
struct pp_hwmgr *hw_mgr);
+ int (*dynamic_state_management_disable)(
+ struct pp_hwmgr *hw_mgr);
int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps);
int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
- int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
- int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+ int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
};
struct pp_table_func {
uint8_t ucVr_I2C_Line;
uint8_t ucPlx_I2C_address;
uint8_t ucPlx_I2C_Line;
+ uint32_t usBoostPowerLimit;
+ uint8_t ucCKS_LDO_REFSEL;
};
struct phm_ppm_table {
struct pp_smumgr *smumgr;
const void *soft_pp_table;
uint32_t soft_pp_table_size;
+ void *hardcode_pp_table;
bool need_pp_table_upload;
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
+ bool powercontainment_enabled;
uint32_t fan_ctrl_default_mode;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
#pragma pack(push, 1)
+#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
#define PPSMC_SWSTATE_FLAG_DC 0x01
#define PPSMC_SWSTATE_FLAG_UVD 0x02
#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
+#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+
#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
extern int acpi_pcie_perf_request(void *device,
uint8_t perf_req,
bool advertise);
+extern bool acpi_atcs_notify_pcie_device_ready(void *device);
#define SMU__NUM_LCLK_DPM_LEVELS 8
#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define EXP_M1 35
+#define EXP_M2 92821
+#define EXP_B 66629747
+
+#define EXP_M1_1 365
+#define EXP_M2_1 658700
+#define EXP_B_1 305506134
+
+#define EXP_M1_2 189
+#define EXP_M2_2 379692
+#define EXP_B_2 194609469
+
+#define EXP_M1_3 99
+#define EXP_M2_3 217915
+#define EXP_B_3 122255994
+
+#define EXP_M1_4 51
+#define EXP_M2_4 122643
+#define EXP_B_4 74893384
+
+#define EXP_M1_5 423
+#define EXP_M2_5 1103326
+#define EXP_B_5 728122621
+
enum SID_OPTION {
SID_OPTION_HI,
SID_OPTION_LO,
uint32_t CacConfigTable;
uint32_t CacStatusTable;
-
uint32_t mcRegisterTable;
-
uint32_t mcArbDramTimingTable;
-
-
-
uint32_t PmFuseTable;
uint32_t Globals;
uint32_t ClockStretcherTable;
uint32_t VftTable;
- uint32_t Reserved[21];
+ uint32_t Reserved1;
+ uint32_t AvfsTable;
+ uint32_t AvfsCksOffGbvTable;
+ uint32_t AvfsMeanNSigma;
+ uint32_t AvfsSclkOffsetTable;
+ uint32_t Reserved[16];
uint32_t Signature;
};
struct SMU_ClockStretcherDataTableEntry {
uint8_t minVID;
uint8_t maxVID;
-
-
uint16_t setting;
};
typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
typedef struct VFT_TABLE_t VFT_TABLE_t;
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+ int32_t a0;
+ int32_t a1;
+ int32_t a2;
+ uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+ uint32_t Aconstant[3];
+ uint16_t DC_tol_sigma;
+ uint16_t Platform_mean;
+ uint16_t Platform_sigma;
+ uint16_t PSM_Age_CompFactor;
+ uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+ uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
#endif
typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
+struct SMU_QuadraticCoeffs {
+ int32_t m1;
+ uint32_t b;
+
+ int16_t m2;
+ uint8_t m1_shift;
+ uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
struct SMU74_Discrete_DpmTable {
SMU74_PIDController GraphicsPIDController;
uint8_t ThermOutPolarity;
uint8_t ThermOutMode;
uint8_t BootPhases;
- uint32_t Reserved[4];
+
+ uint8_t VRHotLevel;
+ uint8_t LdoRefSel;
+ uint8_t Reserved1[2];
+ uint16_t FanStartTemperature;
+ uint16_t FanStopTemperature;
+ uint16_t MaxVoltage;
+ uint16_t Reserved2;
+ uint32_t Reserved[1];
SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
SMU74_Discrete_MemoryLevel MemoryACPILevel;
uint32_t CurrSclkPllRange;
sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
+ GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+ SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
};
typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
-struct SMU_QuadraticCoeffs {
- int32_t m1;
- uint32_t b;
-
- int16_t m2;
- uint8_t m1_shift;
- uint8_t m2_shift;
-};
-typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
-
struct SMU74_Discrete_PmFuses {
uint8_t BapmVddCVidHiSidd[8];
uint8_t BapmVddCVidLoSidd[8];
#define DB_PCC_SHIFT 26
#define DB_EDC_SHIFT 27
+#define BTCGB0_Vdroop_Enable_MASK 0x1
+#define BTCGB1_Vdroop_Enable_MASK 0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT 0
+#define BTCGB1_Vdroop_Enable_SHIFT 1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
+
#pragma pack(pop)
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
/* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
/* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
- { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } },
- { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } },
- { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } }
+ { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+ { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+ { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
+ { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+ { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
+ { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+ { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
+ { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
};
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
- {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
+ {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
/**
* Set the address for reading/writing the SMC SRAM space.
&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ uint32_t efuse;
+
+ efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse &= 0x00000001;
+ if (efuse)
+ return true;
+
+ return false;
+}
+
/**
* Send a message to the SMC, and wait for its response.
*
*/
int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
+ int ret;
+
if (!polaris10_is_smc_ram_running(smumgr))
return -1;
+
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Previous Message.\n");
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ if (ret != 1)
+ printk("\n failed to send pre message %x ret is %d \n", msg, ret);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Message.\n");
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send message %x ret is %d \n", msg, ret);
return 0;
}
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
(cgs_handle_t)smu_data->smu_buffer.handle);
return -1;);
+ if (polaris10_is_hw_avfs_present(smumgr))
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+ else
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
return 0;
}
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
handle->smu_mgr = smumgr;
switch (smumgr->chip_family) {
- case AMD_FAMILY_CZ:
+ case AMDGPU_FAMILY_CZ:
cz_smum_init(smumgr);
break;
- case AMD_FAMILY_VI:
+ case AMDGPU_FAMILY_VI:
switch (smumgr->chip_id) {
case CHIP_TONGA:
tonga_smum_init(smumgr);
int smum_fini(struct pp_smumgr *smumgr)
{
+ kfree(smumgr->device);
kfree(smumgr);
return 0;
}
static int tonga_smu_fini(struct pp_smumgr *smumgr)
{
+ struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->sched_job = sched_job;
- __entry->fence = &sched_job->s_fence->base;
+ __entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
),
TP_fast_assign(
- __entry->fence = &fence->base;
+ __entry->fence = &fence->finished;
),
TP_printk("fence=%p signaled", __entry->fence)
);
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(1);
+ entity->fence_context = fence_context_alloc(2);
return 0;
}
s_fence = to_amd_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
- /* Fence is from the same scheduler */
- if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
- /* Ignore it when it is already scheduled */
- fence_put(entity->dependency);
- return false;
- }
- /* Wait for fence to be scheduled */
- entity->cb.func = amd_sched_entity_clear_dep;
- list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
- return true;
+ /*
+ * Fence is from the same scheduler, only need to wait for
+ * it to be scheduled
+ */
+ fence = fence_get(&s_fence->scheduled);
+ fence_put(entity->dependency);
+ entity->dependency = fence;
+ if (!fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
+ return true;
+
+ /* Ignore it when it is already scheduled */
+ fence_put(fence);
+ return false;
}
if (!fence_add_callback(entity->dependency, &entity->cb,
return added;
}
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
- schedule_work(&job->work_free_job);
-}
-
/* job_finish is called after hw fence signaled, and
* the job had already been deleted from ring_mirror_list
*/
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *next;
+ struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
+ /* remove job from ring_mirror_list */
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- if (cancel_delayed_work(&s_job->work_tdr))
- amd_sched_job_put(s_job);
+ struct amd_sched_job *next;
+
+ spin_unlock(&sched->job_list_lock);
+ cancel_delayed_work_sync(&s_job->work_tdr);
+ spin_lock(&sched->job_list_lock);
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (next) {
- INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(next);
+ if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
- }
}
+ spin_unlock(&sched->job_list_lock);
+ sched->ops->free_job(s_job);
}
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ finish_cb);
+ schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
+ spin_lock(&sched->job_list_lock);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
- {
- INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(s_job);
+ list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node) == s_job)
+ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+ struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ work_tdr.work);
+
+ job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job;
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ fence_put(s_job->s_fence->parent);
+ s_job->s_fence->parent = NULL;
+ }
+ }
+ spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job;
+ int r;
+
+ spin_lock(&sched->job_list_lock);
+ s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node);
+ if (s_job)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct fence *fence = sched->ops->run_job(s_job);
+ if (fence) {
+ s_fence->parent = fence_get(fence);
+ r = fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
+ if (r == -ENOENT)
+ amd_sched_process_job(fence, &s_fence->cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
+ fence_put(fence);
+ } else {
+ DRM_ERROR("Failed to run job!\n");
+ amd_sched_process_job(NULL, &s_fence->cb);
+ }
}
+ spin_unlock(&sched->job_list_lock);
}
/**
{
struct amd_sched_entity *entity = sched_job->s_entity;
- sched_job->use_sched = 1;
- fence_add_callback(&sched_job->s_fence->base,
- &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
+ fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
/* init a sched_job with basic field */
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref *refcount),
- void *owner, struct fence **fence)
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner)
{
- INIT_LIST_HEAD(&job->node);
- kref_init(&job->refcount);
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->s_fence->s_job = job;
- job->timeout_callback = timeout_cb;
- job->free_callback = free_cb;
+ INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_LIST_HEAD(&job->node);
+ INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
- if (fence)
- *fence = &job->s_fence->base;
return 0;
}
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
- unsigned long flags;
atomic_dec(&sched->hw_rq_count);
-
- /* remove job from ring_mirror_list */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_del_init(&s_fence->s_job->node);
- sched->ops->finish_job(s_fence->s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- amd_sched_fence_signal(s_fence);
+ amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->base);
+ fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+ if (kthread_should_park()) {
+ kthread_parkme();
+ return true;
+ }
+
+ return false;
+}
+
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity;
+ struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (entity = amd_sched_select_entity(sched)) ||
- kthread_should_stop());
+ (!amd_sched_blocked(sched) &&
+ (entity = amd_sched_select_entity(sched))) ||
+ kthread_should_stop());
if (!entity)
continue;
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_pre_schedule(sched, sched_job);
+ amd_sched_job_begin(sched_job);
+
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
+ s_fence->parent = fence_get(fence);
r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
#include <linux/kfifo.h>
#include <linux/fence.h>
-#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
-
struct amd_gpu_scheduler;
struct amd_sched_rq;
};
struct amd_sched_fence {
- struct fence base;
+ struct fence scheduled;
+ struct fence finished;
struct fence_cb cb;
- struct list_head scheduled_cb;
+ struct fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
- struct amd_sched_job *s_job;
};
struct amd_sched_job {
- struct kref refcount;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- bool use_sched; /* true if the job goes to scheduler */
- struct fence_cb cb_free_job;
- struct work_struct work_free_job;
- struct list_head node;
- struct delayed_work work_tdr;
- void (*timeout_callback) (struct work_struct *work);
- void (*free_callback)(struct kref *refcount);
+ struct fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
- struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+ if (f->ops == &amd_sched_fence_ops_scheduled)
+ return container_of(f, struct amd_sched_fence, scheduled);
- if (__f->base.ops == &amd_sched_fence_ops)
- return __f;
+ if (f->ops == &amd_sched_fence_ops_finished)
+ return container_of(f, struct amd_sched_fence, finished);
return NULL;
}
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*begin_job)(struct amd_sched_job *sched_job);
- void (*finish_job)(struct amd_sched_job *sched_job);
+ void (*timedout_job)(struct amd_sched_job *sched_job);
+ void (*free_job)(struct amd_sched_job *sched_job);
};
enum amd_sched_priority {
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref* refcount),
- void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
- if (job)
- kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
- if (job)
- kref_put(&job->refcount, job->free_callback);
-}
-
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
#endif
#include <drm/drmP.h>
#include "gpu_scheduler.h"
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+ void *owner)
{
struct amd_sched_fence *fence = NULL;
unsigned seq;
if (fence == NULL)
return NULL;
- INIT_LIST_HEAD(&fence->scheduled_cb);
fence->owner = owner;
- fence->sched = s_entity->sched;
+ fence->sched = entity->sched;
spin_lock_init(&fence->lock);
- seq = atomic_inc_return(&s_entity->fence_seq);
- fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
- s_entity->fence_context, seq);
+ seq = atomic_inc_return(&entity->fence_seq);
+ fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->base);
+ int ret = fence_signal(&fence->scheduled);
+
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job)
-{
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
- sched->ops->begin_job(s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ FENCE_TRACE(&fence->scheduled, "was already signaled\n");
}
-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ int ret = fence_signal(&fence->finished);
- set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
- list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
- list_del_init(&cur->node);
- cur->func(&s_fence->base, cur);
- }
+ if (!ret)
+ FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ else
+ FENCE_TRACE(&fence->finished, "was already signaled\n");
}
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
{
- call_rcu(&f->rcu, amd_sched_fence_free);
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ call_rcu(&fence->finished.rcu, amd_sched_fence_free);
}
-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_scheduled - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+ .get_driver_name = amd_sched_fence_get_driver_name,
+ .get_timeline_name = amd_sched_fence_get_timeline_name,
+ .enable_signaling = amd_sched_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
- .release = amd_sched_fence_release,
+ .release = amd_sched_fence_release_finished,
};
tristate "ARC PGU"
depends on DRM && OF
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
{
struct arcpgu_drm_private *arcpgu = dev->dev_private;
- if (arcpgu->fbdev)
- drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
+ drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
-int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(arcpgu->regs)) {
- dev_err(drm->dev, "Could not remap IO mem\n");
+ if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
- }
dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
arc_pgu_read(arcpgu, ARCPGU_REG_ID));
encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+ of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
- ret = arcpgu_drm_sim_init(drm, 0);
+ ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
}
return 0;
}
-int arcpgu_unload(struct drm_device *drm)
+static int arcpgu_unload(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
depends on COMMON_CLK
select DRM_ARM
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
{
struct hdlcd_drm_private *hdlcd = drm->dev_private;
- if (hdlcd->fbdev)
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
+ drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
if (stat & VSYNC_IRQ)
- drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_handle_vblank(&dcrtc->crtc);
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
tristate "AST server chips"
depends on DRM && PCI
select DRM_TTM
- select FB_SYS_COPYAREA
- select FB_SYS_FILLRECT
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable
static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
- if (ast_fb->obj)
- drm_gem_object_unreference_unlocked(ast_fb->obj);
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
{
}
-static int ast_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
.evict_flags = ast_bo_evict_flags,
- .move = ast_bo_move,
+ .move = NULL,
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
{
- drm_handle_vblank(c->dev, 0);
+ drm_crtc_handle_vblank(c);
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
if (!ret)
ret = atmel_hlcdc_check_endpoint(dev, &ep);
- of_node_put(ep_np);
- if (ret)
+ if (ret) {
+ of_node_put(ep_np);
return ret;
+ }
}
for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
if (!ret)
ret = atmel_hlcdc_attach_endpoint(dev, &ep);
- of_node_put(ep_np);
- if (ret)
+ if (ret) {
+ of_node_put(ep_np);
return ret;
+ }
}
return 0;
atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
factor_reg);
+ } else {
+ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
}
}
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_TTM
help
Choose this option for qemu.
{
}
-static int bochs_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-}
-
-
static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
.evict_flags = bochs_bo_evict_flags,
- .move = bochs_bo_move,
+ .move = NULL,
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
- if (bochs_fb->obj)
- drm_gem_object_unreference_unlocked(bochs_fb->obj);
+
+ drm_gem_object_unreference_unlocked(bochs_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
---help---
Silicon Image sii902x bridge chip driver.
+config DRM_TOSHIBA_TC358767
+ tristate "Toshiba TC358767 eDP bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ ---help---
+ Toshiba TC358767 eDP bridge chip driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
+source "drivers/gpu/drm/bridge/adv7511/Kconfig"
+
endmenu
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
--- /dev/null
+config DRM_I2C_ADV7511
+ tristate "AV7511 encoder"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+
+config DRM_I2C_ADV7533
+ bool "ADV7533 encoder"
+ depends on DRM_I2C_ADV7511
+ select DRM_MIPI_DSI
+ default y
+ help
+ Support for the Analog Devices ADV7533 DSI to HDMI encoder.
--- /dev/null
+adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
--- /dev/null
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define ADV7511_REG_CHIP_REVISION 0x00
+#define ADV7511_REG_N0 0x01
+#define ADV7511_REG_N1 0x02
+#define ADV7511_REG_N2 0x03
+#define ADV7511_REG_SPDIF_FREQ 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1 0x05
+#define ADV7511_REG_CTS_AUTOMATIC2 0x06
+#define ADV7511_REG_CTS_MANUAL0 0x07
+#define ADV7511_REG_CTS_MANUAL1 0x08
+#define ADV7511_REG_CTS_MANUAL2 0x09
+#define ADV7511_REG_AUDIO_SOURCE 0x0a
+#define ADV7511_REG_AUDIO_CONFIG 0x0b
+#define ADV7511_REG_I2S_CONFIG 0x0c
+#define ADV7511_REG_I2S_WIDTH 0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
+#define ADV7511_REG_AUDIO_CFG1 0x12
+#define ADV7511_REG_AUDIO_CFG2 0x13
+#define ADV7511_REG_AUDIO_CFG3 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
+#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION 0x3b
+#define ADV7511_REG_VIC_MANUAL 0x3c
+#define ADV7511_REG_VIC_SEND 0x3d
+#define ADV7511_REG_VIC_DETECTED 0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
+#define ADV7511_REG_PACKET_ENABLE0 0x40
+#define ADV7511_REG_POWER 0x41
+#define ADV7511_REG_STATUS 0x42
+#define ADV7511_REG_EDID_I2C_ADDR 0x43
+#define ADV7511_REG_PACKET_ENABLE1 0x44
+#define ADV7511_REG_PACKET_I2C_ADDR 0x45
+#define ADV7511_REG_DSD_ENABLE 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
+#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
+#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
+#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
+#define ADV7511_REG_INT(x) (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV 0x9d
+#define ADV7511_REG_PLL_STATUS 0x9e
+#define ADV7511_REG_HDMI_POWER 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
+#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS 0xb8
+#define ADV7511_REG_BCAPS 0xbe
+#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT 0xc4
+#define ADV7511_REG_DDC_STATUS 0xc8
+#define ADV7511_REG_EDID_READ_CTRL 0xc9
+#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
+#define ADV7511_REG_POWER2 0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV 0xde
+#define ADV7511_REG_ARC_CTRL 0xdf
+#define ADV7511_REG_CEC_I2C_ADDR 0xe1
+#define ADV7511_REG_CEC_CTRL 0xe2
+#define ADV7511_REG_CHIP_ID_HIGH 0xf5
+#define ADV7511_REG_CHIP_ID_LOW 0xf6
+
+#define ADV7511_CSC_ENABLE BIT(7)
+#define ADV7511_CSC_UPDATE_MODE BIT(5)
+
+#define ADV7511_INT0_HPD BIT(7)
+#define ADV7511_INT0_VSYNC BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
+#define ADV7511_INT0_EDID_READY BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR BIT(7)
+#define ADV7511_INT1_BKSV BIT(6)
+#define ADV7511_INT1_CEC_TX_READY BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK 0x2
+#define ADV7511_HDMI_CFG_MODE_DVI 0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
+
+#define ADV7511_AUDIO_SELECT_I2C 0x0
+#define ADV7511_AUDIO_SELECT_SPDIF 0x1
+#define ADV7511_AUDIO_SELECT_DSD 0x2
+#define ADV7511_AUDIO_SELECT_HBR 0x3
+#define ADV7511_AUDIO_SELECT_DST 0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16 0x2
+#define ADV7511_I2S_SAMPLE_LEN_20 0x3
+#define ADV7511_I2S_SAMPLE_LEN_18 0x4
+#define ADV7511_I2S_SAMPLE_LEN_22 0x5
+#define ADV7511_I2S_SAMPLE_LEN_19 0x8
+#define ADV7511_I2S_SAMPLE_LEN_23 0x9
+#define ADV7511_I2S_SAMPLE_LEN_24 0xb
+#define ADV7511_I2S_SAMPLE_LEN_17 0xc
+#define ADV7511_I2S_SAMPLE_LEN_21 0xd
+
+#define ADV7511_SAMPLE_FREQ_44100 0x0
+#define ADV7511_SAMPLE_FREQ_48000 0x2
+#define ADV7511_SAMPLE_FREQ_32000 0x3
+#define ADV7511_SAMPLE_FREQ_88200 0x8
+#define ADV7511_SAMPLE_FREQ_96000 0xa
+#define ADV7511_SAMPLE_FREQ_176400 0xc
+#define ADV7511_SAMPLE_FREQ_192000 0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
+#define ADV7511_STATUS_HPD BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
+#define ADV7511_PACKET_ENABLE_GM BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
+
+#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S 0
+#define ADV7511_AUDIO_SOURCE_SPDIF 1
+
+#define ADV7511_I2S_FORMAT_I2S 0
+#define ADV7511_I2S_FORMAT_RIGHT_J 1
+#define ADV7511_I2S_FORMAT_LEFT_J 2
+
+#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+
+enum adv7511_input_clock {
+ ADV7511_INPUT_CLOCK_1X,
+ ADV7511_INPUT_CLOCK_2X,
+ ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+ ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+ ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+ ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+ ADV7511_INPUT_SYNC_PULSE_DE = 0,
+ ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+ ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+ ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
+ * the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+ ADV7511_SYNC_POLARITY_PASSTHROUGH,
+ ADV7511_SYNC_POLARITY_LOW,
+ ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 hardware configuration
+ * @input_color_depth: Number of bits per color component (8, 10 or 12)
+ * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock: The input video clock style (1x, 2x, DDR)
+ * @input_style: The input component arrangement variant
+ * @input_justification: Video input format bit justification
+ * @clock_delay: Clock delay for the input clock (in ps)
+ * @embedded_sync: Video input uses BT.656-style embedded sync
+ * @sync_pulse: Select the sync pulse
+ * @vsync_polarity: vsync input signal configuration
+ * @hsync_polarity: hsync input signal configuration
+ */
+struct adv7511_link_config {
+ unsigned int input_color_depth;
+ enum hdmi_colorspace input_colorspace;
+ enum adv7511_input_clock input_clock;
+ unsigned int input_style;
+ enum adv7511_input_justification input_justification;
+
+ int clock_delay;
+
+ bool embedded_sync;
+ enum adv7511_input_sync_pulse sync_pulse;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scalled by a factor of four
+ */
+enum adv7511_csc_scaling {
+ ADV7511_CSC_SCALING_1 = 0,
+ ADV7511_CSC_SCALING_2 = 1,
+ ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 hardware configuration
+ * @csc_enable: Whether to enable color space conversion
+ * @csc_scaling_factor: Color space conversion scaling factor
+ * @csc_coefficents: Color space conversion coefficents
+ * @hdmi_mode: Whether to use HDMI or DVI output mode
+ * @avi_infoframe: HDMI infoframe
+ */
+struct adv7511_video_config {
+ bool csc_enable;
+ enum adv7511_csc_scaling csc_scaling_factor;
+ const uint16_t *csc_coefficents;
+
+ bool hdmi_mode;
+ struct hdmi_avi_infoframe avi_infoframe;
+};
+
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+ bool use_timing_gen;
+
+ enum adv7511_type type;
+};
+
+#ifdef CONFIG_DRM_I2C_ADV7533
+void adv7533_dsi_power_on(struct adv7511 *adv);
+void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+int adv7533_patch_registers(struct adv7511 *adv);
+void adv7533_uninit_cec(struct adv7511 *adv);
+int adv7533_init_cec(struct adv7511 *adv);
+int adv7533_attach_dsi(struct adv7511 *adv);
+void adv7533_detach_dsi(struct adv7511 *adv);
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
+#else
+static inline void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_mode_set(struct adv7511 *adv,
+ struct drm_display_mode *mode)
+{
+}
+
+static inline int adv7533_patch_registers(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline void adv7533_uninit_cec(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_init_cec(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline void adv7533_detach_dsi(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __DRM_I2C_ADV7511_H__ */
--- /dev/null
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+#include "adv7511.h"
+
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7511_fixed_registers[] = {
+ { 0x98, 0x03 },
+ { 0x9a, 0xe0 },
+ { 0x9c, 0x30 },
+ { 0x9d, 0x61 },
+ { 0xa2, 0xa4 },
+ { 0xa3, 0xa4 },
+ { 0xe0, 0xd0 },
+ { 0xf9, 0x00 },
+ { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+ 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+ 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+ 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+ 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+ 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+ 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+ 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+ 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+ 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+ 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+ 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+ 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+ 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADV7511_REG_CHIP_REVISION:
+ case ADV7511_REG_SPDIF_FREQ:
+ case ADV7511_REG_CTS_AUTOMATIC1:
+ case ADV7511_REG_CTS_AUTOMATIC2:
+ case ADV7511_REG_VIC_DETECTED:
+ case ADV7511_REG_VIC_SEND:
+ case ADV7511_REG_AUX_VIC_DETECTED:
+ case ADV7511_REG_STATUS:
+ case ADV7511_REG_GC(1):
+ case ADV7511_REG_INT(0):
+ case ADV7511_REG_INT(1):
+ case ADV7511_REG_PLL_STATUS:
+ case ADV7511_REG_AN(0):
+ case ADV7511_REG_AN(1):
+ case ADV7511_REG_AN(2):
+ case ADV7511_REG_AN(3):
+ case ADV7511_REG_AN(4):
+ case ADV7511_REG_AN(5):
+ case ADV7511_REG_AN(6):
+ case ADV7511_REG_AN(7):
+ case ADV7511_REG_HDCP_STATUS:
+ case ADV7511_REG_BCAPS:
+ case ADV7511_REG_BKSV(0):
+ case ADV7511_REG_BKSV(1):
+ case ADV7511_REG_BKSV(2):
+ case ADV7511_REG_BKSV(3):
+ case ADV7511_REG_BKSV(4):
+ case ADV7511_REG_DDC_STATUS:
+ case ADV7511_REG_EDID_READ_CTRL:
+ case ADV7511_REG_BSTATUS(0):
+ case ADV7511_REG_BSTATUS(1):
+ case ADV7511_REG_CHIP_ID_HIGH:
+ case ADV7511_REG_CHIP_ID_LOW:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults_raw = adv7511_register_defaults,
+ .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+ .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+ const uint16_t *coeff,
+ unsigned int scaling_factor)
+{
+ unsigned int i;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+ if (enable) {
+ for (i = 0; i < 12; ++i) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_CSC_UPPER(i),
+ 0x1f, coeff[i] >> 8);
+ regmap_write(adv7511->regmap,
+ ADV7511_REG_CSC_LOWER(i),
+ coeff[i] & 0xff);
+ }
+ }
+
+ if (enable)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0xe0, 0x80 | (scaling_factor << 5));
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0x80, 0x00);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, 0);
+}
+
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0xff);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0xff);
+ }
+
+ return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0x00);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0x00);
+ }
+
+ return 0;
+}
+
+/* Coefficients for adv7511 color space conversion */
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+ 0x0734, 0x04ad, 0x0000, 0x1c1b,
+ 0x1ddc, 0x04ad, 0x1f24, 0x0135,
+ 0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+ struct drm_connector *connector,
+ bool rgb)
+{
+ struct adv7511_video_config config;
+ bool output_format_422, output_format_ycbcr;
+ unsigned int mode;
+ uint8_t infoframe[17];
+
+ if (adv7511->edid)
+ config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+ else
+ config.hdmi_mode = false;
+
+ hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+ config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ if (rgb) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ } else {
+ config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+ config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
+
+ if ((connector->display_info.color_formats &
+ DRM_COLOR_FORMAT_YCRCB422) &&
+ config.hdmi_mode) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace =
+ HDMI_COLORSPACE_YUV422;
+ } else {
+ config.csc_enable = true;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ }
+ }
+
+ if (config.hdmi_mode) {
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+ switch (config.avi_infoframe.colorspace) {
+ case HDMI_COLORSPACE_YUV444:
+ output_format_422 = false;
+ output_format_ycbcr = true;
+ break;
+ case HDMI_COLORSPACE_YUV422:
+ output_format_422 = true;
+ output_format_ycbcr = true;
+ break;
+ default:
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ break;
+ }
+ } else {
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ }
+
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+ adv7511_set_colormap(adv7511, config.csc_enable,
+ config.csc_coefficents,
+ config.csc_scaling_factor);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+ (output_format_422 << 7) | output_format_ycbcr);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+ ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+ hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+ sizeof(infoframe));
+
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ infoframe + 1, sizeof(infoframe) - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+ const struct adv7511_link_config *config)
+{
+ /*
+ * The input style values documented in the datasheet don't match the
+ * hardware register field values :-(
+ */
+ static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+ unsigned int clock_delay;
+ unsigned int color_depth;
+ unsigned int input_id;
+
+ clock_delay = (config->clock_delay + 1200) / 400;
+ color_depth = config->input_color_depth == 8 ? 3
+ : (config->input_color_depth == 10 ? 1 : 2);
+
+ /* TODO Support input ID 6 */
+ if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+ input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+ ? 5 : 0;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+ input_id = config->embedded_sync ? 8 : 7;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+ input_id = config->embedded_sync ? 4 : 3;
+ else
+ input_id = config->embedded_sync ? 2 : 1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+ input_id);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+ (color_depth << 4) |
+ (input_styles[config->input_style] << 2));
+ regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+ config->input_justification << 3);
+ regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+ config->sync_pulse << 2);
+
+ regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+ adv7511->embedded_sync = config->embedded_sync;
+ adv7511->hsync_polarity = config->hsync_polarity;
+ adv7511->vsync_polarity = config->vsync_polarity;
+ adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ adv7511->current_edid_segment = -1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
+
+ /*
+ * Per spec it is allowed to pulse the HPD signal to indicate that the
+ * EDID information has changed. Some monitors do this when they wakeup
+ * from standby or are enabled. When the HPD goes low the adv7511 is
+ * reset and the outputs are disabled which might cause the monitor to
+ * go to standby again. To avoid this we ignore the HPD pin for the
+ * first few seconds after enabling the output.
+ */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
+
+ /*
+ * Most of the registers are reset during power down or when HPD is low.
+ */
+ regcache_sync(adv7511->regmap);
+
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_on(adv7511);
+
+ adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ /* TODO: setup additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_off(adv7511);
+
+ adv7511->powered = false;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+ unsigned int irq0;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return false;
+
+ if (irq0 & ADV7511_INT0_HPD) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_HPD);
+ return true;
+ }
+
+ return false;
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
+{
+ unsigned int irq0, irq1;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
+
+ if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+ drm_helper_hpd_irq_event(adv7511->connector.dev);
+
+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+ adv7511->edid_read = true;
+
+ if (adv7511->i2c_main->irq)
+ wake_up_all(&adv7511->wq);
+ }
+
+ return 0;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+ int ret;
+
+ ret = adv7511_irq_process(adv7511, true);
+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+{
+ int ret;
+
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+ adv7511->edid_read, msecs_to_jiffies(timeout));
+ } else {
+ for (; timeout > 0; timeout -= 25) {
+ ret = adv7511_irq_process(adv7511, false);
+ if (ret < 0)
+ break;
+
+ if (adv7511->edid_read)
+ break;
+
+ msleep(25);
+ }
+ }
+
+ return adv7511->edid_read ? 0 : -EIO;
+}
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+{
+ struct adv7511 *adv7511 = data;
+ struct i2c_msg xfer[2];
+ uint8_t offset;
+ unsigned int i;
+ int ret;
+
+ if (len > 128)
+ return -EINVAL;
+
+ if (adv7511->current_edid_segment != block / 2) {
+ unsigned int status;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if (status != 2) {
+ adv7511->edid_read = false;
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+ ret = adv7511_wait_for_edid(adv7511, 200);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Break this apart, hopefully more I2C controllers will
+ * support 64 byte transfers than 256 byte transfers
+ */
+
+ xfer[0].addr = adv7511->i2c_edid->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &offset;
+ xfer[1].addr = adv7511->i2c_edid->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 64;
+ xfer[1].buf = adv7511->edid_buf;
+
+ offset = 0;
+
+ for (i = 0; i < 4; ++i) {
+ ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+ ARRAY_SIZE(xfer));
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+
+ xfer[1].buf += 64;
+ offset += 64;
+ }
+
+ adv7511->current_edid_segment = block / 2;
+ }
+
+ if (block % 2 == 0)
+ memcpy(buf, adv7511->edid_buf, len);
+ else
+ memcpy(buf, adv7511->edid_buf + 128, len);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * ADV75xx helpers
+ */
+
+static int adv7511_get_modes(struct adv7511 *adv7511,
+ struct drm_connector *connector)
+{
+ struct edid *edid;
+ unsigned int count;
+
+ /* Reading the EDID only works if the device is powered */
+ if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
+ adv7511->current_edid_segment = -1;
+ }
+
+ edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+ if (!adv7511->powered)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+
+ kfree(adv7511->edid);
+ adv7511->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+ return count;
+}
+
+static enum drm_connector_status
+adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+{
+ enum drm_connector_status status;
+ unsigned int val;
+ bool hpd;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ return connector_status_disconnected;
+
+ if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ hpd = adv7511_hpd(adv7511);
+
+ /* The chip resets itself when the cable is disconnected, so in case
+ * there is a pending HPD interrupt and the cable is connected there was
+ * at least one transition from disconnected to connected and the chip
+ * has to be reinitialized. */
+ if (status == connector_status_connected && hpd && adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ adv7511_get_modes(adv7511, connector);
+ if (adv7511->status == connector_status_connected)
+ status = connector_status_disconnected;
+ } else {
+ /* Renable HPD sensing */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_BOTH);
+ }
+
+ adv7511->status = status;
+ return status;
+}
+
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void adv7511_mode_set(struct adv7511 *adv7511,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ unsigned int low_refresh_rate;
+ unsigned int hsync_polarity = 0;
+ unsigned int vsync_polarity = 0;
+
+ if (adv7511->embedded_sync) {
+ unsigned int hsync_offset, hsync_len;
+ unsigned int vsync_offset, vsync_len;
+
+ hsync_offset = adj_mode->crtc_hsync_start -
+ adj_mode->crtc_hdisplay;
+ vsync_offset = adj_mode->crtc_vsync_start -
+ adj_mode->crtc_vdisplay;
+ hsync_len = adj_mode->crtc_hsync_end -
+ adj_mode->crtc_hsync_start;
+ vsync_len = adj_mode->crtc_vsync_end -
+ adj_mode->crtc_vsync_start;
+
+ /* The hardware vsync generator has a off-by-one bug */
+ vsync_offset += 1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+ ((hsync_offset >> 10) & 0x7) << 5);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+ (hsync_offset >> 2) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+ ((hsync_offset & 0x3) << 6) |
+ ((hsync_len >> 4) & 0x3f));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+ ((hsync_len & 0xf) << 4) |
+ ((vsync_offset >> 6) & 0xf));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+ ((vsync_offset & 0x3f) << 2) |
+ ((vsync_len >> 8) & 0x3));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+ vsync_len & 0xff);
+
+ hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+ } else {
+ enum adv7511_sync_polarity mode_hsync_polarity;
+ enum adv7511_sync_polarity mode_vsync_polarity;
+
+ /**
+ * If the input signal is always low or always high we want to
+ * invert or let it passthrough depending on the polarity of the
+ * current mode.
+ **/
+ if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adv7511->hsync_polarity != mode_hsync_polarity &&
+ adv7511->hsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ hsync_polarity = 1;
+
+ if (adv7511->vsync_polarity != mode_vsync_polarity &&
+ adv7511->vsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ vsync_polarity = 1;
+ }
+
+ if (mode->vrefresh <= 24000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+ else if (mode->vrefresh <= 25000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+ else if (mode->vrefresh <= 30000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+ else
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ regmap_update_bits(adv7511->regmap, 0x17,
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+ if (adv7511->type == ADV7533)
+ adv7533_mode_set(adv7511, adj_mode);
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
+ /*
+ * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+ * supposed to give better results.
+ */
+
+ adv7511->f_tmds = mode->clock;
+}
+
+/* Connector funcs */
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+static int adv7511_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static enum drm_mode_status
+adv7511_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
+ .get_modes = adv7511_connector_get_modes,
+ .mode_valid = adv7511_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7511_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7511_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7511_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7511_bridge_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7511_bridge_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_off(adv);
+}
+
+static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7511_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7511_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7511_connector_helper_funcs);
+ drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ if (adv->type == ADV7533)
+ ret = adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7511_bridge_funcs = {
+ .enable = adv7511_bridge_enable,
+ .disable = adv7511_bridge_disable,
+ .mode_set = adv7511_bridge_mode_set,
+ .attach = adv7511_bridge_attach,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int adv7511_parse_dt(struct device_node *np,
+ struct adv7511_link_config *config)
+{
+ const char *str;
+ int ret;
+
+ of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+ if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+ config->input_color_depth != 12)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-colorspace", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "rgb"))
+ config->input_colorspace = HDMI_COLORSPACE_RGB;
+ else if (!strcmp(str, "yuv422"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV422;
+ else if (!strcmp(str, "yuv444"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV444;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-clock", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "1x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_1X;
+ else if (!strcmp(str, "2x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_2X;
+ else if (!strcmp(str, "ddr"))
+ config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+ else
+ return -EINVAL;
+
+ if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+ config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+ ret = of_property_read_u32(np, "adi,input-style",
+ &config->input_style);
+ if (ret)
+ return ret;
+
+ if (config->input_style < 1 || config->input_style > 3)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-justification",
+ &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "left"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_LEFT;
+ else if (!strcmp(str, "evenly"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_EVENLY;
+ else if (!strcmp(str, "right"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_RIGHT;
+ else
+ return -EINVAL;
+
+ } else {
+ config->input_style = 1;
+ config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+ }
+
+ of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+ if (config->clock_delay < -1200 || config->clock_delay > 1600)
+ return -EINVAL;
+
+ config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+ /* Hardcode the sync pulse configurations for now. */
+ config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+ config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+ config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+ return 0;
+}
+
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct adv7511_link_config link_config;
+ struct adv7511 *adv7511;
+ struct device *dev = &i2c->dev;
+ unsigned int val;
+ int ret;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+ if (!adv7511)
+ return -ENOMEM;
+
+ adv7511->powered = false;
+ adv7511->status = connector_status_disconnected;
+
+ if (dev->of_node)
+ adv7511->type = (enum adv7511_type)of_device_get_match_data(dev);
+ else
+ adv7511->type = id->driver_data;
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
+ if (ret)
+ return ret;
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+ * inactive to wake up the encoder.
+ */
+ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+ if (IS_ERR(adv7511->gpio_pd))
+ return PTR_ERR(adv7511->gpio_pd);
+
+ if (adv7511->gpio_pd) {
+ mdelay(5);
+ gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+ }
+
+ adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+ if (IS_ERR(adv7511->regmap))
+ return PTR_ERR(adv7511->regmap);
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "Rev. %d\n", val);
+
+ if (adv7511->type == ADV7511)
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ else
+ ret = adv7533_patch_registers(adv7511);
+ if (ret)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+ packet_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+ adv7511_packet_disable(adv7511, 0xffff);
+
+ adv7511->i2c_main = i2c;
+ adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+ if (!adv7511->i2c_edid)
+ return -ENOMEM;
+
+ if (adv7511->type == ADV7533) {
+ ret = adv7533_init_cec(adv7511);
+ if (ret)
+ goto err_i2c_unregister_edid;
+ }
+
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_unregister_cec;
+ }
+
+ /* CEC is unused for now */
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+
+ adv7511_power_off(adv7511);
+
+ i2c_set_clientdata(i2c, adv7511);
+
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ adv7511->bridge.funcs = &adv7511_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7511 bridge\n");
+ goto err_unregister_cec;
+ }
+
+ return 0;
+
+err_unregister_cec:
+ adv7533_uninit_cec(adv7511);
+err_i2c_unregister_edid:
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ if (adv7511->type == ADV7533) {
+ adv7533_detach_dsi(adv7511);
+ adv7533_uninit_cec(adv7511);
+ }
+
+ drm_bridge_remove(&adv7511->bridge);
+
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ kfree(adv7511->edid);
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+ { "adv7533", ADV7533 },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *)ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+ { .compatible = "adi,adv7533", .data = (void *)ADV7533 },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
+static struct mipi_dsi_driver adv7533_dsi_driver = {
+ .driver.name = "adv7533",
+};
+
+static struct i2c_driver adv7511_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+};
+
+static int __init adv7511_init(void)
+{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_register(&adv7533_dsi_driver);
+
+ return i2c_add_driver(&adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+ i2c_del_driver(&adv7511_driver);
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_graph.h>
+
+#include "adv7511.h"
+
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+};
+
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ struct drm_display_mode *mode = &adv->curr_mode;
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* set pixel clock divider mode */
+ regmap_write(adv->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
+ /* horizontal porch params */
+ regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+
+ if (adv->use_timing_gen)
+ adv7511_dsi_config_timing_gen(adv);
+
+ /* set number of dsi lanes */
+ regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
+
+ if (adv->use_timing_gen) {
+ /* reset internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ } else {
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+ }
+
+ /* enable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv->regmap_cec, 0x55, 0x00);
+
+ regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+}
+
+void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+ /* disable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x0b);
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+}
+
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ int lanes, ret;
+
+ if (adv->num_dsi_lanes != 4)
+ return;
+
+ if (mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "failed to change host lanes\n");
+ }
+}
+
+int adv7533_patch_registers(struct adv7511 *adv)
+{
+ return regmap_register_patch(adv->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+}
+
+void adv7533_uninit_cec(struct adv7511 *adv)
+{
+ i2c_unregister_device(adv->i2c_cec);
+}
+
+static const int cec_i2c_addr = 0x78;
+
+int adv7533_init_cec(struct adv7511 *adv)
+{
+ int ret;
+
+ adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+ if (!adv->i2c_cec)
+ return -ENOMEM;
+
+ adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv->regmap_cec)) {
+ ret = PTR_ERR(adv->regmap_cec);
+ goto err;
+ }
+
+ ret = regmap_register_patch(adv->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ adv7533_uninit_cec(adv);
+ return ret;
+}
+
+int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ struct device *dev = &adv->i2c_main->dev;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ int ret = 0;
+ const struct mipi_dsi_device_info info = { .type = "adv7533",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(adv->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv->dsi = dsi;
+
+ dsi->lanes = adv->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+ return ret;
+}
+
+void adv7533_detach_dsi(struct adv7511 *adv)
+{
+ mipi_dsi_detach(adv->dsi);
+ mipi_dsi_device_unregister(adv->dsi);
+}
+
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+ u32 num_lanes;
+ struct device_node *endpoint;
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint)
+ return -ENODEV;
+
+ adv->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv->host_node) {
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv->host_node);
+
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
+ /* TODO: Check if these need to be parsed by DT or not */
+ adv->rgb = true;
+ adv->embedded_sync = false;
+
+ return 0;
+}
num_modes += drm_panel_get_modes(dp->plat_data->panel);
if (dp->plat_data->get_modes)
- num_modes += dp->plat_data->get_modes(dp->plat_data);
+ num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
return num_modes;
}
switch (dp->plat_data->dev_type) {
case RK3288_DP:
+ case RK3399_EDP:
/*
* Like Rk3288 DisplayPort TRM indicate that "Main link
* containing 4 physical lanes of 2.7/1.62 Gbps/lane".
};
enum dp_irq_type {
- DP_IRQ_TYPE_HP_CABLE_IN,
- DP_IRQ_TYPE_HP_CABLE_OUT,
- DP_IRQ_TYPE_HP_CHANGE,
- DP_IRQ_TYPE_UNKNOWN,
+ DP_IRQ_TYPE_HP_CABLE_IN = BIT(0),
+ DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1),
+ DP_IRQ_TYPE_HP_CHANGE = BIT(2),
+ DP_IRQ_TYPE_UNKNOWN = BIT(3),
};
struct video_info {
reg = SEL_24M | TX_DVDD_BIT_1_0625V;
writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
- writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+ reg = REF_CLK_24M;
+ if (dp->plat_data->dev_type == RK3288_DP)
+ reg ^= REF_CLK_MASK;
+
+ writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
u32 reg;
u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
phy_pd_addr = ANALOGIX_DP_PD;
switch (block) {
analogix_dp_reset_aux(dp);
/* Disable AUX transaction H/W retry */
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
AUX_HW_RETRY_COUNT_SEL(3) |
AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
#define HSYNC_POLARITY_CFG (0x1 << 0)
/* ANALOGIX_DP_PLL_REG_1 */
-#define REF_CLK_24M (0x1 << 1)
-#define REF_CLK_27M (0x0 << 1)
+#define REF_CLK_24M (0x1 << 0)
+#define REF_CLK_27M (0x0 << 0)
+#define REF_CLK_MASK (0x1 << 0)
/* ANALOGIX_DP_LANE_MAP */
#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
}
static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = dw_hdmi_connector_detect,
- .destroy = dw_hdmi_connector_destroy,
- .force = dw_hdmi_connector_force,
-};
-
-static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
drm_connector_helper_add(&hdmi->connector,
&dw_hdmi_connector_helper_funcs);
- if (drm_core_check_feature(drm, DRIVER_ATOMIC))
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- else
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init(drm, &hdmi->connector,
+ &dw_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
--- /dev/null
+/*
+ * tc358767 eDP bridge driver
+ *
+ * Copyright (C) 2016 CogentEmbedded Inc
+ * Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
+ *
+ * Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+/* Registers */
+
+/* Display Parallel Interface */
+#define DPIPXLFMT 0x0440
+#define VS_POL_ACTIVE_LOW (1 << 10)
+#define HS_POL_ACTIVE_LOW (1 << 9)
+#define DE_POL_ACTIVE_HIGH (0 << 8)
+#define SUB_CFG_TYPE_CONFIG1 (0 << 2) /* LSB aligned */
+#define SUB_CFG_TYPE_CONFIG2 (1 << 2) /* Loosely Packed */
+#define SUB_CFG_TYPE_CONFIG3 (2 << 2) /* LSB aligned 8-bit */
+#define DPI_BPP_RGB888 (0 << 0)
+#define DPI_BPP_RGB666 (1 << 0)
+#define DPI_BPP_RGB565 (2 << 0)
+
+/* Video Path */
+#define VPCTRL0 0x0450
+#define OPXLFMT_RGB666 (0 << 8)
+#define OPXLFMT_RGB888 (1 << 8)
+#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */
+#define FRMSYNC_ENABLED (1 << 4) /* Video Timing Gen Enabled */
+#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */
+#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */
+#define HTIM01 0x0454
+#define HTIM02 0x0458
+#define VTIM01 0x045c
+#define VTIM02 0x0460
+#define VFUEN0 0x0464
+#define VFUEN BIT(0) /* Video Frame Timing Upload */
+
+/* System */
+#define TC_IDREG 0x0500
+#define SYSCTRL 0x0510
+#define DP0_AUDSRC_NO_INPUT (0 << 3)
+#define DP0_AUDSRC_I2S_RX (1 << 3)
+#define DP0_VIDSRC_NO_INPUT (0 << 0)
+#define DP0_VIDSRC_DSI_RX (1 << 0)
+#define DP0_VIDSRC_DPI_RX (2 << 0)
+#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+
+/* Control */
+#define DP0CTL 0x0600
+#define VID_MN_GEN BIT(6) /* Auto-generate M/N values */
+#define EF_EN BIT(5) /* Enable Enhanced Framing */
+#define VID_EN BIT(1) /* Video transmission enable */
+#define DP_EN BIT(0) /* Enable DPTX function */
+
+/* Clocks */
+#define DP0_VIDMNGEN0 0x0610
+#define DP0_VIDMNGEN1 0x0614
+#define DP0_VMNGENSTATUS 0x0618
+
+/* Main Channel */
+#define DP0_SECSAMPLE 0x0640
+#define DP0_VIDSYNCDELAY 0x0644
+#define DP0_TOTALVAL 0x0648
+#define DP0_STARTVAL 0x064c
+#define DP0_ACTIVEVAL 0x0650
+#define DP0_SYNCVAL 0x0654
+#define DP0_MISC 0x0658
+#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
+#define BPC_6 (0 << 5)
+#define BPC_8 (1 << 5)
+
+/* AUX channel */
+#define DP0_AUXCFG0 0x0660
+#define DP0_AUXCFG1 0x0664
+#define AUX_RX_FILTER_EN BIT(16)
+
+#define DP0_AUXADDR 0x0668
+#define DP0_AUXWDATA(i) (0x066c + (i) * 4)
+#define DP0_AUXRDATA(i) (0x067c + (i) * 4)
+#define DP0_AUXSTATUS 0x068c
+#define AUX_STATUS_MASK 0xf0
+#define AUX_STATUS_SHIFT 4
+#define AUX_TIMEOUT BIT(1)
+#define AUX_BUSY BIT(0)
+#define DP0_AUXI2CADR 0x0698
+
+/* Link Training */
+#define DP0_SRCCTRL 0x06a0
+#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
+#define DP0_SRCCTRL_EN810B BIT(12)
+#define DP0_SRCCTRL_NOTP (0 << 8)
+#define DP0_SRCCTRL_TP1 (1 << 8)
+#define DP0_SRCCTRL_TP2 (2 << 8)
+#define DP0_SRCCTRL_LANESKEW BIT(7)
+#define DP0_SRCCTRL_SSCG BIT(3)
+#define DP0_SRCCTRL_LANES_1 (0 << 2)
+#define DP0_SRCCTRL_LANES_2 (1 << 2)
+#define DP0_SRCCTRL_BW27 (1 << 1)
+#define DP0_SRCCTRL_BW162 (0 << 1)
+#define DP0_SRCCTRL_AUTOCORRECT BIT(0)
+#define DP0_LTSTAT 0x06d0
+#define LT_LOOPDONE BIT(13)
+#define LT_STATUS_MASK (0x1f << 8)
+#define LT_CHANNEL1_EQ_BITS (DP_CHANNEL_EQ_BITS << 4)
+#define LT_INTERLANE_ALIGN_DONE BIT(3)
+#define LT_CHANNEL0_EQ_BITS (DP_CHANNEL_EQ_BITS)
+#define DP0_SNKLTCHGREQ 0x06d4
+#define DP0_LTLOOPCTRL 0x06d8
+#define DP0_SNKLTCTRL 0x06e4
+
+/* PHY */
+#define DP_PHY_CTRL 0x0800
+#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
+#define BGREN BIT(25) /* AUX PHY BGR Enable */
+#define PWR_SW_EN BIT(24) /* PHY Power Switch Enable */
+#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
+#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
+#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
+#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+
+/* PLL */
+#define DP0_PLLCTRL 0x0900
+#define DP1_PLLCTRL 0x0904 /* not defined in DS */
+#define PXL_PLLCTRL 0x0908
+#define PLLUPDATE BIT(2)
+#define PLLBYP BIT(1)
+#define PLLEN BIT(0)
+#define PXL_PLLPARAM 0x0914
+#define IN_SEL_REFCLK (0 << 14)
+#define SYS_PLLPARAM 0x0918
+#define REF_FREQ_38M4 (0 << 8) /* 38.4 MHz */
+#define REF_FREQ_19M2 (1 << 8) /* 19.2 MHz */
+#define REF_FREQ_26M (2 << 8) /* 26 MHz */
+#define REF_FREQ_13M (3 << 8) /* 13 MHz */
+#define SYSCLK_SEL_LSCLK (0 << 4)
+#define LSCLK_DIV_1 (0 << 0)
+#define LSCLK_DIV_2 (1 << 0)
+
+/* Test & Debug */
+#define TSTCTL 0x0a00
+#define PLL_DBG 0x0a04
+
+static bool tc_test_pattern;
+module_param_named(test, tc_test_pattern, bool, 0644);
+
+struct tc_edp_link {
+ struct drm_dp_link base;
+ u8 assr;
+ int scrambler_dis;
+ int spread;
+ int coding8b10b;
+ u8 swing;
+ u8 preemp;
+};
+
+struct tc_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct drm_dp_aux aux;
+
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+
+ /* link settings */
+ struct tc_edp_link link;
+
+ /* display edid */
+ struct edid *edid;
+ /* current mode */
+ struct drm_display_mode *mode;
+
+ u32 rev;
+ u8 assr;
+
+ struct gpio_desc *sd_gpio;
+ struct gpio_desc *reset_gpio;
+ struct clk *refclk;
+};
+
+static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
+{
+ return container_of(a, struct tc_data, aux);
+}
+
+static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
+{
+ return container_of(b, struct tc_data, bridge);
+}
+
+static inline struct tc_data *connector_to_tc(struct drm_connector *c)
+{
+ return container_of(c, struct tc_data, connector);
+}
+
+/* Simple macros to avoid repeated error checks */
+#define tc_write(reg, var) \
+ do { \
+ ret = regmap_write(tc->regmap, reg, var); \
+ if (ret) \
+ goto err; \
+ } while (0)
+#define tc_read(reg, var) \
+ do { \
+ ret = regmap_read(tc->regmap, reg, var); \
+ if (ret) \
+ goto err; \
+ } while (0)
+
+static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
+ unsigned int cond_mask,
+ unsigned int cond_value,
+ unsigned long sleep_us, u64 timeout_us)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+ unsigned int val;
+ int ret;
+
+ for (;;) {
+ ret = regmap_read(map, addr, &val);
+ if (ret)
+ break;
+ if ((val & cond_mask) == cond_value)
+ break;
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
+ ret = regmap_read(map, addr, &val);
+ break;
+ }
+ if (sleep_us)
+ usleep_range((sleep_us >> 2) + 1, sleep_us);
+ }
+ return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
+}
+
+static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
+{
+ return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
+ 1000, 1000 * timeout_ms);
+}
+
+static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
+{
+ int ret;
+ u32 value;
+
+ ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
+ if (ret < 0)
+ return ret;
+ if (value & AUX_BUSY) {
+ if (value & AUX_TIMEOUT) {
+ dev_err(tc->dev, "i2c access timeout!\n");
+ return -ETIMEDOUT;
+ }
+ return -EBUSY;
+ }
+
+ *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
+ return 0;
+}
+
+static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct tc_data *tc = aux_to_tc(aux);
+ size_t size = min_t(size_t, 8, msg->size);
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ u8 *buf = msg->buffer;
+ u32 tmp = 0;
+ int i = 0;
+ int ret;
+
+ if (size == 0)
+ return 0;
+
+ ret = tc_aux_wait_busy(tc, 100);
+ if (ret)
+ goto err;
+
+ if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
+ /* Store data */
+ while (i < size) {
+ if (request == DP_AUX_NATIVE_WRITE)
+ tmp = tmp | (buf[i] << (8 * (i & 0x3)));
+ else
+ tmp = (tmp << 8) | buf[i];
+ i++;
+ if (((i % 4) == 0) || (i == size)) {
+ tc_write(DP0_AUXWDATA(i >> 2), tmp);
+ tmp = 0;
+ }
+ }
+ } else if (request != DP_AUX_I2C_READ &&
+ request != DP_AUX_NATIVE_READ) {
+ return -EINVAL;
+ }
+
+ /* Store address */
+ tc_write(DP0_AUXADDR, msg->address);
+ /* Start transfer */
+ tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
+
+ ret = tc_aux_wait_busy(tc, 100);
+ if (ret)
+ goto err;
+
+ ret = tc_aux_get_status(tc, &msg->reply);
+ if (ret)
+ goto err;
+
+ if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
+ /* Read data */
+ while (i < size) {
+ if ((i % 4) == 0)
+ tc_read(DP0_AUXRDATA(i >> 2), &tmp);
+ buf[i] = tmp & 0xff;
+ tmp = tmp >> 8;
+ i++;
+ }
+ }
+
+ return size;
+err:
+ return ret;
+}
+
+static const char * const training_pattern1_errors[] = {
+ "No errors",
+ "Aux write error",
+ "Aux read error",
+ "Max voltage reached error",
+ "Loop counter expired error",
+ "res", "res", "res"
+};
+
+static const char * const training_pattern2_errors[] = {
+ "No errors",
+ "Aux write error",
+ "Aux read error",
+ "Clock recovery failed error",
+ "Loop counter expired error",
+ "res", "res", "res"
+};
+
+static u32 tc_srcctrl(struct tc_data *tc)
+{
+ /*
+ * No training pattern, skew lane 1 data by two LSCLK cycles with
+ * respect to lane 0 data, AutoCorrect Mode = 0
+ */
+ u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW;
+
+ if (tc->link.scrambler_dis)
+ reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
+ if (tc->link.coding8b10b)
+ /* Enable 8/10B Encoder (TxData[19:16] not used) */
+ reg |= DP0_SRCCTRL_EN810B;
+ if (tc->link.spread)
+ reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
+ if (tc->link.base.num_lanes == 2)
+ reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
+ if (tc->link.base.rate != 162000)
+ reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
+ return reg;
+}
+
+static void tc_wait_pll_lock(struct tc_data *tc)
+{
+ /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
+ usleep_range(3000, 6000);
+}
+
+static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
+{
+ int ret;
+ int i_pre, best_pre = 1;
+ int i_post, best_post = 1;
+ int div, best_div = 1;
+ int mul, best_mul = 1;
+ int delta, best_delta;
+ int ext_div[] = {1, 2, 3, 5, 7};
+ int best_pixelclock = 0;
+ int vco_hi = 0;
+
+ dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
+ refclk);
+ best_delta = pixelclock;
+ /* Loop over all possible ext_divs, skipping invalid configurations */
+ for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) {
+ /*
+ * refclk / ext_pre_div should be in the 1 to 200 MHz range.
+ * We don't allow any refclk > 200 MHz, only check lower bounds.
+ */
+ if (refclk / ext_div[i_pre] < 1000000)
+ continue;
+ for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
+ for (div = 1; div <= 16; div++) {
+ u32 clk;
+ u64 tmp;
+
+ tmp = pixelclock * ext_div[i_pre] *
+ ext_div[i_post] * div;
+ do_div(tmp, refclk);
+ mul = tmp;
+
+ /* Check limits */
+ if ((mul < 1) || (mul > 128))
+ continue;
+
+ clk = (refclk / ext_div[i_pre] / div) * mul;
+ /*
+ * refclk * mul / (ext_pre_div * pre_div)
+ * should be in the 150 to 650 MHz range
+ */
+ if ((clk > 650000000) || (clk < 150000000))
+ continue;
+
+ clk = clk / ext_div[i_post];
+ delta = clk - pixelclock;
+
+ if (abs(delta) < abs(best_delta)) {
+ best_pre = i_pre;
+ best_post = i_post;
+ best_div = div;
+ best_mul = mul;
+ best_delta = delta;
+ best_pixelclock = clk;
+ }
+ }
+ }
+ }
+ if (best_pixelclock == 0) {
+ dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
+ pixelclock);
+ return -EINVAL;
+ }
+
+ dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
+ best_delta);
+ dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
+ ext_div[best_pre], best_div, best_mul, ext_div[best_post]);
+
+ /* if VCO >= 300 MHz */
+ if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000)
+ vco_hi = 1;
+ /* see DS */
+ if (best_div == 16)
+ best_div = 0;
+ if (best_mul == 128)
+ best_mul = 0;
+
+ /* Power up PLL and switch to bypass */
+ tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
+
+ tc_write(PXL_PLLPARAM,
+ (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */
+ (ext_div[best_pre] << 20) | /* External Pre-divider */
+ (ext_div[best_post] << 16) | /* External Post-divider */
+ IN_SEL_REFCLK | /* Use RefClk as PLL input */
+ (best_div << 8) | /* Divider for PLL RefClk */
+ (best_mul << 0)); /* Multiplier for PLL */
+
+ /* Force PLL parameter update and disable bypass */
+ tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
+
+ tc_wait_pll_lock(tc);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_pxl_pll_dis(struct tc_data *tc)
+{
+ /* Enable PLL bypass, power down PLL */
+ return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
+}
+
+static int tc_stream_clock_calc(struct tc_data *tc)
+{
+ int ret;
+ /*
+ * If the Stream clock and Link Symbol clock are
+ * asynchronous with each other, the value of M changes over
+ * time. This way of generating link clock and stream
+ * clock is called Asynchronous Clock mode. The value M
+ * must change while the value N stays constant. The
+ * value of N in this Asynchronous Clock mode must be set
+ * to 2^15 or 32,768.
+ *
+ * LSCLK = 1/10 of high speed link clock
+ *
+ * f_STRMCLK = M/N * f_LSCLK
+ * M/N = f_STRMCLK / f_LSCLK
+ *
+ */
+ tc_write(DP0_VIDMNGEN1, 32768);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_aux_link_setup(struct tc_data *tc)
+{
+ unsigned long rate;
+ u32 value;
+ int ret;
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+ case 38400000:
+ value = REF_FREQ_38M4;
+ break;
+ case 26000000:
+ value = REF_FREQ_26M;
+ break;
+ case 19200000:
+ value = REF_FREQ_19M2;
+ break;
+ case 13000000:
+ value = REF_FREQ_13M;
+ break;
+ default:
+ dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
+ return -EINVAL;
+ }
+
+ /* Setup DP-PHY / PLL */
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+
+ tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+
+ /*
+ * Initially PLLs are in bypass. Force PLL parameter update,
+ * disable PLL bypass, enable PLL
+ */
+ tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
+ 1000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(tc->dev, "Timeout waiting for PHY to become ready");
+ return ret;
+ } else if (ret)
+ goto err;
+
+ /* Setup AUX link */
+ tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
+ (0x06 << 8) | /* Aux Bit Period Calculator Threshold */
+ (0x3f << 0)); /* Aux Response Timeout Timer */
+
+ return 0;
+err:
+ dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
+ return ret;
+}
+
+static int tc_get_display_props(struct tc_data *tc)
+{
+ int ret;
+ /* temp buffer */
+ u8 tmp[8];
+
+ /* Read DP Rx Link Capability */
+ ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_read;
+ if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
+ goto err_dpcd_inval;
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.coding8b10b = tmp[0] & BIT(0);
+ tc->link.scrambler_dis = 0;
+ /* read assr */
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+
+ dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
+ tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
+ (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.base.num_lanes,
+ (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
+ "enhanced" : "non-enhanced");
+ dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b);
+ dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
+ tc->link.assr, tc->assr);
+
+ return 0;
+
+err_dpcd_read:
+ dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
+ return ret;
+err_dpcd_inval:
+ dev_err(tc->dev, "invalid DPCD\n");
+ return -EINVAL;
+}
+
+static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+{
+ int ret;
+ int vid_sync_dly;
+ int max_tu_symbol;
+
+ int left_margin = mode->htotal - mode->hsync_end;
+ int right_margin = mode->hsync_start - mode->hdisplay;
+ int hsync_len = mode->hsync_end - mode->hsync_start;
+ int upper_margin = mode->vtotal - mode->vsync_end;
+ int lower_margin = mode->vsync_start - mode->vdisplay;
+ int vsync_len = mode->vsync_end - mode->vsync_start;
+
+ dev_dbg(tc->dev, "set mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
+ left_margin, right_margin, hsync_len);
+ dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
+ upper_margin, lower_margin, vsync_len);
+ dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
+
+
+ /* LCD Ctl Frame Size */
+ tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+ tc_write(HTIM01, (left_margin << 16) | /* H back porch */
+ (hsync_len << 0)); /* Hsync */
+ tc_write(HTIM02, (right_margin << 16) | /* H front porch */
+ (mode->hdisplay << 0)); /* width */
+ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
+ (vsync_len << 0)); /* Vsync */
+ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
+ (mode->vdisplay << 0)); /* height */
+ tc_write(VFUEN0, VFUEN); /* update settings */
+
+ /* Test pattern settings */
+ tc_write(TSTCTL,
+ (120 << 24) | /* Red Color component value */
+ (20 << 16) | /* Green Color component value */
+ (99 << 8) | /* Blue Color component value */
+ (1 << 4) | /* Enable I2C Filter */
+ (2 << 0) | /* Color bar Mode */
+ 0);
+
+ /* DP Main Stream Attributes */
+ vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
+ tc_write(DP0_VIDSYNCDELAY,
+ (0x003e << 16) | /* thresh_dly */
+ (vid_sync_dly << 0));
+
+ tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+
+ tc_write(DP0_STARTVAL,
+ ((upper_margin + vsync_len) << 16) |
+ ((left_margin + hsync_len) << 0));
+
+ tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
+ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+ tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_link_training(struct tc_data *tc, int pattern)
+{
+ const char * const *errors;
+ u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT;
+ int timeout;
+ int retry;
+ u32 value;
+ int ret;
+
+ if (pattern == DP_TRAINING_PATTERN_1) {
+ srcctrl |= DP0_SRCCTRL_TP1;
+ errors = training_pattern1_errors;
+ } else {
+ srcctrl |= DP0_SRCCTRL_TP2;
+ errors = training_pattern2_errors;
+ }
+
+ /* Set DPCD 0x102 for Training Part 1 or 2 */
+ tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern);
+
+ tc_write(DP0_LTLOOPCTRL,
+ (0x0f << 28) | /* Defer Iteration Count */
+ (0x0f << 24) | /* Loop Iteration Count */
+ (0x0d << 0)); /* Loop Timer Delay */
+
+ retry = 5;
+ do {
+ /* Set DP0 Training Pattern */
+ tc_write(DP0_SRCCTRL, srcctrl);
+
+ /* Enable DP0 to start Link Training */
+ tc_write(DP0CTL, DP_EN);
+
+ /* wait */
+ timeout = 1000;
+ do {
+ tc_read(DP0_LTSTAT, &value);
+ udelay(1);
+ } while ((!(value & LT_LOOPDONE)) && (--timeout));
+ if (timeout == 0) {
+ dev_err(tc->dev, "Link training timeout!\n");
+ } else {
+ int pattern = (value >> 11) & 0x3;
+ int error = (value >> 8) & 0x7;
+
+ dev_dbg(tc->dev,
+ "Link training phase %d done after %d uS: %s\n",
+ pattern, 1000 - timeout, errors[error]);
+ if (pattern == DP_TRAINING_PATTERN_1 && error == 0)
+ break;
+ if (pattern == DP_TRAINING_PATTERN_2) {
+ value &= LT_CHANNEL1_EQ_BITS |
+ LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS;
+ /* in case of two lanes */
+ if ((tc->link.base.num_lanes == 2) &&
+ (value == (LT_CHANNEL1_EQ_BITS |
+ LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS)))
+ break;
+ /* in case of one line */
+ if ((tc->link.base.num_lanes == 1) &&
+ (value == (LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS)))
+ break;
+ }
+ }
+ /* restart */
+ tc_write(DP0CTL, 0);
+ usleep_range(10, 20);
+ } while (--retry);
+ if (retry == 0) {
+ dev_err(tc->dev, "Failed to finish training phase %d\n",
+ pattern);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_main_link_setup(struct tc_data *tc)
+{
+ struct drm_dp_aux *aux = &tc->aux;
+ struct device *dev = tc->dev;
+ unsigned int rate;
+ u32 dp_phy_ctrl;
+ int timeout;
+ bool aligned;
+ bool ready;
+ u32 value;
+ int ret;
+ u8 tmp[8];
+
+ /* display mode should be set at this point */
+ if (!tc->mode)
+ return -EINVAL;
+
+ /* from excel file - DP0_SrcCtrl */
+ tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+ DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+ DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+ /* from excel file - DP1_SrcCtrl */
+ tc_write(0x07a0, 0x00003083);
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+ case 38400000:
+ value = REF_FREQ_38M4;
+ break;
+ case 26000000:
+ value = REF_FREQ_26M;
+ break;
+ case 19200000:
+ value = REF_FREQ_19M2;
+ break;
+ case 13000000:
+ value = REF_FREQ_13M;
+ break;
+ default:
+ return -EINVAL;
+ }
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+ /* Setup Main Link */
+ dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ msleep(100);
+
+ /* PLL setup */
+ tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ /* PXL PLL setup */
+ if (tc_test_pattern) {
+ ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
+ 1000 * tc->mode->clock);
+ if (ret)
+ goto err;
+ }
+
+ /* Reset/Enable Main Links */
+ dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ usleep_range(100, 200);
+ dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ timeout = 1000;
+ do {
+ tc_read(DP_PHY_CTRL, &value);
+ udelay(1);
+ } while ((!(value & PHY_RDY)) && (--timeout));
+
+ if (timeout == 0) {
+ dev_err(dev, "timeout waiting for phy become ready");
+ return -ETIMEDOUT;
+ }
+
+ /* Set misc: 8 bits per color */
+ ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
+ if (ret)
+ goto err;
+
+ /*
+ * ASSR mode
+ * on TC358767 side ASSR configured through strap pin
+ * seems there is no way to change this setting from SW
+ *
+ * check is tc configured for same mode
+ */
+ if (tc->assr != tc->link.assr) {
+ dev_dbg(dev, "Trying to set display to ASSR: %d\n",
+ tc->assr);
+ /* try to set ASSR on display side */
+ tmp[0] = tc->assr;
+ ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]);
+ if (ret < 0)
+ goto err_dpcd_read;
+ /* read back */
+ ret = drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+
+ if (tmp[0] != tc->assr) {
+ dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
+ tc->assr);
+ /* trying with disabled scrambler */
+ tc->link.scrambler_dis = 1;
+ }
+ }
+
+ /* Setup Link & DPRx Config for Training */
+ ret = drm_dp_link_configure(aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* DOWNSPREAD_CTRL */
+ tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
+ /* MAIN_LINK_CHANNEL_CODING_SET */
+ tmp[1] = tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00;
+ ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ ret = tc_link_training(tc, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto err;
+
+ ret = tc_link_training(tc, DP_TRAINING_PATTERN_2);
+ if (ret)
+ goto err;
+
+ /* Clear DPCD 0x102 */
+ /* Note: Can Not use DP0_SNKLTCTRL (0x06E4) short cut */
+ tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
+ ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* Clear Training Pattern, set AutoCorrect Mode = 1 */
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+
+ /* Wait */
+ timeout = 100;
+ do {
+ udelay(1);
+ /* Read DPCD 0x202-0x207 */
+ ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+ ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
+ DP_CHANNEL_EQ_BITS)); /* Lane0 */
+ aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
+ } while ((--timeout) && !(ready && aligned));
+
+ if (timeout == 0) {
+ /* Read DPCD 0x200-0x201 */
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+ dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
+ dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
+ tmp[1]);
+ dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]);
+ dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n",
+ tmp[4]);
+ dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]);
+ dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
+ tmp[6]);
+
+ if (!ready)
+ dev_err(dev, "Lane0/1 not ready\n");
+ if (!aligned)
+ dev_err(dev, "Lane0/1 not aligned\n");
+ return -EAGAIN;
+ }
+
+ ret = tc_set_video_mode(tc, tc->mode);
+ if (ret)
+ goto err;
+
+ /* Set M/N */
+ ret = tc_stream_clock_calc(tc);
+ if (ret)
+ goto err;
+
+ return 0;
+err_dpcd_read:
+ dev_err(tc->dev, "Failed to read DPCD: %d\n", ret);
+ return ret;
+err_dpcd_write:
+ dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
+err:
+ return ret;
+}
+
+static int tc_main_link_stream(struct tc_data *tc, int state)
+{
+ int ret;
+ u32 value;
+
+ dev_dbg(tc->dev, "stream: %d\n", state);
+
+ if (state) {
+ value = VID_MN_GEN | DP_EN;
+ if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ value |= EF_EN;
+ tc_write(DP0CTL, value);
+ /*
+ * VID_EN assertion should be delayed by at least N * LSCLK
+ * cycles from the time VID_MN_GEN is enabled in order to
+ * generate stable values for VID_M. LSCLK is 270 MHz or
+ * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
+ * so a delay of at least 203 us should suffice.
+ */
+ usleep_range(500, 1000);
+ value |= VID_EN;
+ tc_write(DP0CTL, value);
+ /* Set input interface */
+ value = DP0_AUDSRC_NO_INPUT;
+ if (tc_test_pattern)
+ value |= DP0_VIDSRC_COLOR_BAR;
+ else
+ value |= DP0_VIDSRC_DPI_RX;
+ tc_write(SYSCTRL, value);
+ } else {
+ tc_write(DP0CTL, 0);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static enum drm_connector_status
+tc_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ drm_panel_prepare(tc->panel);
+}
+
+static void tc_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ int ret;
+
+ ret = tc_main_link_setup(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link setup error: %d\n", ret);
+ return;
+ }
+
+ ret = tc_main_link_stream(tc, 1);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link stream start error: %d\n", ret);
+ return;
+ }
+
+ drm_panel_enable(tc->panel);
+}
+
+static void tc_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ int ret;
+
+ drm_panel_disable(tc->panel);
+
+ ret = tc_main_link_stream(tc, 0);
+ if (ret < 0)
+ dev_err(tc->dev, "main link stream stop error: %d\n", ret);
+}
+
+static void tc_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ drm_panel_unprepare(tc->panel);
+}
+
+static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ /* Fixup sync polarities, both hsync and vsync are active low */
+ adj->flags = mode->flags;
+ adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+
+ return true;
+}
+
+static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Accept any mode */
+ return MODE_OK;
+}
+
+static void tc_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ tc->mode = mode;
+}
+
+static int tc_connector_get_modes(struct drm_connector *connector)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+ struct edid *edid;
+ unsigned int count;
+
+ if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
+ count = tc->panel->funcs->get_modes(tc->panel);
+ if (count > 0)
+ return count;
+ }
+
+ edid = drm_get_edid(connector, &tc->aux.ddc);
+
+ kfree(tc->edid);
+ tc->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ return count;
+}
+
+static void tc_connector_set_polling(struct tc_data *tc,
+ struct drm_connector *connector)
+{
+ /* TODO: add support for HPD */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+}
+
+static struct drm_encoder *
+tc_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+
+ return tc->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
+ .get_modes = tc_connector_get_modes,
+ .mode_valid = tc_connector_mode_valid,
+ .best_encoder = tc_connector_best_encoder,
+};
+
+static void tc_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = tc_connector_detect,
+ .destroy = tc_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tc_bridge_attach(struct drm_bridge *bridge)
+{
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ struct tc_data *tc = bridge_to_tc(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ /* Create eDP connector */
+ drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
+ ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret)
+ return ret;
+
+ if (tc->panel)
+ drm_panel_attach(tc->panel, &tc->connector);
+
+ drm_display_info_set_bus_formats(&tc->connector.display_info,
+ &bus_format, 1);
+ drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs tc_bridge_funcs = {
+ .attach = tc_bridge_attach,
+ .mode_set = tc_bridge_mode_set,
+ .pre_enable = tc_bridge_pre_enable,
+ .enable = tc_bridge_enable,
+ .disable = tc_bridge_disable,
+ .post_disable = tc_bridge_post_disable,
+ .mode_fixup = tc_bridge_mode_fixup,
+};
+
+static bool tc_readable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != SYSCTRL;
+}
+
+static const struct regmap_range tc_volatile_ranges[] = {
+ regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
+ regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
+ regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
+ regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
+ regmap_reg_range(VFUEN0, VFUEN0),
+};
+
+static const struct regmap_access_table tc_volatile_table = {
+ .yes_ranges = tc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
+};
+
+static bool tc_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg != TC_IDREG) &&
+ (reg != DP0_LTSTAT) &&
+ (reg != DP0_SNKLTCHGREQ);
+}
+
+static const struct regmap_config tc_regmap_config = {
+ .name = "tc358767",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PLL_DBG,
+ .cache_type = REGCACHE_RBTREE,
+ .readable_reg = tc_readable_reg,
+ .volatile_table = &tc_volatile_table,
+ .writeable_reg = tc_writeable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *ep;
+ struct tc_data *tc;
+ int ret;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->dev = dev;
+
+ /* port@2 is the output port */
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_warn(dev, "endpoint %s not connected\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -ENODEV;
+ }
+ of_node_put(ep);
+ tc->panel = of_drm_find_panel(remote);
+ if (tc->panel) {
+ dev_dbg(dev, "found panel %s\n", remote->full_name);
+ } else {
+ dev_dbg(dev, "waiting for panel %s\n",
+ remote->full_name);
+ of_node_put(remote);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(remote);
+ }
+
+ /* Shut down GPIO is optional */
+ tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->sd_gpio))
+ return PTR_ERR(tc->sd_gpio);
+
+ if (tc->sd_gpio) {
+ gpiod_set_value_cansleep(tc->sd_gpio, 0);
+ usleep_range(5000, 10000);
+ }
+
+ /* Reset GPIO is optional */
+ tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(tc->reset_gpio))
+ return PTR_ERR(tc->reset_gpio);
+
+ if (tc->reset_gpio) {
+ gpiod_set_value_cansleep(tc->reset_gpio, 1);
+ usleep_range(5000, 10000);
+ }
+
+ tc->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(tc->refclk)) {
+ ret = PTR_ERR(tc->refclk);
+ dev_err(dev, "Failed to get refclk: %d\n", ret);
+ return ret;
+ }
+
+ tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
+ if (IS_ERR(tc->regmap)) {
+ ret = PTR_ERR(tc->regmap);
+ dev_err(dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
+ if (ret) {
+ dev_err(tc->dev, "can not read device ID: %d\n", ret);
+ return ret;
+ }
+
+ if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) {
+ dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev);
+ return -EINVAL;
+ }
+
+ tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+
+ ret = tc_aux_link_setup(tc);
+ if (ret)
+ return ret;
+
+ /* Register DP AUX channel */
+ tc->aux.name = "TC358767 AUX i2c adapter";
+ tc->aux.dev = tc->dev;
+ tc->aux.transfer = tc_aux_transfer;
+ ret = drm_dp_aux_register(&tc->aux);
+ if (ret)
+ return ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret)
+ goto err_unregister_aux;
+
+ tc_connector_set_polling(tc, &tc->connector);
+
+ tc->bridge.funcs = &tc_bridge_funcs;
+ tc->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&tc->bridge);
+ if (ret) {
+ dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
+ goto err_unregister_aux;
+ }
+
+ i2c_set_clientdata(client, tc);
+
+ return 0;
+err_unregister_aux:
+ drm_dp_aux_unregister(&tc->aux);
+ return ret;
+}
+
+static int tc_remove(struct i2c_client *client)
+{
+ struct tc_data *tc = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&tc->bridge);
+ drm_dp_aux_unregister(&tc->aux);
+
+ tc_pxl_pll_dis(tc);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc358767_i2c_ids[] = {
+ { "tc358767", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
+
+static const struct of_device_id tc358767_of_ids[] = {
+ { .compatible = "toshiba,tc358767", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358767_of_ids);
+
+static struct i2c_driver tc358767_driver = {
+ .driver = {
+ .name = "tc358767",
+ .of_match_table = tc358767_of_ids,
+ },
+ .id_table = tc358767_i2c_ids,
+ .probe = tc_probe,
+ .remove = tc_remove,
+};
+module_i2c_driver(tc358767_driver);
+
+MODULE_AUTHOR("Andrey Gusakov <andrey.gusakov@cogentembedded.com>");
+MODULE_DESCRIPTION("tc358767 eDP encoder driver");
+MODULE_LICENSE("GPL");
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
- if (cirrus_fb->obj)
- drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
{
}
-static int cirrus_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
.evict_flags = cirrus_bo_evict_flags,
- .move = cirrus_bo_move,
+ .move = NULL,
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
if (old_blob == new_blob)
return;
- if (old_blob)
- drm_property_unreference_blob(old_blob);
+ drm_property_unreference_blob(old_blob);
if (new_blob)
drm_property_reference_blob(new_blob);
*blob = new_blob;
*/
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ unsigned crtc_mask = 0;
+ struct drm_crtc *crtc;
int ret;
+ bool global = false;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->acquire_ctx != state->acquire_ctx)
+ continue;
+
+ crtc_mask |= drm_crtc_mask(crtc);
+ crtc->acquire_ctx = NULL;
+ }
+
+ if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
+ global = true;
+
+ dev->mode_config.acquire_ctx = NULL;
+ }
retry:
drm_modeset_backoff(state->acquire_ctx);
- ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
if (ret)
goto retry;
+
+ drm_for_each_crtc(crtc, dev)
+ if (drm_crtc_mask(crtc) & crtc_mask)
+ crtc->acquire_ctx = state->acquire_ctx;
+
+ if (global)
+ dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);
mb();
for (; addr < end; addr += size)
clflushopt(addr);
+ clflushopt(end - 1); /* force serialisation */
mb();
return;
}
}
EXPORT_SYMBOL(drm_mode_object_reference);
+/**
+ * drm_crtc_force_disable - Forcibly turn off a CRTC
+ * @crtc: CRTC to turn off
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+ struct drm_mode_set set = {
+ .crtc = crtc,
+ };
+
+ return drm_mode_set_config_internal(&set);
+}
+EXPORT_SYMBOL(drm_crtc_force_disable);
+
+/**
+ * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ drm_modeset_lock_all(dev);
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->enabled) {
+ ret = drm_crtc_force_disable(crtc);
+ if (ret)
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_force_disable_all);
+
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_mode_set set;
- int ret;
if (!fb)
return;
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = drm_mode_set_config_internal(&set);
- if (ret)
+ if (drm_crtc_force_disable(crtc))
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
connector->dev = dev;
connector->funcs = funcs;
- connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
- if (connector->connector_id < 0) {
- ret = connector->connector_id;
+ ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out_put;
- }
+ connector->index = ret;
+ ret = 0;
connector->connector_type = connector_type;
connector->connector_type_id =
ida_remove(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
- ida_remove(&config->connector_ida, connector->connector_id);
+ ida_remove(&config->connector_ida, connector->index);
out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
connector->connector_type_id);
ida_remove(&dev->mode_config.connector_ida,
- connector->connector_id);
+ connector->index);
kfree(connector->display_info.bus_formats);
drm_mode_object_unregister(dev, &connector->base);
}
EXPORT_SYMBOL(drm_connector_unregister);
-/**
- * drm_connector_register_all - register all connectors
- * @dev: drm device
- *
- * This function registers all connectors in sysfs and other places so that
- * userspace can start to access them. drm_connector_register_all() is called
- * automatically from drm_dev_register() to complete the device registration,
- * if they don't call drm_connector_register() on each connector individually.
- *
- * When a device is unplugged and should be removed from userspace access,
- * call drm_connector_unregister_all(), which is the inverse of this
- * function.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register_all(struct drm_device *dev)
+static void drm_connector_unregister_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_connector_unregister(connector);
+}
+
+static int drm_connector_register_all(struct drm_device *dev)
{
struct drm_connector *connector;
int ret;
drm_connector_unregister_all(dev);
return ret;
}
-EXPORT_SYMBOL(drm_connector_register_all);
-
-/**
- * drm_connector_unregister_all - unregister connector userspace interfaces
- * @dev: drm device
- *
- * This functions unregisters all connectors from sysfs and other places so
- * that userspace can no longer access them. Drivers should call this as the
- * first step tearing down the device instace, or when the underlying
- * physical device disappeared (e.g. USB unplug), right before calling
- * drm_dev_unregister().
- */
-void drm_connector_unregister_all(struct drm_device *dev)
-{
- struct drm_connector *connector;
-
- /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- drm_connector_unregister(connector);
-}
-EXPORT_SYMBOL(drm_connector_unregister_all);
static int drm_encoder_register_all(struct drm_device *dev)
{
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_crtc *new_crtc;
- struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_crtc **save_encoder_crtcs, *new_crtc;
+ struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
- struct drm_connector *save_connectors, *connector;
+ struct drm_connector *connector;
int count = 0, ro, fail = 0;
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
- save_encoders = kzalloc(dev->mode_config.num_encoder *
- sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders)
+ save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!save_encoder_crtcs)
return -ENOMEM;
- save_connectors = kzalloc(dev->mode_config.num_connector *
- sizeof(struct drm_connector), GFP_KERNEL);
- if (!save_connectors) {
- kfree(save_encoders);
+ save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!save_connector_encoders) {
+ kfree(save_encoder_crtcs);
return -ENOMEM;
}
*/
count = 0;
drm_for_each_encoder(encoder, dev) {
- save_encoders[count++] = *encoder;
+ save_encoder_crtcs[count++] = encoder->crtc;
}
count = 0;
drm_for_each_connector(connector, dev) {
- save_connectors[count++] = *connector;
+ save_connector_encoders[count++] = connector->encoder;
}
save_set.crtc = set->crtc;
mode_changed = true;
}
- /* take a reference on all connectors in set */
+ /* take a reference on all unbound connectors in set, reuse the
+ * already taken reference for bound connectors
+ */
for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro]->encoder)
+ continue;
drm_connector_reference(set->connectors[ro]);
}
}
}
- /* after fail drop reference on all connectors in save set */
- count = 0;
- drm_for_each_connector(connector, dev) {
- drm_connector_unreference(&save_connectors[count++]);
- }
-
- kfree(save_connectors);
- kfree(save_encoders);
+ kfree(save_connector_encoders);
+ kfree(save_encoder_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
drm_for_each_encoder(encoder, dev) {
- *encoder = save_encoders[count++];
+ encoder->crtc = save_encoder_crtcs[count++];
}
count = 0;
drm_for_each_connector(connector, dev) {
- *connector = save_connectors[count++];
+ connector->encoder = save_connector_encoders[count++];
}
- /* after fail drop reference on all connectors in set */
+ /* after fail drop reference on all unbound connectors in set, let
+ * bound connectors keep their reference
+ */
for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro]->encoder)
+ continue;
drm_connector_unreference(set->connectors[ro]);
}
save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
- kfree(save_connectors);
- kfree(save_encoders);
+ kfree(save_connector_encoders);
+ kfree(save_encoder_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
if (IS_ERR(drm_dp_aux_dev_class)) {
- res = PTR_ERR(drm_dp_aux_dev_class);
- goto out;
+ return PTR_ERR(drm_dp_aux_dev_class);
}
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
WARN_ON(!mutex_is_locked(&mgr->qlock));
/* construct a chunk from the first msg in the tx_msg queue */
- if (list_empty(&mgr->tx_msg_downq)) {
- mgr->tx_down_in_progress = false;
+ if (list_empty(&mgr->tx_msg_downq))
return;
- }
- mgr->tx_down_in_progress = true;
txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up(&mgr->tx_waitq);
}
- if (list_empty(&mgr->tx_msg_downq)) {
- mgr->tx_down_in_progress = false;
- return;
- }
}
/* called holding qlock */
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
- if (!mgr->tx_down_in_progress)
+ if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
+ * @connector: DRM connector for this port
* @mgr: manager for this port
* @port: unverified pointer to a port
*
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (mgr->tx_down_in_progress)
+ if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
drm_dp_port_teardown_pdt(port, port->pdt);
if (!port->input && port->vcpi.vcpi > 0) {
- if (mgr->mst_state) {
- drm_dp_mst_reset_vcpi_slots(mgr, port);
- drm_dp_update_payload_part1(mgr);
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- }
+ drm_dp_mst_reset_vcpi_slots(mgr, port);
+ drm_dp_update_payload_part1(mgr);
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kref_put(&port->kref, drm_dp_free_mst_port);
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+ drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
+
+/**
+ * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend, which is a wrapper around
+ * fb_set_suspend implemented by fbdev core.
+ */
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+{
+ if (fbdev_cma)
+ drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize, drv_size;
+ unsigned int in_size, out_size, drv_size, ksize;
bool is_driver_ioctl;
dev = file_priv->minor->dev;
}
drv_size = _IOC_SIZE(ioctl->cmd);
- usize = _IOC_SIZE(cmd);
- asize = max(usize, drv_size);
- cmd = ioctl->cmd;
+ out_size = in_size = _IOC_SIZE(cmd);
+ if ((cmd & ioctl->cmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
if (unlikely(retcode))
goto err_i1;
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
+ if (ksize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
}
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
}
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
}
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
/* Enforce sane locking for kms driver ioctls. Core ioctls are
* too messy still. */
if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
mutex_unlock(&drm_global_mutex);
}
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
- }
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ retcode = -EFAULT;
err_i1:
if (!ioctl)
* shouldn't be used by any drivers.
*
* Returns:
- * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
/*
* Wake up any waiters so they don't hang. This is just to paper over
- * isssues for UMS drivers which aren't in full control of their
+ * issues for UMS drivers which aren't in full control of their
* vblank/irq handling. KMS drivers must ensure that vblanks are all
* disabled when uninstalling the irq handler.
*/
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- /* UMS was only ever support on pci devices. */
+ /* UMS was only ever supported on pci devices. */
if (WARN_ON(!dev->pdev))
return -EINVAL;
*
* This is the legacy version of drm_crtc_vblank_count_and_time().
*/
-u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime)
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+ struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 vblank_count;
return vblank_count;
}
-EXPORT_SYMBOL(drm_vblank_count_and_time);
/**
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the native KMS version of drm_vblank_count_and_time().
*/
u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime)
seq = drm_vblank_count_and_time(dev, pipe, &now);
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
vblwait->request.sequence, seq, pipe);
goto done;
}
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ }
+
if (flags & _DRM_VBLANK_EVENT) {
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
}
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
- vblwait->request.sequence = seq + 1;
- }
-
DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
vblwait->request.sequence, pipe);
- vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, pipe) -
vblwait->request.sequence) <= (1 << 23)) ||
# include <asm/agp.h>
#else
# ifdef __powerpc__
-# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
/**
- * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect
- * output signal on the TE signal line when display module reaches line N
- * defined by STS[n:0].
+ * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
* @dsi: DSI peripheral device
- * @param: STS[10:0]
+ * @scanline: scanline to use as trigger
+ *
* Return: 0 on success or a negative error code on failure
*/
-int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
{
- u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8,
- param & 0xff };
+ u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+ scanline & 0xff };
ssize_t err;
err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
return 0;
}
-EXPORT_SYMBOL(mipi_dsi_set_tear_scanline);
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
/**
* mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+ return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
#else
return vmalloc_32(size);
#endif
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp |= _PAGE_NO_CACHE;
+ tmp = pgprot_noncached_wc(tmp);
#endif
return tmp;
}
* pages and mappings in fault()
*/
#if defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
vma->vm_ops = &drm_vm_ops;
break;
int ret;
ret = etnaviv_gpu_init(g);
- if (ret) {
- dev_err(g->dev, "hw init failed: %d\n", ret);
+ if (ret)
priv->gpu[i] = NULL;
- }
}
}
}
return 0;
}
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+ u32 pmc, ppc;
+
+ /* enable clock gating */
+ ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+ /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+ if (gpu->identity.revision == 0x4301 ||
+ gpu->identity.revision == 0x4302)
+ ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+ pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+ /* Disable PA clock gating for GC400+ except for GC420 */
+ if (gpu->identity.model >= chipModel_GC400 &&
+ gpu->identity.model != chipModel_GC420)
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+ /*
+ * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+ * present without a bug fix.
+ */
+ if (gpu->identity.revision < 0x5000 &&
+ gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+ !(gpu->identity.minor_features1 &
+ chipMinorFeatures1_DISABLE_PE_GATING))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+ if (gpu->identity.revision < 0x5422)
+ pmc |= BIT(15); /* Unknown bit */
+
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+ gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
}
+ /* enable module-level clock gating */
+ etnaviv_gpu_enable_mlcg(gpu);
+
/*
* Update GPU AXI cache atttribute to "cacheable, no allocate".
* This is necessary to prevent the iMX6 SoC locking up.
bool mmuv2;
ret = pm_runtime_get_sync(gpu->dev);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(gpu->dev, "Failed to enable GPU power domain\n");
return ret;
+ }
etnaviv_hw_identify(gpu);
}
ret = etnaviv_hw_reset(gpu);
- if (ret)
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset failed\n");
goto fail;
+ }
/* Setup IOMMU.. eventually we will (I think) do this once per context
* and have separate page tables per context. For now, to keep things
}
if (!iommu) {
+ dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
ret = -ENOMEM;
goto fail;
}
gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
if (!gpu->mmu) {
+ dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
iommu_domain_free(iommu);
ret = -ENOMEM;
goto fail;
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+ etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000
#define VIVS_PM_MODULE_STATUS 0x00000108
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
config DRM_EXYNOS_IOMMU
bool
- depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ depends on EXYNOS_IOMMU
default y
comment "CRTCs"
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
/*
struct exynos_dp_device {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct drm_connector *connector;
struct drm_bridge *ptn_bridge;
struct drm_device *drm_dev;
struct device *dev;
return exynos_dp_crtc_clock_enable(plat_data, false);
}
-static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+ struct drm_connector *connector)
{
struct exynos_dp_device *dp = to_dp(plat_data);
- struct drm_connector *connector = &dp->connector;
struct drm_display_mode *mode;
int num_modes = 0;
int ret;
drm_connector_register(connector);
+ dp->connector = connector;
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
#include <drm/drmP.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
dev_name(private->dma_dev));
- /*
- * create mapping to manage iommu table and set a pointer to iommu
- * mapping structure to iommu_mapping of private data.
- * also this iommu_mapping can be used to check if iommu is supported
- * or not.
- */
+ /* create common IOMMU mapping for all devices attached to Exynos DRM */
ret = drm_create_iommu_mapping(dev);
if (ret < 0) {
DRM_ERROR("failed to create iommu mapping.\n");
struct drm_property *plane_zpos_property;
struct device *dma_dev;
- unsigned long da_start;
- unsigned long da_space_size;
void *mapping;
unsigned int pipe;
struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
- if (exynos_gem->kvaddr)
- vunmap(exynos_gem->kvaddr);
+ vunmap(exynos_gem->kvaddr);
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
.timing_base = 0x0,
.has_clksel = 1,
.has_limited_fmt = 1,
- .has_hw_trigger = 1,
};
static struct fimd_driver_data exynos3_fimd_driver_data = {
.lcdblk_vt_shift = 24,
.lcdblk_bypass_shift = 15,
.lcdblk_mic_bypass_shift = 11,
- .trg_type = I80_HW_TRG,
.has_shadowcon = 1,
.has_vidoutcon = 1,
.has_vtsel = 1,
.has_mic_bypass = 1,
.has_dp_clk = 1,
- .has_hw_trigger = 1,
- .has_trigger_per_te = 1,
};
struct fimd_context {
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
-#define G2D_SRC_STRIDE_REG 0x0308
+#define G2D_SRC_STRIDE 0x0308
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
-#define G2D_DST_STRIDE_REG 0x0408
+#define G2D_DST_STRIDE 0x0408
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
- case G2D_SRC_STRIDE_REG:
+ case G2D_SRC_STRIDE:
case G2D_SRC_COLOR_MODE:
case G2D_SRC_LEFT_TOP:
case G2D_SRC_RIGHT_BOTTOM:
reg_type = REG_TYPE_SRC_PLANE2;
break;
case G2D_DST_BASE_ADDR:
- case G2D_DST_STRIDE_REG:
+ case G2D_DST_STRIDE:
case G2D_DST_COLOR_MODE:
case G2D_DST_LEFT_TOP:
case G2D_DST_RIGHT_BOTTOM:
} else
buf_info->types[reg_type] = BUF_TYPE_GEM;
break;
- case G2D_SRC_STRIDE_REG:
- case G2D_DST_STRIDE_REG:
+ case G2D_SRC_STRIDE:
+ case G2D_DST_STRIDE:
if (for_addr)
goto err;
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
-#include <linux/kref.h>
-
-#include <asm/dma-iommu.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
/*
* drm_create_iommu_mapping - create a mapping structure
*
*/
int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
- struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
- if (!priv->da_start)
- priv->da_start = EXYNOS_DEV_ADDR_START;
- if (!priv->da_space_size)
- priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-
- mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
- priv->da_space_size);
-
- if (IS_ERR(mapping))
- return PTR_ERR(mapping);
-
- priv->mapping = mapping;
-
- return 0;
+ return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
+ EXYNOS_DEV_ADDR_SIZE);
}
/*
* drm_release_iommu_mapping - release iommu mapping structure
*
* @drm_dev: DRM device
- *
- * if mapping->kref becomes 0 then all things related to iommu mapping
- * will be released
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- arm_iommu_release_mapping(priv->mapping);
+ __exynos_iommu_release_mapping(priv);
}
/*
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
- if (!priv->mapping)
- return 0;
-
- subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
- sizeof(*subdrv_dev->dma_parms),
- GFP_KERNEL);
- if (!subdrv_dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
-
- if (subdrv_dev->archdata.mapping)
- arm_iommu_detach_device(subdrv_dev);
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
- ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
- if (ret < 0) {
- DRM_DEBUG_KMS("failed iommu attach.\n");
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
return ret;
- }
+
+ ret = __exynos_iommu_attach(priv, subdrv_dev);
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
return 0;
}
struct device *subdrv_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- struct dma_iommu_mapping *mapping = priv->mapping;
-
- if (!mapping || !mapping->domain)
- return;
- arm_iommu_detach_device(subdrv_dev);
+ __exynos_iommu_detach(priv, subdrv_dev);
+ clear_dma_max_seg_size(subdrv_dev);
}
#ifdef CONFIG_DRM_EXYNOS_IOMMU
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+ unsigned long start, unsigned long size)
+{
+ priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
+ size);
+ return IS_ERR(priv->mapping);
+}
+
+static inline void
+__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+ arm_iommu_release_mapping(priv->mapping);
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ if (dev->archdata.mapping)
+ arm_iommu_detach_device(dev);
+
+ return arm_iommu_attach_device(dev, priv->mapping);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+
+#elif defined(CONFIG_IOMMU_DMA)
+#include <linux/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+ unsigned long start, unsigned long size)
+{
+ struct iommu_domain *domain;
+ int ret;
+
+ domain = iommu_domain_alloc(priv->dma_dev->bus);
+ if (!domain)
+ return -ENOMEM;
+
+ ret = iommu_get_dma_cookie(domain);
+ if (ret)
+ goto free_domain;
+
+ ret = iommu_dma_init_domain(domain, start, size);
+ if (ret)
+ goto put_cookie;
+
+ priv->mapping = domain;
+ return 0;
+
+put_cookie:
+ iommu_put_dma_cookie(domain);
+free_domain:
+ iommu_domain_free(domain);
+ return ret;
+}
+
+static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ iommu_put_dma_cookie(domain);
+ iommu_domain_free(domain);
+ priv->mapping = NULL;
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ return iommu_attach_device(domain, dev);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ iommu_detach_device(domain, dev);
+}
+#else
+#error Unsupported architecture and IOMMU/DMA-mapping glue code
+#endif
+
int drm_create_iommu_mapping(struct drm_device *drm_dev);
void drm_release_iommu_mapping(struct drm_device *drm_dev);
DRM_ERROR("Failed to find ddc node in device tree\n");
return -ENODEV;
}
+ of_node_put(dev->of_node);
out_get_ddc_adpt:
hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
ret = -ENODEV;
goto err_ddc;
}
+ of_node_put(dev->of_node);
out_get_phy_port:
if (hdata->drv_data->is_apb_phy) {
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_PANEL
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
select REGMAP_MMIO
select VIDEOMODE_HELPERS
help
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ drm_crtc_vblank_off(crtc);
+
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+
+ drm_crtc_vblank_on(crtc);
}
static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_plane *primary;
struct drm_crtc *crtc = &fsl_dev->crtc;
- unsigned int i, j, reg_num;
int ret;
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+
primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
if (!primary)
return -ENOMEM;
drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
- if (!strcmp(fsl_dev->soc->name, "ls1021a"))
- reg_num = LS1021A_LAYER_REG_NUM;
- else
- reg_num = VF610_LAYER_REG_NUM;
- for (i = 0; i < fsl_dev->soc->total_layer; i++) {
- for (j = 1; j <= reg_num; j++)
- regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
- }
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
-
return 0;
}
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/console.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mm.h>
#include <linux/regmap.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .cache_type = REGCACHE_RBTREE,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
};
if (!fsl_dev)
return 0;
+ disable_irq(fsl_dev->irq);
drm_kms_helper_poll_disable(fsl_dev->drm);
- regcache_cache_only(fsl_dev->regmap, true);
- regcache_mark_dirty(fsl_dev->regmap);
- clk_disable(fsl_dev->clk);
- clk_unprepare(fsl_dev->clk);
+
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
+ console_unlock();
+
+ fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
+ if (IS_ERR(fsl_dev->state)) {
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+ console_unlock();
+
+ drm_kms_helper_poll_enable(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
+ return PTR_ERR(fsl_dev->state);
+ }
+
+ clk_disable_unprepare(fsl_dev->pix_clk);
+ clk_disable_unprepare(fsl_dev->clk);
return 0;
}
if (!fsl_dev)
return 0;
- ret = clk_enable(fsl_dev->clk);
+ ret = clk_prepare_enable(fsl_dev->clk);
if (ret < 0) {
dev_err(dev, "failed to enable dcu clk\n");
- clk_unprepare(fsl_dev->clk);
return ret;
}
- ret = clk_prepare(fsl_dev->clk);
+
+ ret = clk_prepare_enable(fsl_dev->pix_clk);
if (ret < 0) {
- dev_err(dev, "failed to prepare dcu clk\n");
+ dev_err(dev, "failed to enable pix clk\n");
return ret;
}
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+ drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+ console_unlock();
+
drm_kms_helper_poll_enable(fsl_dev->drm);
- regcache_cache_only(fsl_dev->regmap, false);
- regcache_sync(fsl_dev->regmap);
+ enable_irq(fsl_dev->irq);
return 0;
}
.name = "ls1021a",
.total_layer = 16,
.max_layer = 4,
+ .layer_regs = LS1021A_LAYER_REG_NUM,
};
static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
.name = "vf610",
.total_layer = 64,
.max_layer = 6,
+ .layer_regs = VF610_LAYER_REG_NUM,
};
static const struct of_device_id fsl_dcu_of_match[] = {
unsigned int total_layer;
/*max layer number DCU supported*/
unsigned int max_layer;
+ unsigned int layer_regs;
};
struct fsl_dcu_drm_device {
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
const struct fsl_dcu_soc_data *soc;
+ struct drm_atomic_state *state;
};
void fsl_dcu_fbdev_init(struct drm_device *dev);
if (ret)
goto err;
- ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+ ret = fsl_dcu_create_outputs(fsl_dev);
if (ret)
goto err;
: NULL;
}
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
- struct drm_encoder *encoder);
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_crtc *crtc);
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev);
#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
DRM_FORMAT_YUV422,
};
+void fsl_dcu_drm_init_planes(struct drm_device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ int i, j;
+
+ for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+ for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
+ }
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+}
+
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
{
struct drm_plane *primary;
#ifndef __FSL_DCU_DRM_PLANE_H__
#define __FSL_DCU_DRM_PLANE_H__
+void fsl_dcu_drm_init_planes(struct drm_device *dev);
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
#endif /* __FSL_DCU_DRM_PLANE_H__ */
*/
#include <linux/backlight.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
.mode_valid = fsl_dcu_drm_connector_mode_valid,
};
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
- struct drm_encoder *encoder)
+static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_panel *panel)
{
+ struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
- struct device_node *panel_node;
int ret;
fsl_dev->connector.encoder = encoder;
mode_config->dpms_property,
DRM_MODE_DPMS_OFF);
- panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
- if (!panel_node) {
- dev_err(fsl_dev->dev, "fsl,panel property not found\n");
- ret = -ENODEV;
- goto err_sysfs;
- }
-
- fsl_dev->connector.panel = of_drm_find_panel(panel_node);
- if (!fsl_dev->connector.panel) {
- ret = -EPROBE_DEFER;
- goto err_panel;
- }
- of_node_put(panel_node);
-
- ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+ ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
goto err_sysfs;
return 0;
-err_panel:
- of_node_put(panel_node);
err_sysfs:
drm_connector_unregister(connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
}
+
+static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
+ const struct of_endpoint *ep)
+{
+ struct drm_bridge *bridge;
+ struct device_node *np;
+
+ np = of_graph_get_remote_port_parent(ep->local_node);
+
+ fsl_dev->connector.panel = of_drm_find_panel(np);
+ if (fsl_dev->connector.panel) {
+ of_node_put(np);
+ return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+ }
+
+ bridge = of_drm_find_bridge(np);
+ of_node_put(np);
+ if (!bridge)
+ return -ENODEV;
+
+ fsl_dev->encoder.bridge = bridge;
+ bridge->encoder = &fsl_dev->encoder;
+
+ return drm_bridge_attach(fsl_dev->drm, bridge);
+}
+
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
+{
+ struct of_endpoint ep;
+ struct device_node *ep_node, *panel_node;
+ int ret;
+
+ /* This is for backward compatibility */
+ panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+ if (panel_node) {
+ fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!fsl_dev->connector.panel)
+ return -EPROBE_DEFER;
+ return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+ }
+
+ ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL);
+ if (!ep_node)
+ return -ENODEV;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ of_node_put(ep_node);
+ if (ret)
+ return -ENODEV;
+
+ return fsl_dcu_attach_endpoint(fsl_dev, &ep);
+}
goto err_node_put;
}
+ of_node_put(np);
clk_prepare_enable(tcon->ipg_clk);
dev_info(dev, "Using TCON in bypass mode\n");
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
- select FB_CFB_COPYAREA
- select FB_CFB_FILLRECT
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select HISI_KIRIN_DW_DSI
help
Choose this option if you have a hisilicon Kirin chipsets(hi6220).
If M is selected the module will be called kirin-drm.
ade_set_medianoc_qos(acrtc);
ade_display_enable(acrtc);
ade_dump_regs(ctx->base);
+ drm_crtc_vblank_on(crtc);
acrtc->enable = true;
}
if (!acrtc->enable)
return;
+ drm_crtc_vblank_off(crtc);
ade_power_down(ctx);
acrtc->enable = false;
}
}
ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
- if (!ctx->ade_core_clk) {
+ if (IS_ERR(ctx->ade_core_clk)) {
DRM_ERROR("failed to parse clk ADE_CORE\n");
- return -ENODEV;
+ return PTR_ERR(ctx->ade_core_clk);
}
ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
- if (!ctx->media_noc_clk) {
+ if (IS_ERR(ctx->media_noc_clk)) {
DRM_ERROR("failed to parse clk CODEC_JPEG\n");
- return -ENODEV;
+ return PTR_ERR(ctx->media_noc_clk);
}
ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
- if (!ctx->ade_pix_clk) {
+ if (IS_ERR(ctx->ade_pix_clk)) {
DRM_ERROR("failed to parse clk ADE_PIX\n");
- return -ENODEV;
+ return PTR_ERR(ctx->ade_pix_clk);
}
return 0;
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
-config DRM_I2C_ADV7511
- tristate "AV7511 encoder"
- select REGMAP_I2C
- help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
-
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
default m if DRM_NOUVEAU
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
-
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+++ /dev/null
-/*
- * Analog Devices ADV7511 HDMI transmitter driver
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
-
-#include "adv7511.h"
-
-struct adv7511 {
- struct i2c_client *i2c_main;
- struct i2c_client *i2c_edid;
-
- struct regmap *regmap;
- struct regmap *packet_memory_regmap;
- enum drm_connector_status status;
- bool powered;
-
- unsigned int f_tmds;
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
- bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-
- bool embedded_sync;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
- bool rgb;
-
- struct edid *edid;
-
- struct gpio_desc *gpio_pd;
-};
-
-static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
-{
- return to_encoder_slave(encoder)->slave_priv;
-}
-
-/* ADI recommended values for proper operation. */
-static const struct reg_sequence adv7511_fixed_registers[] = {
- { 0x98, 0x03 },
- { 0x9a, 0xe0 },
- { 0x9c, 0x30 },
- { 0x9d, 0x61 },
- { 0xa2, 0xa4 },
- { 0xa3, 0xa4 },
- { 0xe0, 0xd0 },
- { 0xf9, 0x00 },
- { 0x55, 0x02 },
-};
-
-/* -----------------------------------------------------------------------------
- * Register access
- */
-
-static const uint8_t adv7511_register_defaults[] = {
- 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
- 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
- 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
- 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
- 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
- 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
- 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
- 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
- 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
- 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
- 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
- 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
- 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
- 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
- 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
- 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case ADV7511_REG_CHIP_REVISION:
- case ADV7511_REG_SPDIF_FREQ:
- case ADV7511_REG_CTS_AUTOMATIC1:
- case ADV7511_REG_CTS_AUTOMATIC2:
- case ADV7511_REG_VIC_DETECTED:
- case ADV7511_REG_VIC_SEND:
- case ADV7511_REG_AUX_VIC_DETECTED:
- case ADV7511_REG_STATUS:
- case ADV7511_REG_GC(1):
- case ADV7511_REG_INT(0):
- case ADV7511_REG_INT(1):
- case ADV7511_REG_PLL_STATUS:
- case ADV7511_REG_AN(0):
- case ADV7511_REG_AN(1):
- case ADV7511_REG_AN(2):
- case ADV7511_REG_AN(3):
- case ADV7511_REG_AN(4):
- case ADV7511_REG_AN(5):
- case ADV7511_REG_AN(6):
- case ADV7511_REG_AN(7):
- case ADV7511_REG_HDCP_STATUS:
- case ADV7511_REG_BCAPS:
- case ADV7511_REG_BKSV(0):
- case ADV7511_REG_BKSV(1):
- case ADV7511_REG_BKSV(2):
- case ADV7511_REG_BKSV(3):
- case ADV7511_REG_BKSV(4):
- case ADV7511_REG_DDC_STATUS:
- case ADV7511_REG_EDID_READ_CTRL:
- case ADV7511_REG_BSTATUS(0):
- case ADV7511_REG_BSTATUS(1):
- case ADV7511_REG_CHIP_ID_HIGH:
- case ADV7511_REG_CHIP_ID_LOW:
- return true;
- }
-
- return false;
-}
-
-static const struct regmap_config adv7511_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = 0xff,
- .cache_type = REGCACHE_RBTREE,
- .reg_defaults_raw = adv7511_register_defaults,
- .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
-
- .volatile_reg = adv7511_register_volatile,
-};
-
-/* -----------------------------------------------------------------------------
- * Hardware configuration
- */
-
-static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
- const uint16_t *coeff,
- unsigned int scaling_factor)
-{
- unsigned int i;
-
- regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
- ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
-
- if (enable) {
- for (i = 0; i < 12; ++i) {
- regmap_update_bits(adv7511->regmap,
- ADV7511_REG_CSC_UPPER(i),
- 0x1f, coeff[i] >> 8);
- regmap_write(adv7511->regmap,
- ADV7511_REG_CSC_LOWER(i),
- coeff[i] & 0xff);
- }
- }
-
- if (enable)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
- 0xe0, 0x80 | (scaling_factor << 5));
- else
- regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
- 0x80, 0x00);
-
- regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
- ADV7511_CSC_UPDATE_MODE, 0);
-}
-
-static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
-{
- if (packet & 0xff)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
- packet, 0xff);
-
- if (packet & 0xff00) {
- packet >>= 8;
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
- packet, 0xff);
- }
-
- return 0;
-}
-
-static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
-{
- if (packet & 0xff)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
- packet, 0x00);
-
- if (packet & 0xff00) {
- packet >>= 8;
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
- packet, 0x00);
- }
-
- return 0;
-}
-
-/* Coefficients for adv7511 color space conversion */
-static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
- 0x0734, 0x04ad, 0x0000, 0x1c1b,
- 0x1ddc, 0x04ad, 0x1f24, 0x0135,
- 0x0000, 0x04ad, 0x087c, 0x1b77,
-};
-
-static void adv7511_set_config_csc(struct adv7511 *adv7511,
- struct drm_connector *connector,
- bool rgb)
-{
- struct adv7511_video_config config;
- bool output_format_422, output_format_ycbcr;
- unsigned int mode;
- uint8_t infoframe[17];
-
- if (adv7511->edid)
- config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
- else
- config.hdmi_mode = false;
-
- hdmi_avi_infoframe_init(&config.avi_infoframe);
-
- config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
-
- if (rgb) {
- config.csc_enable = false;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- } else {
- config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
- config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
-
- if ((connector->display_info.color_formats &
- DRM_COLOR_FORMAT_YCRCB422) &&
- config.hdmi_mode) {
- config.csc_enable = false;
- config.avi_infoframe.colorspace =
- HDMI_COLORSPACE_YUV422;
- } else {
- config.csc_enable = true;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- }
- }
-
- if (config.hdmi_mode) {
- mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
- switch (config.avi_infoframe.colorspace) {
- case HDMI_COLORSPACE_YUV444:
- output_format_422 = false;
- output_format_ycbcr = true;
- break;
- case HDMI_COLORSPACE_YUV422:
- output_format_422 = true;
- output_format_ycbcr = true;
- break;
- default:
- output_format_422 = false;
- output_format_ycbcr = false;
- break;
- }
- } else {
- mode = ADV7511_HDMI_CFG_MODE_DVI;
- output_format_422 = false;
- output_format_ycbcr = false;
- }
-
- adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
-
- adv7511_set_colormap(adv7511, config.csc_enable,
- config.csc_coefficents,
- config.csc_scaling_factor);
-
- regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
- (output_format_422 << 7) | output_format_ycbcr);
-
- regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
- ADV7511_HDMI_CFG_MODE_MASK, mode);
-
- hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
- sizeof(infoframe));
-
- /* The AVI infoframe id is not configurable */
- regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
- infoframe + 1, sizeof(infoframe) - 1);
-
- adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
-}
-
-static void adv7511_set_link_config(struct adv7511 *adv7511,
- const struct adv7511_link_config *config)
-{
- /*
- * The input style values documented in the datasheet don't match the
- * hardware register field values :-(
- */
- static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
-
- unsigned int clock_delay;
- unsigned int color_depth;
- unsigned int input_id;
-
- clock_delay = (config->clock_delay + 1200) / 400;
- color_depth = config->input_color_depth == 8 ? 3
- : (config->input_color_depth == 10 ? 1 : 2);
-
- /* TODO Support input ID 6 */
- if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
- input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
- ? 5 : 0;
- else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
- input_id = config->embedded_sync ? 8 : 7;
- else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
- input_id = config->embedded_sync ? 4 : 3;
- else
- input_id = config->embedded_sync ? 2 : 1;
-
- regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
- input_id);
- regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
- (color_depth << 4) |
- (input_styles[config->input_style] << 2));
- regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
- config->input_justification << 3);
- regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
- config->sync_pulse << 2);
-
- regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
-
- adv7511->embedded_sync = config->embedded_sync;
- adv7511->hsync_polarity = config->hsync_polarity;
- adv7511->vsync_polarity = config->vsync_polarity;
- adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
-}
-
-static void adv7511_power_on(struct adv7511 *adv7511)
-{
- adv7511->current_edid_segment = -1;
-
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- if (adv7511->i2c_main->irq) {
- /*
- * Documentation says the INT_ENABLE registers are reset in
- * POWER_DOWN mode. My 7511w preserved the bits, however.
- * Still, let's be safe and stick to the documentation.
- */
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
- }
-
- /*
- * Per spec it is allowed to pulse the HPD signal to indicate that the
- * EDID information has changed. Some monitors do this when they wakeup
- * from standby or are enabled. When the HPD goes low the adv7511 is
- * reset and the outputs are disabled which might cause the monitor to
- * go to standby again. To avoid this we ignore the HPD pin for the
- * first few seconds after enabling the output.
- */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HPD_SRC_MASK,
- ADV7511_REG_POWER2_HPD_SRC_NONE);
-
- /*
- * Most of the registers are reset during power down or when HPD is low.
- */
- regcache_sync(adv7511->regmap);
-
- adv7511->powered = true;
-}
-
-static void adv7511_power_off(struct adv7511 *adv7511)
-{
- /* TODO: setup additional power down modes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
- regcache_mark_dirty(adv7511->regmap);
-
- adv7511->powered = false;
-}
-
-/* -----------------------------------------------------------------------------
- * Interrupt and hotplug detection
- */
-
-static bool adv7511_hpd(struct adv7511 *adv7511)
-{
- unsigned int irq0;
- int ret;
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
- if (ret < 0)
- return false;
-
- if (irq0 & ADV7511_INT0_HPD) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_HPD);
- return true;
- }
-
- return false;
-}
-
-static int adv7511_irq_process(struct adv7511 *adv7511)
-{
- unsigned int irq0, irq1;
- int ret;
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
- if (ret < 0)
- return ret;
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
- if (ret < 0)
- return ret;
-
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
-
- if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
-
- if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
- adv7511->edid_read = true;
-
- if (adv7511->i2c_main->irq)
- wake_up_all(&adv7511->wq);
- }
-
- return 0;
-}
-
-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-{
- struct adv7511 *adv7511 = devid;
- int ret;
-
- ret = adv7511_irq_process(adv7511);
- return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
-}
-
-/* -----------------------------------------------------------------------------
- * EDID retrieval
- */
-
-static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
-{
- int ret;
-
- if (adv7511->i2c_main->irq) {
- ret = wait_event_interruptible_timeout(adv7511->wq,
- adv7511->edid_read, msecs_to_jiffies(timeout));
- } else {
- for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
- if (ret < 0)
- break;
-
- if (adv7511->edid_read)
- break;
-
- msleep(25);
- }
- }
-
- return adv7511->edid_read ? 0 : -EIO;
-}
-
-static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
- size_t len)
-{
- struct adv7511 *adv7511 = data;
- struct i2c_msg xfer[2];
- uint8_t offset;
- unsigned int i;
- int ret;
-
- if (len > 128)
- return -EINVAL;
-
- if (adv7511->current_edid_segment != block / 2) {
- unsigned int status;
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
- &status);
- if (ret < 0)
- return ret;
-
- if (status != 2) {
- adv7511->edid_read = false;
- regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
- block);
- ret = adv7511_wait_for_edid(adv7511, 200);
- if (ret < 0)
- return ret;
- }
-
- /* Break this apart, hopefully more I2C controllers will
- * support 64 byte transfers than 256 byte transfers
- */
-
- xfer[0].addr = adv7511->i2c_edid->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &offset;
- xfer[1].addr = adv7511->i2c_edid->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = 64;
- xfer[1].buf = adv7511->edid_buf;
-
- offset = 0;
-
- for (i = 0; i < 4; ++i) {
- ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
- ARRAY_SIZE(xfer));
- if (ret < 0)
- return ret;
- else if (ret != 2)
- return -EIO;
-
- xfer[1].buf += 64;
- offset += 64;
- }
-
- adv7511->current_edid_segment = block / 2;
- }
-
- if (block % 2 == 0)
- memcpy(buf, adv7511->edid_buf, len);
- else
- memcpy(buf, adv7511->edid_buf + 128, len);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * Encoder operations
- */
-
-static int adv7511_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
- struct edid *edid;
- unsigned int count;
-
- /* Reading the EDID only works if the device is powered */
- if (!adv7511->powered) {
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- if (adv7511->i2c_main->irq) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
- }
- adv7511->current_edid_segment = -1;
- }
-
- edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
-
- if (!adv7511->powered)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
-
- kfree(adv7511->edid);
- adv7511->edid = edid;
- if (!edid)
- return 0;
-
- drm_mode_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
-
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
-
- return count;
-}
-
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- adv7511_power_on(adv7511);
- else
- adv7511_power_off(adv7511);
-}
-
-static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
- enum drm_connector_status status;
- unsigned int val;
- bool hpd;
- int ret;
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
- if (ret < 0)
- return connector_status_disconnected;
-
- if (val & ADV7511_STATUS_HPD)
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- hpd = adv7511_hpd(adv7511);
-
- /* The chip resets itself when the cable is disconnected, so in case
- * there is a pending HPD interrupt and the cable is connected there was
- * at least one transition from disconnected to connected and the chip
- * has to be reinitialized. */
- if (status == connector_status_connected && hpd && adv7511->powered) {
- regcache_mark_dirty(adv7511->regmap);
- adv7511_power_on(adv7511);
- adv7511_get_modes(encoder, connector);
- if (adv7511->status == connector_status_connected)
- status = connector_status_disconnected;
- } else {
- /* Renable HPD sensing */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
- ADV7511_REG_POWER2_HPD_SRC_MASK,
- ADV7511_REG_POWER2_HPD_SRC_BOTH);
- }
-
- adv7511->status = status;
- return status;
-}
-
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
- unsigned int low_refresh_rate;
- unsigned int hsync_polarity = 0;
- unsigned int vsync_polarity = 0;
-
- if (adv7511->embedded_sync) {
- unsigned int hsync_offset, hsync_len;
- unsigned int vsync_offset, vsync_len;
-
- hsync_offset = adj_mode->crtc_hsync_start -
- adj_mode->crtc_hdisplay;
- vsync_offset = adj_mode->crtc_vsync_start -
- adj_mode->crtc_vdisplay;
- hsync_len = adj_mode->crtc_hsync_end -
- adj_mode->crtc_hsync_start;
- vsync_len = adj_mode->crtc_vsync_end -
- adj_mode->crtc_vsync_start;
-
- /* The hardware vsync generator has a off-by-one bug */
- vsync_offset += 1;
-
- regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
- ((hsync_offset >> 10) & 0x7) << 5);
- regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
- (hsync_offset >> 2) & 0xff);
- regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
- ((hsync_offset & 0x3) << 6) |
- ((hsync_len >> 4) & 0x3f));
- regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
- ((hsync_len & 0xf) << 4) |
- ((vsync_offset >> 6) & 0xf));
- regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
- ((vsync_offset & 0x3f) << 2) |
- ((vsync_len >> 8) & 0x3));
- regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
- vsync_len & 0xff);
-
- hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
- vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
- } else {
- enum adv7511_sync_polarity mode_hsync_polarity;
- enum adv7511_sync_polarity mode_vsync_polarity;
-
- /**
- * If the input signal is always low or always high we want to
- * invert or let it passthrough depending on the polarity of the
- * current mode.
- **/
- if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
- else
- mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
-
- if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
- else
- mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
-
- if (adv7511->hsync_polarity != mode_hsync_polarity &&
- adv7511->hsync_polarity !=
- ADV7511_SYNC_POLARITY_PASSTHROUGH)
- hsync_polarity = 1;
-
- if (adv7511->vsync_polarity != mode_vsync_polarity &&
- adv7511->vsync_polarity !=
- ADV7511_SYNC_POLARITY_PASSTHROUGH)
- vsync_polarity = 1;
- }
-
- if (mode->vrefresh <= 24000)
- low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (mode->vrefresh <= 25000)
- low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (mode->vrefresh <= 30000)
- low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
- else
- low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
-
- regmap_update_bits(adv7511->regmap, 0xfb,
- 0x6, low_refresh_rate << 1);
- regmap_update_bits(adv7511->regmap, 0x17,
- 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
-
- /*
- * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
- * supposed to give better results.
- */
-
- adv7511->f_tmds = mode->clock;
-}
-
-static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
- .dpms = adv7511_encoder_dpms,
- .mode_valid = adv7511_encoder_mode_valid,
- .mode_set = adv7511_encoder_mode_set,
- .detect = adv7511_encoder_detect,
- .get_modes = adv7511_get_modes,
-};
-
-/* -----------------------------------------------------------------------------
- * Probe & remove
- */
-
-static int adv7511_parse_dt(struct device_node *np,
- struct adv7511_link_config *config)
-{
- const char *str;
- int ret;
-
- memset(config, 0, sizeof(*config));
-
- of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
- if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
- config->input_color_depth != 12)
- return -EINVAL;
-
- ret = of_property_read_string(np, "adi,input-colorspace", &str);
- if (ret < 0)
- return ret;
-
- if (!strcmp(str, "rgb"))
- config->input_colorspace = HDMI_COLORSPACE_RGB;
- else if (!strcmp(str, "yuv422"))
- config->input_colorspace = HDMI_COLORSPACE_YUV422;
- else if (!strcmp(str, "yuv444"))
- config->input_colorspace = HDMI_COLORSPACE_YUV444;
- else
- return -EINVAL;
-
- ret = of_property_read_string(np, "adi,input-clock", &str);
- if (ret < 0)
- return ret;
-
- if (!strcmp(str, "1x"))
- config->input_clock = ADV7511_INPUT_CLOCK_1X;
- else if (!strcmp(str, "2x"))
- config->input_clock = ADV7511_INPUT_CLOCK_2X;
- else if (!strcmp(str, "ddr"))
- config->input_clock = ADV7511_INPUT_CLOCK_DDR;
- else
- return -EINVAL;
-
- if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
- config->input_clock != ADV7511_INPUT_CLOCK_1X) {
- ret = of_property_read_u32(np, "adi,input-style",
- &config->input_style);
- if (ret)
- return ret;
-
- if (config->input_style < 1 || config->input_style > 3)
- return -EINVAL;
-
- ret = of_property_read_string(np, "adi,input-justification",
- &str);
- if (ret < 0)
- return ret;
-
- if (!strcmp(str, "left"))
- config->input_justification =
- ADV7511_INPUT_JUSTIFICATION_LEFT;
- else if (!strcmp(str, "evenly"))
- config->input_justification =
- ADV7511_INPUT_JUSTIFICATION_EVENLY;
- else if (!strcmp(str, "right"))
- config->input_justification =
- ADV7511_INPUT_JUSTIFICATION_RIGHT;
- else
- return -EINVAL;
-
- } else {
- config->input_style = 1;
- config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
- }
-
- of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
- if (config->clock_delay < -1200 || config->clock_delay > 1600)
- return -EINVAL;
-
- config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
-
- /* Hardcode the sync pulse configurations for now. */
- config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
- config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
- config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
-
- return 0;
-}
-
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
-static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
-{
- struct adv7511_link_config link_config;
- struct adv7511 *adv7511;
- struct device *dev = &i2c->dev;
- unsigned int val;
- int ret;
-
- if (!dev->of_node)
- return -EINVAL;
-
- adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
- if (!adv7511)
- return -ENOMEM;
-
- adv7511->powered = false;
- adv7511->status = connector_status_disconnected;
-
- ret = adv7511_parse_dt(dev->of_node, &link_config);
- if (ret)
- return ret;
-
- /*
- * The power down GPIO is optional. If present, toggle it from active to
- * inactive to wake up the encoder.
- */
- adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
- if (IS_ERR(adv7511->gpio_pd))
- return PTR_ERR(adv7511->gpio_pd);
-
- if (adv7511->gpio_pd) {
- mdelay(5);
- gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
- }
-
- adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
- if (IS_ERR(adv7511->regmap))
- return PTR_ERR(adv7511->regmap);
-
- ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
- if (ret)
- return ret;
- dev_dbg(dev, "Rev. %d\n", val);
-
- ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
- ARRAY_SIZE(adv7511_fixed_registers));
- if (ret)
- return ret;
-
- regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
- regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
- packet_i2c_addr);
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
- adv7511_packet_disable(adv7511, 0xffff);
-
- adv7511->i2c_main = i2c;
- adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
- if (!adv7511->i2c_edid)
- return -ENOMEM;
-
- if (i2c->irq) {
- init_waitqueue_head(&adv7511->wq);
-
- ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
- adv7511_irq_handler,
- IRQF_ONESHOT, dev_name(dev),
- adv7511);
- if (ret)
- goto err_i2c_unregister_device;
- }
-
- /* CEC is unused for now */
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
-
- adv7511_power_off(adv7511);
-
- i2c_set_clientdata(i2c, adv7511);
-
- adv7511_set_link_config(adv7511, &link_config);
-
- return 0;
-
-err_i2c_unregister_device:
- i2c_unregister_device(adv7511->i2c_edid);
-
- return ret;
-}
-
-static int adv7511_remove(struct i2c_client *i2c)
-{
- struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
-
- i2c_unregister_device(adv7511->i2c_edid);
-
- kfree(adv7511->edid);
-
- return 0;
-}
-
-static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
- struct drm_encoder_slave *encoder)
-{
-
- struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
-
- encoder->slave_priv = adv7511;
- encoder->slave_funcs = &adv7511_encoder_funcs;
-
- adv7511->encoder = &encoder->base;
-
- return 0;
-}
-
-static const struct i2c_device_id adv7511_i2c_ids[] = {
- { "adv7511", 0 },
- { "adv7511w", 0 },
- { "adv7513", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
-
-static const struct of_device_id adv7511_of_ids[] = {
- { .compatible = "adi,adv7511", },
- { .compatible = "adi,adv7511w", },
- { .compatible = "adi,adv7513", },
- { }
-};
-MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-
-static struct drm_i2c_encoder_driver adv7511_driver = {
- .i2c_driver = {
- .driver = {
- .name = "adv7511",
- .of_match_table = adv7511_of_ids,
- },
- .id_table = adv7511_i2c_ids,
- .probe = adv7511_probe,
- .remove = adv7511_remove,
- },
-
- .encoder_init = adv7511_encoder_init,
-};
-
-static int __init adv7511_init(void)
-{
- return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
-}
-module_init(adv7511_init);
-
-static void __exit adv7511_exit(void)
-{
- drm_i2c_encoder_unregister(&adv7511_driver);
-}
-module_exit(adv7511_exit);
-
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
-MODULE_LICENSE("GPL");
+++ /dev/null
-/*
- * Analog Devices ADV7511 HDMI transmitter driver
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef __DRM_I2C_ADV7511_H__
-#define __DRM_I2C_ADV7511_H__
-
-#include <linux/hdmi.h>
-
-#define ADV7511_REG_CHIP_REVISION 0x00
-#define ADV7511_REG_N0 0x01
-#define ADV7511_REG_N1 0x02
-#define ADV7511_REG_N2 0x03
-#define ADV7511_REG_SPDIF_FREQ 0x04
-#define ADV7511_REG_CTS_AUTOMATIC1 0x05
-#define ADV7511_REG_CTS_AUTOMATIC2 0x06
-#define ADV7511_REG_CTS_MANUAL0 0x07
-#define ADV7511_REG_CTS_MANUAL1 0x08
-#define ADV7511_REG_CTS_MANUAL2 0x09
-#define ADV7511_REG_AUDIO_SOURCE 0x0a
-#define ADV7511_REG_AUDIO_CONFIG 0x0b
-#define ADV7511_REG_I2S_CONFIG 0x0c
-#define ADV7511_REG_I2S_WIDTH 0x0d
-#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
-#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
-#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
-#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
-#define ADV7511_REG_AUDIO_CFG1 0x12
-#define ADV7511_REG_AUDIO_CFG2 0x13
-#define ADV7511_REG_AUDIO_CFG3 0x14
-#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
-#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
-#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
-#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
-#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
-#define ADV7511_REG_DE_GENERATOR (0x35 + (x))
-#define ADV7511_REG_PIXEL_REPETITION 0x3b
-#define ADV7511_REG_VIC_MANUAL 0x3c
-#define ADV7511_REG_VIC_SEND 0x3d
-#define ADV7511_REG_VIC_DETECTED 0x3e
-#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
-#define ADV7511_REG_PACKET_ENABLE0 0x40
-#define ADV7511_REG_POWER 0x41
-#define ADV7511_REG_STATUS 0x42
-#define ADV7511_REG_EDID_I2C_ADDR 0x43
-#define ADV7511_REG_PACKET_ENABLE1 0x44
-#define ADV7511_REG_PACKET_I2C_ADDR 0x45
-#define ADV7511_REG_DSD_ENABLE 0x46
-#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
-#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
-#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
-#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
-#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
-#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
-#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
-#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
-#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
-#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
-#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
-#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
-#define ADV7511_REG_INT(x) (0x96 + (x))
-#define ADV7511_REG_INPUT_CLK_DIV 0x9d
-#define ADV7511_REG_PLL_STATUS 0x9e
-#define ADV7511_REG_HDMI_POWER 0xa1
-#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
-#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
-#define ADV7511_REG_HDCP_STATUS 0xb8
-#define ADV7511_REG_BCAPS 0xbe
-#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
-#define ADV7511_REG_EDID_SEGMENT 0xc4
-#define ADV7511_REG_DDC_STATUS 0xc8
-#define ADV7511_REG_EDID_READ_CTRL 0xc9
-#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
-#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
-#define ADV7511_REG_POWER2 0xd6
-#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
-
-#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
-#define ADV7511_REG_TMDS_CLOCK_INV 0xde
-#define ADV7511_REG_ARC_CTRL 0xdf
-#define ADV7511_REG_CEC_I2C_ADDR 0xe1
-#define ADV7511_REG_CEC_CTRL 0xe2
-#define ADV7511_REG_CHIP_ID_HIGH 0xf5
-#define ADV7511_REG_CHIP_ID_LOW 0xf6
-
-#define ADV7511_CSC_ENABLE BIT(7)
-#define ADV7511_CSC_UPDATE_MODE BIT(5)
-
-#define ADV7511_INT0_HPD BIT(7)
-#define ADV7511_INT0_VSYNC BIT(5)
-#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
-#define ADV7511_INT0_EDID_READY BIT(2)
-#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
-
-#define ADV7511_INT1_DDC_ERROR BIT(7)
-#define ADV7511_INT1_BKSV BIT(6)
-#define ADV7511_INT1_CEC_TX_READY BIT(5)
-#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
-#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
-#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
-#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
-#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
-
-#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
-
-#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
-
-#define ADV7511_POWER_POWER_DOWN BIT(6)
-
-#define ADV7511_HDMI_CFG_MODE_MASK 0x2
-#define ADV7511_HDMI_CFG_MODE_DVI 0x0
-#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
-
-#define ADV7511_AUDIO_SELECT_I2C 0x0
-#define ADV7511_AUDIO_SELECT_SPDIF 0x1
-#define ADV7511_AUDIO_SELECT_DSD 0x2
-#define ADV7511_AUDIO_SELECT_HBR 0x3
-#define ADV7511_AUDIO_SELECT_DST 0x4
-
-#define ADV7511_I2S_SAMPLE_LEN_16 0x2
-#define ADV7511_I2S_SAMPLE_LEN_20 0x3
-#define ADV7511_I2S_SAMPLE_LEN_18 0x4
-#define ADV7511_I2S_SAMPLE_LEN_22 0x5
-#define ADV7511_I2S_SAMPLE_LEN_19 0x8
-#define ADV7511_I2S_SAMPLE_LEN_23 0x9
-#define ADV7511_I2S_SAMPLE_LEN_24 0xb
-#define ADV7511_I2S_SAMPLE_LEN_17 0xc
-#define ADV7511_I2S_SAMPLE_LEN_21 0xd
-
-#define ADV7511_SAMPLE_FREQ_44100 0x0
-#define ADV7511_SAMPLE_FREQ_48000 0x2
-#define ADV7511_SAMPLE_FREQ_32000 0x3
-#define ADV7511_SAMPLE_FREQ_88200 0x8
-#define ADV7511_SAMPLE_FREQ_96000 0xa
-#define ADV7511_SAMPLE_FREQ_176400 0xc
-#define ADV7511_SAMPLE_FREQ_192000 0xe
-
-#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
-#define ADV7511_STATUS_HPD BIT(6)
-#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
-#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
-
-#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
-#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
-#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
-#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
-#define ADV7511_PACKET_ENABLE_GC BIT(7)
-#define ADV7511_PACKET_ENABLE_SPD BIT(6)
-#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
-#define ADV7511_PACKET_ENABLE_ACP BIT(4)
-#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
-#define ADV7511_PACKET_ENABLE_GM BIT(2)
-#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
-#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
-
-#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
-#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
-#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
-#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
-#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
-#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
-#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
-
-#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
-#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
-#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
-#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
-
-#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
-#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
-
-#define ADV7511_AUDIO_SOURCE_I2S 0
-#define ADV7511_AUDIO_SOURCE_SPDIF 1
-
-#define ADV7511_I2S_FORMAT_I2S 0
-#define ADV7511_I2S_FORMAT_RIGHT_J 1
-#define ADV7511_I2S_FORMAT_LEFT_J 2
-
-#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
-#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
-#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
-#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
-#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
-#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
-#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
-#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
-
-enum adv7511_input_clock {
- ADV7511_INPUT_CLOCK_1X,
- ADV7511_INPUT_CLOCK_2X,
- ADV7511_INPUT_CLOCK_DDR,
-};
-
-enum adv7511_input_justification {
- ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
- ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
- ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
-};
-
-enum adv7511_input_sync_pulse {
- ADV7511_INPUT_SYNC_PULSE_DE = 0,
- ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
- ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
- ADV7511_INPUT_SYNC_PULSE_NONE = 3,
-};
-
-/**
- * enum adv7511_sync_polarity - Polarity for the input sync signals
- * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
- * the currently configured mode.
- * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
- * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
- *
- * If the polarity is set to either LOW or HIGH the driver will configure the
- * ADV7511 to internally invert the sync signal if required to match the sync
- * polarity setting for the currently selected output mode.
- *
- * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
- * unchanged. This is used when the upstream graphics core already generates
- * the sync signals with the correct polarity.
- */
-enum adv7511_sync_polarity {
- ADV7511_SYNC_POLARITY_PASSTHROUGH,
- ADV7511_SYNC_POLARITY_LOW,
- ADV7511_SYNC_POLARITY_HIGH,
-};
-
-/**
- * struct adv7511_link_config - Describes adv7511 hardware configuration
- * @input_color_depth: Number of bits per color component (8, 10 or 12)
- * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
- * @input_clock: The input video clock style (1x, 2x, DDR)
- * @input_style: The input component arrangement variant
- * @input_justification: Video input format bit justification
- * @clock_delay: Clock delay for the input clock (in ps)
- * @embedded_sync: Video input uses BT.656-style embedded sync
- * @sync_pulse: Select the sync pulse
- * @vsync_polarity: vsync input signal configuration
- * @hsync_polarity: hsync input signal configuration
- */
-struct adv7511_link_config {
- unsigned int input_color_depth;
- enum hdmi_colorspace input_colorspace;
- enum adv7511_input_clock input_clock;
- unsigned int input_style;
- enum adv7511_input_justification input_justification;
-
- int clock_delay;
-
- bool embedded_sync;
- enum adv7511_input_sync_pulse sync_pulse;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
-};
-
-/**
- * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
- * @ADV7511_CSC_SCALING_1: CSC results are not scaled
- * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
- * @ADV7511_CSC_SCALING_4: CSC results are scalled by a factor of four
- */
-enum adv7511_csc_scaling {
- ADV7511_CSC_SCALING_1 = 0,
- ADV7511_CSC_SCALING_2 = 1,
- ADV7511_CSC_SCALING_4 = 2,
-};
-
-/**
- * struct adv7511_video_config - Describes adv7511 hardware configuration
- * @csc_enable: Whether to enable color space conversion
- * @csc_scaling_factor: Color space conversion scaling factor
- * @csc_coefficents: Color space conversion coefficents
- * @hdmi_mode: Whether to use HDMI or DVI output mode
- * @avi_infoframe: HDMI infoframe
- */
-struct adv7511_video_config {
- bool csc_enable;
- enum adv7511_csc_scaling csc_scaling_factor;
- const uint16_t *csc_coefficents;
-
- bool hdmi_mode;
- struct hdmi_avi_infoframe avi_infoframe;
-};
-
-#endif /* __DRM_I2C_ADV7511_H__ */
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
- if (crtc) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
+ if (crtc)
+ drm_crtc_force_disable(crtc);
}
return 0;
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
* be moved to FW_FAILED.
*/
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
-#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
ret = intel_color_check(crtc, crtc_state);
if (ret)
return ret;
+
+ /*
+ * Changing color management on Intel hardware is
+ * handled as part of planes update.
+ */
+ crtc_state->planes_changed = true;
}
ret = 0;
intel_dp->detect_done = false;
- if (intel_connector->detect_edid)
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
return connector_status_connected;
else
return connector_status_disconnected;
uint32_t *const batch,
uint32_t index)
{
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
- IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
uint32_t *offset)
{
int ret;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
uint32_t scratch_addr
= engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
return -ENODEV;
}
+ /*
+ * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+ * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+ * vswing instead. Low vswing results in some display flickers, so
+ * let's simply ignore the OpRegion panel type on SKL for now.
+ */
+ if (IS_SKYLAKE(dev_priv)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
return ret - 1;
}
static void gen9_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
I915_WRITE(CHICKEN_PAR1_1,
static void kabylake_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
gen9_init_clock_gating(dev);
static void skylake_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
gen9_init_clock_gating(dev);
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
struct regmap *regmap;
};
+static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_hdmi, encoder);
+}
+
static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
{
45250000, {
{
}
-static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
-}
-
-static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
-{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
}
-static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
+static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
}
static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
- .mode_set = dw_hdmi_imx_encoder_mode_set,
- .prepare = dw_hdmi_imx_encoder_prepare,
- .commit = dw_hdmi_imx_encoder_commit,
+ .enable = dw_hdmi_imx_encoder_enable,
.disable = dw_hdmi_imx_encoder_disable,
+ .atomic_check = dw_hdmi_imx_atomic_check,
};
static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
*/
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reservation.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
struct imx_drm_crtc *crtc[MAX_CRTC];
unsigned int pipes;
struct drm_fbdev_cma *fbhelper;
+ struct drm_atomic_state *state;
};
struct imx_drm_crtc {
return 0;
}
-static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
-{
- struct imx_drm_device *imxdrm = crtc->dev->dev_private;
- unsigned i;
-
- for (i = 0; i < MAX_CRTC; i++)
- if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
- return imxdrm->crtc[i];
-
- return NULL;
-}
-
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin, u32 bus_flags)
-{
- struct imx_drm_crtc_helper_funcs *helper;
- struct imx_drm_crtc *imx_crtc;
-
- imx_crtc = imx_drm_find_crtc(encoder->crtc);
- if (!imx_crtc)
- return -EINVAL;
-
- helper = &imx_crtc->imx_drm_helper_funcs;
- if (helper->set_interface_pix_fmt)
- return helper->set_interface_pix_fmt(encoder->crtc,
- bus_format, hsync_pin, vsync_pin,
- bus_flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
-
-int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
-{
- return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
- DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE);
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
-
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
return drm_crtc_vblank_get(imx_drm_crtc->crtc);
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_gem_cma_object *cma_obj;
+ struct fence *excl;
+ unsigned shared_count;
+ struct fence **shared;
+ unsigned int i, j;
+ int ret;
+
+ /* Wait for fences. */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ plane_state = crtc->primary->state;
+ if (plane_state->fb) {
+ cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+ if (cma_obj->base.dma_buf) {
+ ret = reservation_object_get_fences_rcu(
+ cma_obj->base.dma_buf->resv, &excl,
+ &shared_count, &shared);
+ if (unlikely(ret))
+ DRM_ERROR("failed to get fences "
+ "for buffer\n");
+
+ if (excl) {
+ fence_wait(excl, false);
+ fence_put(excl);
+ }
+ for (j = 0; j < shared_count; i++) {
+ fence_wait(shared[j], false);
+ fence_put(shared[j]);
+ }
+ }
+ }
+ }
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, true);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+ .atomic_commit_tail = imx_drm_atomic_commit_tail,
};
/*
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
drm_mode_config_init(drm);
}
}
+ drm_mode_config_reset(drm);
+
/*
* All components are now initialised, so setup the fb helper.
* The fb helper takes copies of key hardware information, so the
dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
legacyfb_depth = 16;
}
- drm_helper_disable_unused_functions(drm);
imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
drm->mode_config.num_crtc, MAX_CRTC);
if (IS_ERR(imxdrm->fbhelper)) {
};
static struct drm_driver imx_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct imx_drm_device *imxdrm;
/* The drm_dev is NULL before .load hook is called */
if (drm_dev == NULL)
drm_kms_helper_poll_disable(drm_dev);
+ imxdrm = drm_dev->dev_private;
+ imxdrm->state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(imxdrm->state)) {
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(imxdrm->state);
+ }
+
return 0;
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct imx_drm_device *imx_drm;
if (drm_dev == NULL)
return 0;
- drm_helper_resume_force_mode(drm_dev);
+ imx_drm = drm_dev->dev_private;
+ drm_atomic_helper_resume(drm_dev, imx_drm->state);
drm_kms_helper_poll_enable(drm_dev);
return 0;
unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
+struct imx_crtc_state {
+ struct drm_crtc_state base;
+ u32 bus_format;
+ u32 bus_flags;
+ int di_hsync_pin;
+ int di_vsync_pin;
+};
+
+static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct imx_crtc_state, base);
+}
+
struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
- int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin,
- u32 bus_flags);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin, u32 bus_flags);
-int imx_drm_set_bus_format(struct drm_encoder *encoder,
- u32 bus_format);
-
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
#include <linux/clk.h>
#include <linux/component.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
#define LDB_BGREF_RMODE_INT (1 << 15)
-#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
-#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
-
struct imx_ldb;
struct imx_ldb_channel {
int edid_len;
struct drm_display_mode mode;
int mode_valid;
- int bus_format;
+ u32 bus_format;
};
+static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
+{
+ return container_of(c, struct imx_ldb_channel, connector);
+}
+
+static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_ldb_channel, encoder);
+}
+
struct bus_mux {
int reg;
int shift;
return connector_status_connected;
}
+static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
+ u32 bus_format)
+{
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ }
+}
+
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
imx_ldb_ch->panel->funcs->get_modes) {
- struct drm_display_info *di = &connector->display_info;
-
num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
- if (!imx_ldb_ch->bus_format && di->num_bus_formats)
- imx_ldb_ch->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
return &imx_ldb_ch->encoder;
}
-static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
unsigned long serial_clk, unsigned long di_clk)
{
chno);
}
-static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
- struct imx_ldb *ldb = imx_ldb_ch->ldb;
- int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
- u32 bus_format;
-
- switch (imx_ldb_ch->bus_format) {
- default:
- dev_warn(ldb->dev,
- "could not determine data mapping, default to 18-bit \"spwg\"\n");
- /* fallthrough */
- case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
- bus_format = MEDIA_BUS_FMT_RGB666_1X18;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
- bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- if (imx_ldb_ch->chno == 0 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
- if (imx_ldb_ch->chno == 1 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
- bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- if (imx_ldb_ch->chno == 0 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
- LDB_BIT_MAP_CH0_JEIDA;
- if (imx_ldb_ch->chno == 1 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
- LDB_BIT_MAP_CH1_JEIDA;
- break;
- }
-
- imx_drm_set_bus_format(encoder, bus_format);
-}
-
-static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
+static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
+
clk_prepare_enable(ldb->clk[0]);
clk_prepare_enable(ldb->clk[1]);
+ } else {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
}
if (imx_ldb_ch == &ldb->channel[0] || dual) {
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ u32 bus_format = imx_ldb_ch->bus_format;
if (mode->clock > 170000) {
dev_warn(ldb->dev,
}
/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
- if (imx_ldb_ch == &ldb->channel[0]) {
+ if (imx_ldb_ch == &ldb->channel[0] || dual) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
}
- if (imx_ldb_ch == &ldb->channel[1]) {
+ if (imx_ldb_ch == &ldb->channel[1] || dual) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
}
+
+ if (!bus_format) {
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_connector_in_state(encoder->crtc->state->state,
+ connector, conn_state, i) {
+ struct drm_display_info *di = &connector->display_info;
+
+ if (conn_state->crtc == encoder->crtc &&
+ di->num_bus_formats) {
+ bus_format = di->bus_formats[0];
+ break;
+ }
+ }
+ }
+ imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
}
static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imx_ldb_ch->panel);
}
+static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ u32 bus_format = imx_ldb_ch->bus_format;
+
+ /* Bus format description in DT overrides connector display info. */
+ if (!bus_format && di->num_bus_formats)
+ bus_format = di->bus_formats[0];
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
+
static const struct drm_connector_funcs imx_ldb_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_ldb_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
};
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
- .dpms = imx_ldb_encoder_dpms,
- .prepare = imx_ldb_encoder_prepare,
- .commit = imx_ldb_encoder_commit,
.mode_set = imx_ldb_encoder_mode_set,
+ .enable = imx_ldb_encoder_enable,
.disable = imx_ldb_encoder_disable,
+ .atomic_check = imx_ldb_encoder_atomic_check,
};
static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
struct imx_ldb_channel *imx_ldb_ch)
{
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ struct drm_encoder *encoder = &imx_ldb_ch->encoder;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
- imx_ldb_ch->child);
+ ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
if (ret)
return ret;
return ret;
}
- drm_encoder_helper_add(&imx_ldb_ch->encoder,
- &imx_ldb_encoder_helper_funcs);
- drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+ drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
drm_connector_helper_add(&imx_ldb_ch->connector,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
- if (imx_ldb_ch->panel)
- drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+ if (imx_ldb_ch->panel) {
+ ret = drm_panel_attach(imx_ldb_ch->panel,
+ &imx_ldb_ch->connector);
+ if (ret)
+ return ret;
+ }
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
- &imx_ldb_ch->encoder);
+ drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
return 0;
}
struct imx_ldb_channel *channel;
struct device_node *ddc_node;
struct device_node *ep;
+ int bus_format;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
}
}
- channel->bus_format = of_get_bus_format(dev, child);
- if (channel->bus_format == -EINVAL) {
+ bus_format = of_get_bus_format(dev, child);
+ if (bus_format == -EINVAL) {
/*
* If no bus format was specified in the device tree,
* we can still get it from the connected panel later.
*/
if (channel->panel && channel->panel->funcs &&
channel->panel->funcs->get_modes)
- channel->bus_format = 0;
+ bus_format = 0;
}
- if (channel->bus_format < 0) {
+ if (bus_format < 0) {
dev_err(dev, "could not determine data mapping: %d\n",
- channel->bus_format);
- return channel->bus_format;
+ bus_format);
+ return bus_format;
}
+ channel->bus_format = bus_format;
ret = imx_ldb_register(drm, channel);
if (ret)
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <video/imx-ipu-v3.h>
/* TVE_TST_MODE_REG */
#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
-#define con_to_tve(x) container_of(x, struct imx_tve, connector)
-#define enc_to_tve(x) container_of(x, struct imx_tve, encoder)
-
enum {
TVE_MODE_TVOUT,
TVE_MODE_VGA,
spinlock_t lock; /* register lock */
bool enabled;
int mode;
+ int di_hsync_pin;
+ int di_vsync_pin;
struct regmap *regmap;
struct regulator *dac_reg;
struct clk *di_sel_clk;
struct clk_hw clk_hw_di;
struct clk *di_clk;
- int vsync_pin;
- int hsync_pin;
};
+static inline struct imx_tve *con_to_tve(struct drm_connector *c)
+{
+ return container_of(c, struct imx_tve, connector);
+}
+
+static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_tve, encoder);
+}
+
static void tve_lock(void *__tve)
__acquires(&tve->lock)
{
tve->enabled = true;
clk_prepare_enable(tve->clk);
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_IPU_CLK_EN | TVE_EN,
- TVE_IPU_CLK_EN | TVE_EN);
+ TVE_EN, TVE_EN);
}
/* clear interrupt status register */
if (tve->enabled) {
tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_IPU_CLK_EN | TVE_EN, 0);
+ TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
}
return &tve->encoder;
}
-static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_tve *tve = enc_to_tve(encoder);
- int ret;
-
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE);
- if (ret < 0)
- dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
-}
-
-static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_tve *tve = enc_to_tve(encoder);
-
- tve_disable(tve);
-
- switch (tve->mode) {
- case TVE_MODE_VGA:
- imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
- tve->hsync_pin, tve->vsync_pin,
- DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE);
- break;
- case TVE_MODE_TVOUT:
- imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
- break;
- }
-}
-
static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
ret);
}
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
+
if (tve->mode == TVE_MODE_VGA)
ret = tve_setup_vga(tve);
else
dev_err(tve->dev, "failed to set configuration: %d\n", ret);
}
-static void imx_tve_encoder_commit(struct drm_encoder *encoder)
+static void imx_tve_encoder_enable(struct drm_encoder *encoder)
{
struct imx_tve *tve = enc_to_tve(encoder);
tve_disable(tve);
}
+static int imx_tve_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_tve *tve = enc_to_tve(encoder);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
+ imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
+ imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_tve_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_tve_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
};
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
- .dpms = imx_tve_encoder_dpms,
- .prepare = imx_tve_encoder_prepare,
.mode_set = imx_tve_encoder_mode_set,
- .commit = imx_tve_encoder_commit,
+ .enable = imx_tve_encoder_enable,
.disable = imx_tve_encoder_disable,
+ .atomic_check = imx_tve_atomic_check,
};
static irqreturn_t imx_tve_irq_handler(int irq, void *data)
encoder_type = tve->mode == TVE_MODE_VGA ?
DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
- ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
- tve->dev->of_node);
+ ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
if (ret)
return ret;
if (tve->mode == TVE_MODE_VGA) {
ret = of_property_read_u32(np, "fsl,hsync-pin",
- &tve->hsync_pin);
+ &tve->di_hsync_pin);
if (ret < 0) {
- dev_err(dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get hsync pin\n");
return ret;
}
- ret |= of_property_read_u32(np, "fsl,vsync-pin",
- &tve->vsync_pin);
+ ret = of_property_read_u32(np, "fsl,vsync-pin",
+ &tve->di_vsync_pin);
if (ret < 0) {
dev_err(dev, "failed to get vsync pin\n");
tve->dac_reg = devm_regulator_get(dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
- regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+ ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+ if (ret)
+ return ret;
ret = regulator_enable(tve->dac_reg);
if (ret)
return ret;
#include <linux/device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#define DRIVER_DESC "i.MX IPUv3 Graphics"
-enum ipu_flip_status {
- IPU_FLIP_NONE,
- IPU_FLIP_PENDING,
- IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
- struct work_struct unref_work;
- struct drm_gem_object *bo;
- struct drm_pending_vblank_event *page_flip_event;
- struct work_struct fence_work;
- struct ipu_crtc *crtc;
- struct fence *excl;
- unsigned shared_count;
- struct fence **shared;
-};
-
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
struct ipu_dc *dc;
struct ipu_di *di;
- int enabled;
- enum ipu_flip_status flip_state;
- struct workqueue_struct *flip_queue;
- struct ipu_flip_work *flip_work;
int irq;
- u32 bus_format;
- u32 bus_flags;
- int di_hsync_pin;
- int di_vsync_pin;
};
-#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
+static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct ipu_crtc, base);
+}
-static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_enable(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- if (ipu_crtc->enabled)
- return;
-
ipu_dc_enable(ipu);
- ipu_plane_enable(ipu_crtc->plane[0]);
- /* Start DC channel and DI after IDMAC */
ipu_dc_enable_channel(ipu_crtc->dc);
ipu_di_enable(ipu_crtc->di);
- drm_crtc_vblank_on(&ipu_crtc->base);
-
- ipu_crtc->enabled = 1;
}
-static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_disable(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- if (!ipu_crtc->enabled)
- return;
-
- /* Stop DC channel and DI before IDMAC */
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
- ipu_plane_disable(ipu_crtc->plane[0]);
ipu_dc_disable(ipu);
- drm_crtc_vblank_off(&ipu_crtc->base);
- ipu_crtc->enabled = 0;
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
}
-static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void imx_drm_crtc_reset(struct drm_crtc *crtc)
{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct imx_crtc_state *state;
- dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- ipu_fb_enable(ipu_crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- ipu_fb_disable(ipu_crtc);
- break;
+ if (crtc->state) {
+ if (crtc->state->mode_blob)
+ drm_property_unreference_blob(crtc->state->mode_blob);
+
+ state = to_imx_crtc_state(crtc->state);
+ memset(state, 0, sizeof(*state));
+ } else {
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+ crtc->state = &state->base;
}
+
+ state->base.crtc = crtc;
}
-static void ipu_flip_unref_work_func(struct work_struct *__work)
+static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct ipu_flip_work *work =
- container_of(__work, struct ipu_flip_work, unref_work);
+ struct imx_crtc_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
- drm_gem_object_unreference_unlocked(work->bo);
- kfree(work);
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ WARN_ON(state->base.crtc != crtc);
+ state->base.crtc = crtc;
+
+ return &state->base;
}
-static void ipu_flip_fence_work_func(struct work_struct *__work)
+static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct ipu_flip_work *work =
- container_of(__work, struct ipu_flip_work, fence_work);
- int i;
-
- /* wait for all fences attached to the FB obj to signal */
- if (work->excl) {
- fence_wait(work->excl, false);
- fence_put(work->excl);
- }
- for (i = 0; i < work->shared_count; i++) {
- fence_wait(work->shared[i], false);
- fence_put(work->shared[i]);
- }
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_imx_crtc_state(state));
+}
+
+static const struct drm_crtc_funcs ipu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = imx_drm_crtc_reset,
+ .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
+ .atomic_destroy_state = imx_drm_crtc_destroy_state,
+};
- work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_crtc *ipu_crtc = dev_id;
+
+ imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+
+ return IRQ_HANDLED;
}
-static int ipu_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct ipu_flip_work *flip_work;
+ struct videomode vm;
int ret;
- if (ipu_crtc->flip_state != IPU_FLIP_NONE)
- return -EBUSY;
-
- ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
- if (ret) {
- dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
- list_del(&event->base.link);
-
- return ret;
- }
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
- flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
- if (!flip_work) {
- ret = -ENOMEM;
- goto put_vblank;
- }
- INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
- flip_work->page_flip_event = event;
+ ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+ if (ret)
+ return false;
- /* get BO backing the old framebuffer and take a reference */
- flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
- drm_gem_object_reference(flip_work->bo);
+ if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
+ return false;
- ipu_crtc->flip_work = flip_work;
- /*
- * If the object has a DMABUF attached, we need to wait on its fences
- * if there are any.
- */
- if (cma_obj->base.dma_buf) {
- INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
- flip_work->crtc = ipu_crtc;
+ drm_display_mode_from_videomode(&vm, adjusted_mode);
- ret = reservation_object_get_fences_rcu(
- cma_obj->base.dma_buf->resv, &flip_work->excl,
- &flip_work->shared_count, &flip_work->shared);
+ return true;
+}
- if (unlikely(ret)) {
- DRM_ERROR("failed to get fences for buffer\n");
- goto free_flip_work;
- }
+static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
- /* No need to queue the worker if the are no fences */
- if (!flip_work->excl && !flip_work->shared_count) {
- ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
- } else {
- ipu_crtc->flip_state = IPU_FLIP_PENDING;
- queue_work(ipu_crtc->flip_queue,
- &flip_work->fence_work);
- }
- } else {
- ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
- }
+ if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+ return -EINVAL;
return 0;
-
-free_flip_work:
- drm_gem_object_unreference_unlocked(flip_work->bo);
- kfree(flip_work);
- ipu_crtc->flip_work = NULL;
-put_vblank:
- imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
- return ret;
}
-static const struct drm_crtc_funcs ipu_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .destroy = drm_crtc_cleanup,
- .page_flip = ipu_page_flip,
-};
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
-static int ipu_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct ipu_di_signal_cfg sig_cfg = {};
unsigned long encoder_types = 0;
- int ret;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
mode->hdisplay);
dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
mode->vdisplay);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc)
encoder_types |= BIT(encoder->encoder_type);
+ }
dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
__func__, encoder_types);
else
sig_cfg.clkflags = 0;
- sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+ sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
/* Default to driving pixel data on negative clock edges */
- sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+ sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
DRM_BUS_FLAG_PIXDATA_POSEDGE);
- sig_cfg.bus_format = ipu_crtc->bus_format;
+ sig_cfg.bus_format = imx_crtc_state->bus_format;
sig_cfg.v_to_h_sync = 0;
- sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
- sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
+ sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
+ sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
drm_display_mode_to_videomode(mode, &sig_cfg.mode);
- ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
- mode->flags & DRM_MODE_FLAG_INTERLACE,
- ipu_crtc->bus_format, mode->hdisplay);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing display controller failed with %d\n",
- ret);
- return ret;
- }
-
- ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing panel failed with %d\n", ret);
- return ret;
- }
-
- return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
- crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x, y, mode->hdisplay, mode->vdisplay,
- mode->flags & DRM_MODE_FLAG_INTERLACE);
-}
-
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
- unsigned long flags;
- struct drm_device *drm = ipu_crtc->base.dev;
- struct ipu_flip_work *work = ipu_crtc->flip_work;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- if (work->page_flip_event)
- drm_crtc_send_vblank_event(&ipu_crtc->base,
- work->page_flip_event);
- imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
-static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
-{
- struct ipu_crtc *ipu_crtc = dev_id;
-
- imx_drm_handle_vblank(ipu_crtc->imx_crtc);
-
- if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
- struct ipu_plane *plane = ipu_crtc->plane[0];
-
- ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
- plane->x, plane->y);
- ipu_crtc_handle_pageflip(ipu_crtc);
- queue_work(ipu_crtc->flip_queue,
- &ipu_crtc->flip_work->unref_work);
- ipu_crtc->flip_state = IPU_FLIP_NONE;
- }
-
- return IRQ_HANDLED;
-}
-
-static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct videomode vm;
- int ret;
-
- drm_display_mode_to_videomode(adjusted_mode, &vm);
-
- ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
- if (ret)
- return false;
-
- drm_display_mode_from_videomode(&vm, adjusted_mode);
-
- return true;
-}
-
-static void ipu_crtc_prepare(struct drm_crtc *crtc)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_fb_disable(ipu_crtc);
-}
-
-static void ipu_crtc_commit(struct drm_crtc *crtc)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_fb_enable(ipu_crtc);
+ ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+ mode->flags & DRM_MODE_FLAG_INTERLACE,
+ imx_crtc_state->bus_format, mode->hdisplay);
+ ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
}
static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
- .dpms = ipu_crtc_dpms,
.mode_fixup = ipu_crtc_mode_fixup,
- .mode_set = ipu_crtc_mode_set,
- .prepare = ipu_crtc_prepare,
- .commit = ipu_crtc_commit,
+ .mode_set_nofb = ipu_crtc_mode_set_nofb,
+ .atomic_check = ipu_crtc_atomic_check,
+ .atomic_begin = ipu_crtc_atomic_begin,
+ .disable = ipu_crtc_disable,
+ .enable = ipu_crtc_enable,
};
static int ipu_enable_vblank(struct drm_crtc *crtc)
disable_irq_nosync(ipu_crtc->irq);
}
-static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_crtc->bus_format = bus_format;
- ipu_crtc->bus_flags = bus_flags;
- ipu_crtc->di_hsync_pin = hsync_pin;
- ipu_crtc->di_vsync_pin = vsync_pin;
-
- return 0;
-}
-
static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
.enable_vblank = ipu_enable_vblank,
.disable_vblank = ipu_disable_vblank,
- .set_interface_pix_fmt = ipu_set_interface_pix_fmt,
.crtc_funcs = &ipu_crtc_funcs,
.crtc_helper_funcs = &ipu_helper_funcs,
};
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(ipu_crtc->plane[1]))
+ if (IS_ERR(ipu_crtc->plane[1])) {
ipu_crtc->plane[1] = NULL;
+ } else {
+ ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
+ if (ret) {
+ dev_err(ipu_crtc->dev, "getting plane 1 "
+ "resources failed with %d.\n", ret);
+ goto err_put_plane0_res;
+ }
+ }
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_plane_res;
+ goto err_put_plane1_res;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
- ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
return 0;
-err_put_plane_res:
+err_put_plane1_res:
+ if (ipu_crtc->plane[1])
+ ipu_plane_put_resources(ipu_crtc->plane[1]);
+err_put_plane0_res:
ipu_plane_put_resources(ipu_crtc->plane[0]);
err_remove_crtc:
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
- destroy_workqueue(ipu_crtc->flip_queue);
- ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
+ if (ipu_crtc->plane[1])
+ ipu_plane_put_resources(ipu_crtc->plane[1]);
+ ipu_plane_put_resources(ipu_crtc->plane[0]);
}
static const struct component_ops ipu_crtc_ops = {
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "video/imx-ipu-v3.h"
#include "ipuv3-plane.h"
-#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
+static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+{
+ return container_of(p, struct ipu_plane, base);
+}
static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_ARGB1555,
IPU_IRQ_EOF);
}
-static int calc_vref(struct drm_display_mode *mode)
+static inline unsigned long
+drm_plane_state_to_eba(struct drm_plane_state *state)
{
- unsigned long htotal, vtotal;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
- htotal = mode->htotal;
- vtotal = mode->vtotal;
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ BUG_ON(!cma_obj);
- if (!htotal || !vtotal)
- return 60;
-
- return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
+ return cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * (state->src_y >> 16) +
+ (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
}
-static inline int calc_bandwidth(int width, int height, unsigned int vref)
+static inline unsigned long
+drm_plane_state_to_ubo(struct drm_plane_state *state)
{
- return width * height * vref;
-}
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state);
-int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
- int x, int y)
-{
- struct drm_gem_cma_object *cma_obj[3];
- unsigned long eba, ubo, vbo;
- int active, i;
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
+ BUG_ON(!cma_obj);
- for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
- cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
- if (!cma_obj[i]) {
- DRM_DEBUG_KMS("plane %d entry is null.\n", i);
- return -EFAULT;
- }
- }
+ return cma_obj->paddr + fb->offsets[1] +
+ fb->pitches[1] * (state->src_y >> 16) / 2 +
+ (state->src_x >> 16) / 2 - eba;
+}
- eba = cma_obj[0]->paddr + fb->offsets[0] +
- fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+static inline unsigned long
+drm_plane_state_to_vbo(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state);
- if (eba & 0x7) {
- DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
- return -EINVAL;
- }
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
+ BUG_ON(!cma_obj);
- if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
- DRM_DEBUG_KMS("pitches out of range.\n");
- return -EINVAL;
- }
+ return cma_obj->paddr + fb->offsets[2] +
+ fb->pitches[2] * (state->src_y >> 16) / 2 +
+ (state->src_x >> 16) / 2 - eba;
+}
- if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
- DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
- return -EINVAL;
- }
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane *plane = &ipu_plane->base;
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
+ int active;
- ipu_plane->stride[0] = fb->pitches[0];
+ eba = drm_plane_state_to_eba(state);
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
+ if (old_state->fb)
+ break;
+
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = cma_obj[1]->paddr + fb->offsets[1] +
- fb->pitches[1] * y / 2 + x / 2 - eba;
- vbo = cma_obj[2]->paddr + fb->offsets[2] +
- fb->pitches[2] * y / 2 + x / 2 - eba;
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
- if ((ubo & 0x7) || (vbo & 0x7)) {
- DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
- return -EINVAL;
- }
-
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
- DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
- return -EINVAL;
- }
-
- if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
- (ipu_plane->v_offset != vbo))) {
- DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
- return -EINVAL;
- }
-
- if (fb->pitches[1] != fb->pitches[2]) {
- DRM_DEBUG_KMS("U/V pitches must be identical.\n");
- return -EINVAL;
- }
-
- if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
- DRM_DEBUG_KMS("U/V pitches out of range.\n");
- return -EINVAL;
- }
-
- if (ipu_plane->enabled &&
- (ipu_plane->stride[1] != fb->pitches[1])) {
- DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
- return -EINVAL;
- }
-
- ipu_plane->u_offset = ubo;
- ipu_plane->v_offset = vbo;
- ipu_plane->stride[1] = fb->pitches[1];
+ if (fb->pixel_format == DRM_FORMAT_YUV420)
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+ else
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], vbo, ubo);
dev_dbg(ipu_plane->base.dev->dev,
- "phys = %pad %pad %pad, x = %d, y = %d",
- &cma_obj[0]->paddr, &cma_obj[1]->paddr,
- &cma_obj[2]->paddr, x, y);
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
break;
default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
- &cma_obj[0]->paddr, x, y);
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+
break;
}
- if (ipu_plane->enabled) {
+ if (old_state->fb) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
}
-
- /* cache offsets for subsequent pageflips */
- ipu_plane->x = x;
- ipu_plane->y = y;
-
- return 0;
-}
-
-int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h, bool interlaced)
-{
- struct device *dev = ipu_plane->base.dev->dev;
- int ret;
-
- /* no scaling */
- if (src_w != crtc_w || src_h != crtc_h)
- return -EINVAL;
-
- /* clip to crtc bounds */
- if (crtc_x < 0) {
- if (-crtc_x > crtc_w)
- return -EINVAL;
- src_x += -crtc_x;
- src_w -= -crtc_x;
- crtc_w -= -crtc_x;
- crtc_x = 0;
- }
- if (crtc_y < 0) {
- if (-crtc_y > crtc_h)
- return -EINVAL;
- src_y += -crtc_y;
- src_h -= -crtc_y;
- crtc_h -= -crtc_y;
- crtc_y = 0;
- }
- if (crtc_x + crtc_w > mode->hdisplay) {
- if (crtc_x > mode->hdisplay)
- return -EINVAL;
- crtc_w = mode->hdisplay - crtc_x;
- src_w = crtc_w;
- }
- if (crtc_y + crtc_h > mode->vdisplay) {
- if (crtc_y > mode->vdisplay)
- return -EINVAL;
- crtc_h = mode->vdisplay - crtc_y;
- src_h = crtc_h;
- }
- /* full plane minimum width is 13 pixels */
- if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
- return -EINVAL;
- if (crtc_h < 2)
- return -EINVAL;
-
- /*
- * since we cannot touch active IDMAC channels, we do not support
- * resizing the enabled plane or changing its format
- */
- if (ipu_plane->enabled) {
- if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
- fb->pixel_format != ipu_plane->base.fb->pixel_format)
- return -EINVAL;
-
- return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
- }
-
- switch (ipu_plane->dp_flow) {
- case IPU_DP_FLOW_SYNC_BG:
- ret = ipu_dp_setup_channel(ipu_plane->dp,
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
- if (ret) {
- dev_err(dev,
- "initializing display processor failed with %d\n",
- ret);
- return ret;
- }
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
- break;
- case IPU_DP_FLOW_SYNC_FG:
- ipu_dp_setup_channel(ipu_plane->dp,
- ipu_drm_fourcc_to_colorspace(fb->pixel_format),
- IPUV3_COLORSPACE_UNKNOWN);
- ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
- /* Enable local alpha on partial plane */
- switch (fb->pixel_format) {
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
- break;
- default:
- break;
- }
- }
-
- ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
- calc_bandwidth(crtc_w, crtc_h,
- calc_vref(mode)), 64);
- if (ret) {
- dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
- return ret;
- }
-
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
-
- ipu_cpmem_zero(ipu_plane->ipu_ch);
- ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
- ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
- if (ret < 0) {
- dev_err(dev, "unsupported pixel format 0x%08x\n",
- fb->pixel_format);
- return ret;
- }
- ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
- ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
- ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
- if (ret < 0)
- return ret;
- if (interlaced)
- ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420) {
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- ipu_plane->stride[1],
- ipu_plane->u_offset,
- ipu_plane->v_offset);
- } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- ipu_plane->stride[1],
- ipu_plane->v_offset,
- ipu_plane->u_offset);
- }
-
- ipu_plane->w = src_w;
- ipu_plane->h = src_h;
-
- return 0;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
return ret;
}
-void ipu_plane_enable(struct ipu_plane *ipu_plane)
+static void ipu_plane_enable(struct ipu_plane *ipu_plane)
{
if (ipu_plane->dp)
ipu_dp_enable(ipu_plane->ipu);
ipu_idmac_enable_channel(ipu_plane->ipu_ch);
if (ipu_plane->dp)
ipu_dp_enable_channel(ipu_plane->dp);
-
- ipu_plane->enabled = true;
}
-void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static void ipu_plane_disable(struct ipu_plane *ipu_plane)
{
- ipu_plane->enabled = false;
-
ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
if (ipu_plane->dp)
ipu_dp_disable(ipu_plane->ipu);
}
-/*
- * drm_plane API
- */
-
-static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static int ipu_disable_plane(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
- int ret = 0;
-
- DRM_DEBUG_KMS("plane - %p\n", plane);
-
- if (!ipu_plane->enabled)
- ret = ipu_plane_get_resources(ipu_plane);
- if (ret < 0)
- return ret;
-
- ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
- false);
- if (ret < 0) {
- ipu_plane_put_resources(ipu_plane);
- return ret;
- }
- if (crtc != plane->crtc)
- dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
- plane->crtc, crtc);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!ipu_plane->enabled)
- ipu_plane_enable(ipu_plane);
+ ipu_plane_disable(ipu_plane);
return 0;
}
-static int ipu_disable_plane(struct drm_plane *plane)
+static void ipu_plane_destroy(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (ipu_plane->enabled)
- ipu_plane_disable(ipu_plane);
+ ipu_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(ipu_plane);
+}
- ipu_plane_put_resources(ipu_plane);
+static const struct drm_plane_funcs ipu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = ipu_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ipu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_plane_state *old_state = plane->state;
+ struct drm_crtc_state *crtc_state;
+ struct device *dev = plane->dev->dev;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+ unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+
+ /* Ok to disable */
+ if (!fb)
+ return 0;
+
+ if (!state->crtc)
+ return -EINVAL;
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* CRTC should be enabled */
+ if (!crtc_state->enable)
+ return -EINVAL;
+
+ /* no scaling */
+ if (state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h)
+ return -EINVAL;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ /* full plane doesn't support partial off screen */
+ if (state->crtc_x || state->crtc_y ||
+ state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
+ state->crtc_h != crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ /* full plane minimum width is 13 pixels */
+ if (state->crtc_w < 13)
+ return -EINVAL;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w >
+ crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+ if (state->crtc_y + state->crtc_h >
+ crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+ break;
+ default:
+ dev_warn(dev, "Unsupported plane type\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_h < 2)
+ return -EINVAL;
+
+ /*
+ * since we cannot touch active IDMAC channels, we do not support
+ * resizing the enabled plane or changing its format
+ */
+ if (old_fb && (state->src_w != old_state->src_w ||
+ state->src_h != old_state->src_h ||
+ fb->pixel_format != old_fb->pixel_format))
+ return -EINVAL;
+
+ eba = drm_plane_state_to_eba(state);
+
+ if (eba & 0x7)
+ return -EINVAL;
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
+ return -EINVAL;
+
+ if (old_fb && fb->pitches[0] != old_fb->pitches[0])
+ return -EINVAL;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+
+ if ((ubo & 0x7) || (vbo & 0x7))
+ return -EINVAL;
+
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+ return -EINVAL;
+
+ if (old_fb) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ old_vbo = drm_plane_state_to_vbo(old_state);
+ if (ubo != old_ubo || vbo != old_vbo)
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2])
+ return -EINVAL;
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+ return -EINVAL;
+
+ if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+ return -EINVAL;
+ }
return 0;
}
-static void ipu_plane_destroy(struct drm_plane *plane)
+static void ipu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ ipu_disable_plane(plane);
+}
+
+static void ipu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ enum ipu_color_space ics;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ if (old_state->fb) {
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ return;
+ }
- ipu_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(ipu_plane);
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ ipu_dp_setup_channel(ipu_plane->dp,
+ IPUV3_COLORSPACE_RGB,
+ IPUV3_COLORSPACE_RGB);
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
+ ipu_dp_setup_channel(ipu_plane->dp, ics,
+ IPUV3_COLORSPACE_UNKNOWN);
+ ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
+ state->crtc_y);
+ /* Enable local alpha on partial plane */
+ switch (state->fb->pixel_format) {
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
+
+ ipu_cpmem_zero(ipu_plane->ipu_ch);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
+ state->src_h >> 16);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
+ ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ ipu_plane_enable(ipu_plane);
}
-static const struct drm_plane_funcs ipu_plane_funcs = {
- .update_plane = ipu_update_plane,
- .disable_plane = ipu_disable_plane,
- .destroy = ipu_plane_destroy,
+static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
return ERR_PTR(ret);
}
+ drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+
return ipu_plane;
}
int dma;
int dp_flow;
-
- int x;
- int y;
- int w;
- int h;
-
- unsigned int u_offset;
- unsigned int v_offset;
- unsigned int stride[2];
-
- bool enabled;
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
uint32_t src_h, bool interlaced);
-void ipu_plane_enable(struct ipu_plane *plane);
-void ipu_plane_disable(struct ipu_plane *plane);
-int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
- int x, int y);
-
int ipu_plane_get_resources(struct ipu_plane *plane);
void ipu_plane_put_resources(struct ipu_plane *plane);
#include <linux/component.h>
#include <linux/module.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_panel.h>
#include "imx-drm.h"
-#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector)
-#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder)
-
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
u32 bus_format;
struct drm_display_mode mode;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
+static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
+{
+ return container_of(c, struct imx_parallel_display, connector);
+}
+
+static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_parallel_display, encoder);
+}
+
static enum drm_connector_status imx_pd_connector_detect(
struct drm_connector *connector, bool force)
{
if (imxpd->panel && imxpd->panel->funcs &&
imxpd->panel->funcs->get_modes) {
- struct drm_display_info *di = &connector->display_info;
-
num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
- if (!imxpd->bus_format && di->num_bus_formats)
- imxpd->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ int ret;
if (!mode)
return -EINVAL;
- of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
+
+ ret = of_get_drm_display_mode(np, &imxpd->mode,
+ OF_USE_NATIVE_MODE);
+ if (ret)
+ return ret;
+
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
drm_mode_probed_add(connector, mode);
return &imxpd->encoder;
}
-static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
- if (mode != DRM_MODE_DPMS_ON)
- drm_panel_disable(imxpd->panel);
- else
- drm_panel_enable(imxpd->panel);
-}
-
-static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
- imxpd->connector.display_info.bus_flags);
-}
-
-static void imx_pd_encoder_commit(struct drm_encoder *encoder)
+static void imx_pd_encoder_enable(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode)
-{
-}
-
static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
drm_panel_unprepare(imxpd->panel);
}
+static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ imx_crtc_state->bus_flags = di->bus_flags;
+ if (!imxpd->bus_format && di->num_bus_formats)
+ imx_crtc_state->bus_format = di->bus_formats[0];
+ else
+ imx_crtc_state->bus_format = imxpd->bus_format;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_pd_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_pd_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
};
static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .dpms = imx_pd_encoder_dpms,
- .prepare = imx_pd_encoder_prepare,
- .commit = imx_pd_encoder_commit,
- .mode_set = imx_pd_encoder_mode_set,
+ .enable = imx_pd_encoder_enable,
.disable = imx_pd_encoder_disable,
+ .atomic_check = imx_pd_encoder_atomic_check,
};
static int imx_pd_register(struct drm_device *drm,
struct imx_parallel_display *imxpd)
{
+ struct drm_encoder *encoder = &imxpd->encoder;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
- imxpd->dev->of_node);
+ ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
- drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+ drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- drm_connector_helper_add(&imxpd->connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ if (!imxpd->bridge) {
+ drm_connector_helper_add(&imxpd->connector,
+ &imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, &imxpd->connector,
+ &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ }
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
+ if (imxpd->bridge) {
+ imxpd->bridge->encoder = encoder;
+ encoder->bridge = imxpd->bridge;
+ ret = drm_bridge_attach(drm, imxpd->bridge);
+ if (ret < 0) {
+ dev_err(imxpd->dev, "failed to attach bridge: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+ }
return 0;
}
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
+ u32 bus_format = 0;
const char *fmt;
imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
if (!strcmp(fmt, "rgb24"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
else if (!strcmp(fmt, "rgb565"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+ bus_format = MEDIA_BUS_FMT_RGB565_1X16;
else if (!strcmp(fmt, "bgr666"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ bus_format = MEDIA_BUS_FMT_RGB666_1X18;
else if (!strcmp(fmt, "lvds666"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+ bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
+ imxpd->bus_format = bus_format;
/* port@1 is the output port */
ep = of_graph_get_endpoint_by_regs(np, 1, -1);
struct device_node *remote;
remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_warn(dev, "endpoint %s not connected\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -ENODEV;
+ }
of_node_put(ep);
- if (remote) {
- imxpd->panel = of_drm_find_panel(remote);
- of_node_put(remote);
+
+ imxpd->panel = of_drm_find_panel(remote);
+ if (imxpd->panel) {
+ dev_dbg(dev, "found panel %s\n", remote->full_name);
+ } else {
+ imxpd->bridge = of_drm_find_bridge(remote);
+ if (imxpd->bridge)
+ dev_dbg(dev, "found bridge %s\n",
+ remote->full_name);
}
- if (!imxpd->panel)
+ if (!imxpd->panel && !imxpd->bridge) {
+ dev_dbg(dev, "waiting for panel or bridge %s\n",
+ remote->full_name);
+ of_node_put(remote);
return -EPROBE_DEFER;
+ }
+ of_node_put(remote);
}
imxpd->dev = dev;
* HDMI audio codec callbacks
*/
-static int mtk_hdmi_audio_hw_params(struct device *dev,
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
return 0;
}
-static int mtk_hdmi_audio_startup(struct device *dev)
+static int mtk_hdmi_audio_startup(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
return 0;
}
-static void mtk_hdmi_audio_shutdown(struct device *dev)
+static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_audio_disable(hdmi);
}
-int mtk_hdmi_audio_digital_mute(struct device *dev, bool enable)
+int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
return 0;
}
-static int mtk_hdmi_audio_get_eld(struct device *dev, uint8_t *buf, size_t len)
+static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
phy_set_drvdata(phy, mipi_tx);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy)) {
+ if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
return ret;
}
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for the MGA G200 server chips, it
{
}
-static int mgag200_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
.evict_flags = mgag200_bo_evict_flags,
- .move = mgag200_bo_move,
+ .move = NULL,
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
select SHMEM
select TMPFS
select QCOM_SCM
+ select SND_SOC_HDMI_CODEC if SND_SOC
default y
help
DRM/KMS driver for MSM/snapdragon.
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
+ msm_gem_shrinker.o \
msm_gem_submit.o \
msm_gpu.o \
msm_iommu.o \
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb;
- unsigned i, ibs = 0;
+ unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, submit->cmd[i].size);
- ibs++;
+ OUT_PKT2(ring);
break;
}
}
- /* on a320, at least, we seem to need to pad things out to an
- * even number of qwords to avoid issue w/ CP hanging on wrap-
- * around:
- */
- if (ibs % 2)
- OUT_PKT2(ring);
-
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence->seqno);
return ret;
}
- adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
+ adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
+ if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
if (gpu->memptrs_bo) {
+ if (gpu->memptrs)
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+
if (gpu->memptrs_iova)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
struct platform_device *phy_pdev;
struct device_node *phy_node;
- phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
dev_err(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
},
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+ .io_start = { 0x4700000, 0x5800000 },
+ .num_dsi = 2,
};
static const char * const dsi_6g_bus_clk_names[] = {
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd922800, 0xfd922b00 },
+ .num_dsi = 2,
};
static const char * const dsi_8916_bus_clk_names[] = {
},
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+ .io_start = { 0x1a98000 },
+ .num_dsi = 1,
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd998000, 0xfd9a0000 },
+ .num_dsi = 2,
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
struct dsi_reg_config reg_cfg;
const char * const *bus_clk_names;
const int num_bus_clks;
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi;
};
struct msm_dsi_cfg_handler {
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_vaddr(msm_host->tx_gem_obj);
+ data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
return len;
}
u32 lane_map[4];
int ret, i, len, num_lanes;
- prop = of_find_property(ep, "qcom,data-lane-map", &len);
+ prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
dev_dbg(dev, "failed to find data lane mapping\n");
return -EINVAL;
msm_host->num_data_lanes = num_lanes;
- ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+ ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
dev_err(dev, "failed to read lane data\n");
const int *swap = supported_data_lane_swaps[i];
int j;
+ /*
+ * the data-lanes array we get from DT has a logical->physical
+ * mapping. The "data lane swap" register field represents
+ * supported configurations in a physical->logical mapping.
+ * Translate the DT mapping to what we understand and find a
+ * configuration that works.
+ */
for (j = 0; j < num_lanes; j++) {
- if (swap[j] != lane_map[j])
+ if (lane_map[j] < 0 || lane_map[j] > 3)
+ dev_err(dev, "bad physical lane entry %u\n",
+ lane_map[j]);
+
+ if (swap[lane_map[j]] != j)
break;
}
struct device_node *endpoint, *device_node;
int ret;
- ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
- if (ret) {
- dev_err(dev, "%s: host index not specified, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
/*
- * Get the first endpoint node. In our case, dsi has one output port
- * to which the panel is connected. Don't return an error if a port
- * isn't defined. It's possible that there is nothing connected to
- * the dsi output.
+ * Get the endpoint of the output port of the DSI host. In our case,
+ * this is mapped to port number with reg = 1. Don't return an error if
+ * the remote endpoint isn't defined. It's possible that there is
+ * nothing connected to the dsi output.
*/
- endpoint = of_graph_get_next_endpoint(np, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
dev_dbg(dev, "%s: no endpoint\n", __func__);
return 0;
return ret;
}
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
goto fail;
}
+ msm_host->id = dsi_host_get_id(msm_host);
+ if (msm_host->id < 0) {
+ ret = msm_host->id;
+ pr_err("%s: unable to identify DSI host index\n", __func__);
+ goto fail;
+ }
+
/* fixup base address by io offset */
msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
}
msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
- if (IS_ERR(msm_host->mode)) {
+ if (!msm_host->mode) {
pr_err("%s: cannot duplicate mode\n", __func__);
- return PTR_ERR(msm_host->mode);
+ return -ENOMEM;
}
return 0;
{}
};
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi_phy; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
phy->cfg = match->data;
phy->pdev = pdev;
- ret = of_property_read_u32(dev->of_node,
- "qcom,dsi-phy-index", &phy->id);
- if (ret) {
- dev_err(dev, "%s: PHY index not specified, %d\n",
+ phy->id = dsi_phy_get_id(phy);
+ if (phy->id < 0) {
+ ret = phy->id;
+ dev_err(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
* Fill default H/W values in illegal cells, eg. cell {0, 1}.
*/
bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi_phy;
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
- }
+ },
+ .io_start = { 0xfd998300, 0xfd9a0300 },
+ .num_dsi_phy = 2,
};
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0xfd922b00, 0xfd923100 },
+ .num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0x1a98500 },
+ .num_dsi_phy = 1,
};
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0x4700300, 0x5800300 },
+ .num_dsi_phy = 2,
};
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
return gpio;
}
+/*
+ * HDMI audio codec callbacks
+ */
+static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ unsigned int chan;
+ unsigned int channel_allocation = 0;
+ unsigned int rate;
+ unsigned int level_shift = 0; /* 0dB */
+ bool down_mix = false;
+
+ dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ params->sample_width, params->cea.channels);
+
+ switch (params->cea.channels) {
+ case 2:
+ /* FR and FL speakers */
+ channel_allocation = 0;
+ chan = MSM_HDMI_AUDIO_CHANNEL_2;
+ break;
+ case 4:
+ /* FC, LFE, FR and FL speakers */
+ channel_allocation = 0x3;
+ chan = MSM_HDMI_AUDIO_CHANNEL_4;
+ break;
+ case 6:
+ /* RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x0B;
+ chan = MSM_HDMI_AUDIO_CHANNEL_6;
+ break;
+ case 8:
+ /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x1F;
+ chan = MSM_HDMI_AUDIO_CHANNEL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ dev_err(dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ msm_hdmi_audio_set_sample_rate(hdmi, rate);
+ msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
+ level_shift, down_mix);
+
+ return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+
+ msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+ .hw_params = msm_hdmi_audio_hw_params,
+ .audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &msm_hdmi_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
+static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
+}
+
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
- int i;
+ int i, err;
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
return PTR_ERR(hdmi);
priv->hdmi = hdmi;
+ err = msm_hdmi_register_audio_driver(hdmi, dev);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec %d\n", err);
+ hdmi->audio_pdev = NULL;
+ }
+
return 0;
}
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) {
+ if (priv->hdmi->audio_pdev)
+ platform_device_unregister(priv->hdmi->audio_pdev);
+
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
+ struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
/*
* audio:
*/
+/* Supported HDMI Audio channels and rates */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+#define HDMI_SAMPLE_RATE_32KHZ 0
+#define HDMI_SAMPLE_RATE_44_1KHZ 1
+#define HDMI_SAMPLE_RATE_48KHZ 2
+#define HDMI_SAMPLE_RATE_88_2KHZ 3
+#define HDMI_SAMPLE_RATE_96KHZ 4
+#define HDMI_SAMPLE_RATE_176_4KHZ 5
+#define HDMI_SAMPLE_RATE_192KHZ 6
int msm_hdmi_audio_update(struct hdmi *hdmi);
int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
{
- if (hdmi && hdmi->hdcp_ctrl) {
+ if (hdmi) {
kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL;
}
struct mdp4_dtv_encoder {
struct drm_encoder base;
- struct clk *src_clk;
struct clk *hdmi_clk;
struct clk *mdp_clk;
unsigned long int pixclock;
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
- clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
bs_set(mdp4_dtv_encoder, 1);
- DBG("setting src_clk=%lu", pc);
+ DBG("setting mdp_clk=%lu", pc);
- ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
- clk_prepare_enable(mdp4_dtv_encoder->src_clk);
- ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
- if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ pc, ret);
+
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
mdp4_dtv_encoder->enabled = true;
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+ return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
}
/* initialize encoder */
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
- mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
- if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
- dev_err(dev->dev, "failed to get src_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
- goto fail;
- }
-
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
dev_err(dev->dev, "failed to get hdmi_clk\n");
goto fail;
}
- mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get mdp_clk\n");
+ dev_err(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct device *dev = mdp4_kms->dev->dev;
struct msm_mmu *mmu = mdp4_kms->mmu;
if (mmu) {
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
- if (mdp4_kms->blank_cursor_bo)
- drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+ if (mdp4_kms->rpm_enabled)
+ pm_runtime_disable(dev);
+
kfree(mdp4_kms);
}
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- int ret;
+ int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
goto fail;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ kms->irq = irq;
+
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
goto fail;
}
- mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
dev_err(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ pm_runtime_enable(dev->dev);
+ mdp4_kms->rpm_enabled = true;
+
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
struct mdp_irq error_handler;
+ bool rpm_enabled;
+
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
uint32_t blank_cursor_iova;
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-static inline uint32_t __offset_MDP(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->mdp.base[0]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-
-static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
-#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
+#define REG_MDP5_HW_VERSION 0x00000000
+#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
+ return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
}
-#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
-#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
+#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+ return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
}
-#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
+ return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
}
-static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+#define REG_MDP5_DISP_INTF_SEL 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
}
-static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_EN 0x00000010
-static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_STATUS 0x00000014
-static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_CLEAR 0x00000018
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_EN 0x0000001c
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_STATUS 0x00000020
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
-static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
-#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
+#define REG_MDP5_SPARE_0 0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
default: return INVALID_IDX(idx);
}
}
-static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
-static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
{
- return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
}
-#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); }
+#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
static inline uint32_t __offset_CTL(uint32_t idx)
{
.name = "msm8x74v1",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
+ .base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
},
.pipe_rgb = {
.count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
+ .base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
},
.pipe_dma = {
.count = 2,
- .base = { 0x02a00, 0x02e00 },
+ .base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
0,
},
.lm = {
.count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.nb_stages = 5,
},
.dspp = {
.count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x04500, 0x04900, 0x04d00 },
},
.pp = {
.count = 3,
- .base = { 0x21b00, 0x21c00, 0x21d00 },
+ .base = { 0x21a00, 0x21b00, 0x21c00 },
},
.intf = {
- .base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+ .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
.name = "msm8x74",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
+ .base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
+ .base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x02a00, 0x02e00 },
+ .base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x04500, 0x04900, 0x04d00 },
},
.ad = {
.count = 2,
- .base = { 0x13100, 0x13300 },
+ .base = { 0x13000, 0x13200 },
},
.pp = {
.count = 3,
- .base = { 0x12d00, 0x12e00, 0x12f00 },
+ .base = { 0x12c00, 0x12d00, 0x12e00 },
},
.intf = {
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
.name = "apq8084",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
- .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x03200, 0x03600 },
+ .base = { 0x03100, 0x03500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
- .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
- .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+ .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
},
.ad = {
.count = 3,
- .base = { 0x13500, 0x13700, 0x13900 },
+ .base = { 0x13400, 0x13600, 0x13800 },
},
.pp = {
.count = 4,
- .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+ .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
},
.intf = {
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
.name = "msm8x16",
.mdp = {
.count = 1,
- .base = { 0x01000 },
+ .base = { 0x0 },
.caps = MDP_CAP_SMP |
0,
},
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0x4003ffff,
},
.pipe_vig = {
.count = 1,
- .base = { 0x05000 },
+ .base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
- .base = { 0x15000, 0x17000 },
+ .base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
- .base = { 0x25000 },
+ .base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2, /* LM0 and LM3 */
- .base = { 0x45000, 0x48000 },
+ .base = { 0x44000, 0x47000 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
- .base = { 0x55000 },
+ .base = { 0x54000 },
},
.intf = {
- .base = { 0x00000, 0x6b800 },
+ .base = { 0x00000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
.name = "msm8x94",
.mdp = {
.count = 1,
- .base = { 0x01000 },
.caps = MDP_CAP_SMP |
0,
},
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf0ffffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
- .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x25000, 0x27000 },
+ .base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
- .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
- .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+ .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
},
.ad = {
.count = 3,
- .base = { 0x79000, 0x79800, 0x7a000 },
+ .base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
- .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.intf = {
- .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
.name = "msm8x96",
.mdp = {
.count = 1,
- .base = { 0x01000 },
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
0,
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf4ffffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
},
.pipe_rgb = {
.count = 4,
- .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
},
.pipe_dma = {
.count = 2,
- .base = { 0x25000, 0x27000 },
+ .base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
},
.lm = {
.count = 6,
- .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 2,
- .base = { 0x55000, 0x57000 },
+ .base = { 0x54000, 0x56000 },
},
.ad = {
.count = 3,
- .base = { 0x79000, 0x79800, 0x7a000 },
+ .base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
- .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.cdm = {
.count = 1,
- .base = { 0x7a200 },
+ .base = { 0x79200 },
},
.dsc = {
.count = 2,
- .base = { 0x81000, 0x81400 },
+ .base = { 0x80000, 0x80400 },
},
.intf = {
- .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
* start signal for the slave encoder
*/
if (intf_num == 1)
- data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
else if (intf_num == 2)
- data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
else
return -EINVAL;
/* Smart Panel, Sync mode */
- data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL;
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
/* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0),
- MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_disable(mdp5_kms);
return 0;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
- int ret, bpp, lm;
- unsigned int depth;
+ int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
return -EINVAL;
lm = mdp5_crtc->lm;
- drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
- stride = width * (bpp >> 3);
+ stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
u32 intf_sel;
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf->num) {
case 0:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
break;
case 1:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
break;
case 2:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
break;
case 3:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
break;
default:
BUG();
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
if (!enable) {
ctlx->pair = NULL;
ctly->pair = NULL;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
ctlx->pair = ctly;
ctly->pair = ctlx;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
- MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
+ MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
return 0;
}
* to use the master's enable signal for the slave encoder.
*/
if (intf_num == 1)
- data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
else if (intf_num == 2)
- data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
else
return -EINVAL;
/* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms);
/* Dumb Panel, Sync mode */
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/irqdomain.h>
#include <linux/irq.h>
#include "msm_drv.h"
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask)
{
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
- irqmask ^ (irqmask & old_irqmask));
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms);
}
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
+ mdp5_enable(mdp5_kms);
mdp_irq_register(mdp_kms, error_handler);
+ mdp5_disable(mdp5_kms);
return 0;
}
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms);
}
-static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+irqreturn_t mdp5_irq(struct msm_kms *kms)
{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
uint32_t status, enable;
- enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
- status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
+ enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
VERB("status=%08x", status);
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
-}
-
-irqreturn_t mdp5_irq(struct msm_kms *kms)
-{
- struct mdp_kms *mdp_kms = to_mdp_kms(kms);
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
- uint32_t intr;
-
- intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
-
- VERB("intr=%08x", intr);
-
- if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
- mdp5_irq_mdp(mdp_kms);
- intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
- }
-
- while (intr) {
- irq_hw_number_t hwirq = fls(intr) - 1;
- generic_handle_irq(irq_find_mapping(
- mdp5_kms->irqcontroller.domain, hwirq));
- intr &= ~(1 << hwirq);
- }
return IRQ_HANDLED;
}
mdp5_crtc_vblank(crtc), false);
mdp5_disable(mdp5_kms);
}
-
-/*
- * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
- MDSS_HW_INTR_STATUS_INTR_DSI1 | \
- MDSS_HW_INTR_STATUS_INTR_HDMI | \
- MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdp5_hw_mask_irq(struct irq_data *irqd)
-{
- struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void mdp5_hw_unmask_irq(struct irq_data *irqd)
-{
- struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip mdp5_hw_irq_chip = {
- .name = "mdp5",
- .irq_mask = mdp5_hw_mask_irq,
- .irq_unmask = mdp5_hw_unmask_irq,
-};
-
-static int mdp5_hw_irqdomain_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct mdp5_kms *mdp5_kms = d->host_data;
-
- if (!(VALID_IRQS & (1 << hwirq)))
- return -EPERM;
-
- irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdp5_kms);
-
- return 0;
-}
-
-static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
- .map = mdp5_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-
-int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
-{
- struct device *dev = mdp5_kms->dev->dev;
- struct irq_domain *d;
-
- d = irq_domain_add_linear(dev->of_node, 32,
- &mdp5_hw_irqdomain_ops, mdp5_kms);
- if (!d) {
- dev_err(dev, "mdp5 irq domain add failed\n");
- return -ENXIO;
- }
-
- mdp5_kms->irqcontroller.enabled_mask = 0;
- mdp5_kms->irqcontroller.domain = d;
-
- return 0;
-}
-
-void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
-{
- if (mdp5_kms->irqcontroller.domain) {
- irq_domain_remove(mdp5_kms->irqcontroller.domain);
- mdp5_kms->irqcontroller.domain = NULL;
- }
-}
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_irq.h>
#include "msm_drv.h"
#include "msm_mmu.h"
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_device *dev = mdp5_kms->dev;
+ struct platform_device *pdev = mdp5_kms->pdev;
unsigned long flags;
- pm_runtime_get_sync(dev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ mdp5_enable(mdp5_kms);
/* Magic unknown register writes:
*
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- pm_runtime_put_sync(dev->dev);
+ mdp5_disable(mdp5_kms);
+ pm_runtime_put_sync(&pdev->dev);
return 0;
}
return mdp5_encoder_set_split_display(encoder, slave_encoder);
}
-static void mdp5_destroy(struct msm_kms *kms)
+static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
- mdp5_irq_domain_fini(mdp5_kms);
-
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
-
- if (mdp5_kms->ctlm)
- mdp5_ctlm_destroy(mdp5_kms->ctlm);
- if (mdp5_kms->smp)
- mdp5_smp_destroy(mdp5_kms->smp);
- if (mdp5_kms->cfg)
- mdp5_cfg_destroy(mdp5_kms->cfg);
-
- kfree(mdp5_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
- .destroy = mdp5_destroy,
+ .destroy = mdp5_kms_destroy,
},
.set_irqmask = mdp5_set_irqmask,
};
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* register our interrupt-controller for hdmi/eDP/dsi/etc
- * to use for irqs routed through mdp:
- */
- ret = mdp5_irq_domain_init(mdp5_kms);
- if (ret)
- goto fail;
-
/* construct CRTCs and their private planes: */
for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
return ret;
}
-static void read_hw_revision(struct mdp5_kms *mdp5_kms,
- uint32_t *major, uint32_t *minor)
+static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
+ u32 *major, u32 *minor)
{
- uint32_t version;
+ u32 version;
mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
+ version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
mdp5_disable(mdp5_kms);
- *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
- *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
+ *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
- struct platform_device *pdev = dev->platformdev;
- struct mdp5_cfg *config;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev;
struct mdp5_kms *mdp5_kms;
- struct msm_kms *kms = NULL;
+ struct mdp5_cfg *config;
+ struct msm_kms *kms;
struct msm_mmu *mmu;
- uint32_t major, minor;
- int i, ret;
+ int irq, i, ret;
- mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
- if (!mdp5_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
- ret = -ENOMEM;
+ /* priv->kms would have been populated by the MDP5 driver */
+ kms = priv->kms;
+ if (!kms)
+ return NULL;
+
+ mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+ pdev = mdp5_kms->pdev;
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
- spin_lock_init(&mdp5_kms->resource_lock);
+ kms->irq = irq;
- mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
- kms = &mdp5_kms->base.base;
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp5_enable(mdp5_kms);
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+ !config->hw->intf.base[i])
+ continue;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
- mdp5_kms->dev = dev;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
+ }
+ mdp5_disable(mdp5_kms);
+ mdelay(16);
- /* mdp5_kms->mmio actually represents the MDSS base address */
- mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
- if (IS_ERR(mdp5_kms->mmio)) {
- ret = PTR_ERR(mdp5_kms->mmio);
+ if (config->platform.iommu) {
+ mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
+ iommu_domain_free(config->platform.iommu);
+ goto fail;
+ }
+
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ mmu = NULL;
+ }
+ mdp5_kms->mmu = mmu;
+
+ mdp5_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp5_kms->id < 0) {
+ ret = mdp5_kms->id;
+ dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
goto fail;
}
- mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdp5_kms->vbif)) {
- ret = PTR_ERR(mdp5_kms->vbif);
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(mdp5_kms->vdd)) {
- ret = PTR_ERR(mdp5_kms->vdd);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = config->hw->lm.max_width;
+ dev->mode_config.max_height = config->hw->lm.max_height;
+
+ dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+ dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+ dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
+ dev->max_vblank_count = 0xffffffff;
+ dev->vblank_disable_immediate = true;
+
+ return kms;
+fail:
+ if (kms)
+ mdp5_kms_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static void mdp5_destroy(struct platform_device *pdev)
+{
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
+ if (mdp5_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ u32 major, minor;
+ int ret;
+
+ mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ ret = -ENOMEM;
goto fail;
}
- ret = regulator_enable(mdp5_kms->vdd);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ platform_set_drvdata(pdev, mdp5_kms);
+
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->pdev = pdev;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
}
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
- if (ret)
- goto fail;
- ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
- clk_set_rate(mdp5_kms->src_clk, 200000000);
+ clk_set_rate(mdp5_kms->core_clk, 200000000);
+
+ pm_runtime_enable(&pdev->dev);
+ mdp5_kms->rpm_enabled = true;
- read_hw_revision(mdp5_kms, &major, &minor);
+ read_mdp_hw_revision(mdp5_kms, &major, &minor);
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
mdp5_kms->caps = config->hw->mdp.caps;
/* TODO: compute core clock rate at runtime */
- clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+ clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
/*
* Some chipsets have a Shared Memory Pool (SMP), while others
goto fail;
}
- /* make sure things are off before attaching iommu (bootloader could
- * have left things on, in which case we'll start getting faults if
- * we don't disable):
- */
- mdp5_enable(mdp5_kms);
- for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
- if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
- !config->hw->intf.base[i])
- continue;
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+ /* set uninit-ed kms */
+ priv->kms = &mdp5_kms->base.base;
- mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
- }
- mdp5_disable(mdp5_kms);
- mdelay(16);
+ return 0;
+fail:
+ mdp5_destroy(pdev);
+ return ret;
+}
- if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(dev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
- goto fail;
- }
+static int mdp5_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret) {
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
- mmu->funcs->destroy(mmu);
- goto fail;
- }
- } else {
- dev_info(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
+ DBG("");
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
+ return mdp5_init(pdev, ddev);
+}
- ret = modeset_init(mdp5_kms);
- if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
- goto fail;
- }
+static void mdp5_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- dev->mode_config.max_width = config->hw->lm.max_width;
- dev->mode_config.max_height = config->hw->lm.max_height;
+ mdp5_destroy(pdev);
+}
- dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
- dev->vblank_disable_immediate = true;
+static const struct component_ops mdp5_ops = {
+ .bind = mdp5_bind,
+ .unbind = mdp5_unbind,
+};
- return kms;
+static int mdp5_dev_probe(struct platform_device *pdev)
+{
+ DBG("");
+ return component_add(&pdev->dev, &mdp5_ops);
+}
-fail:
- if (kms)
- mdp5_destroy(kms);
- return ERR_PTR(ret);
+static int mdp5_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &mdp5_ops);
+ return 0;
+}
+
+static const struct of_device_id mdp5_dt_match[] = {
+ { .compatible = "qcom,mdp5", },
+ /* to support downstream DT files */
+ { .compatible = "qcom,mdss_mdp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdp5_dt_match);
+
+static struct platform_driver mdp5_driver = {
+ .probe = mdp5_dev_probe,
+ .remove = mdp5_dev_remove,
+ .driver = {
+ .name = "msm_mdp",
+ .of_match_table = mdp5_dt_match,
+ },
+};
+
+void __init msm_mdp_register(void)
+{
+ DBG("");
+ platform_driver_register(&mdp5_driver);
+}
+
+void __exit msm_mdp_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&mdp5_driver);
}
struct drm_device *dev;
+ struct platform_device *pdev;
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
- void __iomem *mmio, *vbif;
-
- struct regulator *vdd;
+ void __iomem *mmio;
struct clk *axi_clk;
struct clk *ahb_clk;
- struct clk *src_clk;
struct clk *core_clk;
struct clk *lut_clk;
struct clk *vsync_clk;
/*
* lock to protect access to global resources: ie., following register:
- * - REG_MDP5_MDP_DISP_INTF_SEL
+ * - REG_MDP5_DISP_INTF_SEL
*/
spinlock_t resource_lock;
- struct mdp_irq error_handler;
+ bool rpm_enabled;
- struct {
- volatile unsigned long enabled_mask;
- struct irq_domain *domain;
- } irqcontroller;
+ struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
--- /dev/null
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+/*
+ * If needed, this can become more specific: something like struct mdp5_mdss,
+ * which contains a 'struct msm_mdss base' member.
+ */
+struct msm_mdss {
+ struct drm_device *dev;
+
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+
+ struct {
+ volatile unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irqcontroller;
+};
+
+static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+{
+ msm_writel(data, mdss->mmio + reg);
+}
+
+static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+{
+ return msm_readl(mdss->mmio + reg);
+}
+
+static irqreturn_t mdss_irq(int irq, void *arg)
+{
+ struct msm_mdss *mdss = arg;
+ u32 intr;
+
+ intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+
+ VERB("intr=%08x", intr);
+
+ while (intr) {
+ irq_hw_number_t hwirq = fls(intr) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ mdss->irqcontroller.domain, hwirq));
+ intr &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
+ * can register to get their irq's delivered
+ */
+
+#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
+ MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+ MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+ MDSS_HW_INTR_STATUS_INTR_HDMI | \
+ MDSS_HW_INTR_STATUS_INTR_EDP)
+
+static void mdss_hw_mask_irq(struct irq_data *irqd)
+{
+ struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void mdss_hw_unmask_irq(struct irq_data *irqd)
+{
+ struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip mdss_hw_irq_chip = {
+ .name = "mdss",
+ .irq_mask = mdss_hw_mask_irq,
+ .irq_unmask = mdss_hw_unmask_irq,
+};
+
+static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct msm_mdss *mdss = d->host_data;
+
+ if (!(VALID_IRQS & (1 << hwirq)))
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, mdss);
+
+ return 0;
+}
+
+static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+ .map = mdss_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+
+static int mdss_irq_domain_init(struct msm_mdss *mdss)
+{
+ struct device *dev = mdss->dev->dev;
+ struct irq_domain *d;
+
+ d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
+ mdss);
+ if (!d) {
+ dev_err(dev, "mdss irq domain add failed\n");
+ return -ENXIO;
+ }
+
+ mdss->irqcontroller.enabled_mask = 0;
+ mdss->irqcontroller.domain = d;
+
+ return 0;
+}
+
+void msm_mdss_destroy(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
+
+ if (!mdss)
+ return;
+
+ irq_domain_remove(mdss->irqcontroller.domain);
+ mdss->irqcontroller.domain = NULL;
+
+ regulator_disable(mdss->vdd);
+
+ pm_runtime_put_sync(dev->dev);
+
+ pm_runtime_disable(dev->dev);
+}
+
+int msm_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_mdss *mdss;
+ int ret;
+
+ DBG("");
+
+ if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
+ return 0;
+
+ mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
+ if (!mdss) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdss->dev = dev;
+
+ mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdss->mmio)) {
+ ret = PTR_ERR(mdss->mmio);
+ goto fail;
+ }
+
+ mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdss->vbif)) {
+ ret = PTR_ERR(mdss->vbif);
+ goto fail;
+ }
+
+ /* Regulator to enable GDSCs in downstream kernels */
+ mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdss->vdd)) {
+ ret = PTR_ERR(mdss->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(mdss->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ mdss_irq, 0, "mdss_isr", mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ goto fail_irq;
+ }
+
+ ret = mdss_irq_domain_init(mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ goto fail_irq;
+ }
+
+ priv->mdss = mdss;
+
+ pm_runtime_enable(dev->dev);
+
+ /*
+ * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
+ * domain. Remove this once runtime PM is adapted for all the devices.
+ */
+ pm_runtime_get_sync(dev->dev);
+
+ return 0;
+fail_irq:
+ regulator_disable(mdss->vdd);
+fail:
+ return ret;
+}
*
* configured:
* The block is allocated to some client, and assigned to that
- * client in MDP5_MDP_SMP_ALLOC registers.
+ * client in MDP5_SMP_ALLOC registers.
*
* inuse:
* The block is being actively used by a client.
* mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
+ * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
* are configured for the union(pending, inuse)
* Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
+ val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
switch (fld) {
case 0:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
}
}
#include "msm_gpu.h"
#include "msm_kms.h"
+
+/*
+ * MSM driver version:
+ * - 1.0.0 - initial interface
+ * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ */
+#define MSM_VERSION_MAJOR 1
+#define MSM_VERSION_MINOR 1
+#define MSM_VERSION_PATCHLEVEL 0
+
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
kfree(vbl_ev);
}
+ msm_gem_shrinker_cleanup(ddev);
+
drm_kms_helper_poll_fini(ddev);
drm_dev_unregister(ddev);
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms) {
- pm_runtime_disable(dev);
+ if (kms)
kms->funcs->destroy(kms);
- }
if (gpu) {
mutex_lock(&ddev->struct_mutex);
component_unbind_all(dev, ddev);
+ msm_mdss_destroy(ddev);
+
ddev->dev_private = NULL;
drm_dev_unref(ddev);
if (node) {
struct resource r;
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
size = r.end - r.start;
}
ddev->dev_private = priv;
+ priv->dev = ddev;
+
+ ret = msm_mdss_init(ddev);
+ if (ret) {
+ kfree(priv);
+ drm_dev_unref(ddev);
+ return ret;
+ }
priv->wq = alloc_ordered_workqueue("msm", 0);
priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret) {
+ msm_mdss_destroy(ddev);
kfree(priv);
drm_dev_unref(ddev);
return ret;
if (ret)
goto fail;
+ msm_gem_shrinker_init(ddev);
+
switch (get_mdp_ver(pdev)) {
case 4:
kms = mdp4_kms_init(ddev);
+ priv->kms = kms;
break;
case 5:
kms = mdp5_kms_init(ddev);
goto fail;
}
- priv->kms = kms;
-
if (kms) {
- pm_runtime_enable(dev);
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
goto fail;
}
- pm_runtime_get_sync(dev);
- ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
- pm_runtime_put_sync(dev);
- if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ if (kms) {
+ pm_runtime_get_sync(dev);
+ ret = drm_irq_install(ddev, kms->irq);
+ pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
}
ret = drm_dev_register(ddev, 0);
return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
+static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ switch (args->madv) {
+ case MSM_MADV_DONTNEED:
+ case MSM_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = msm_gem_madvise(obj, args->madv);
+ if (ret >= 0) {
+ args->retained = ret;
+ ret = 0;
+ }
+
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
.name = "msm",
.desc = "MSM Snapdragon DRM",
.date = "20130625",
- .major = 1,
- .minor = 0,
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
};
#ifdef CONFIG_PM_SLEEP
return dev->of_node == data;
}
-static int add_components(struct device *dev, struct component_match **matchptr,
- const char *name)
+/*
+ * Identify what components need to be added by parsing what remote-endpoints
+ * our MDP output ports are connected to. In the case of LVDS on MDP4, there
+ * is no external component that we need to add since LVDS is within MDP4
+ * itself.
+ */
+static int add_components_mdp(struct device *mdp_dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np = mdp_dev->of_node;
+ struct device_node *ep_node;
+ struct device *master_dev;
+
+ /*
+ * on MDP4 based platforms, the MDP platform device is the component
+ * master that adds other display interface components to itself.
+ *
+ * on MDP5 based platforms, the MDSS platform device is the component
+ * master that adds MDP5 and other display interface components to
+ * itself.
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4"))
+ master_dev = mdp_dev;
+ else
+ master_dev = mdp_dev->parent;
+
+ for_each_endpoint_of_node(np, ep_node) {
+ struct device_node *intf;
+ struct of_endpoint ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret) {
+ dev_err(mdp_dev, "unable to parse port endpoint\n");
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /*
+ * The LCDC/LVDS port on MDP4 is a speacial case where the
+ * remote-endpoint isn't a component that we need to add
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4") &&
+ ep.port == 0) {
+ of_node_put(ep_node);
+ continue;
+ }
+
+ /*
+ * It's okay if some of the ports don't have a remote endpoint
+ * specified. It just means that the port isn't connected to
+ * any external interface.
+ */
+ intf = of_graph_get_remote_port_parent(ep_node);
+ if (!intf) {
+ of_node_put(ep_node);
+ continue;
+ }
+
+ component_match_add(master_dev, matchptr, compare_of, intf);
+
+ of_node_put(intf);
+ of_node_put(ep_node);
+ }
+
+ return 0;
+}
+
+static int compare_name_mdp(struct device *dev, void *data)
{
- struct device_node *np = dev->of_node;
- unsigned i;
+ return (strstr(dev_name(dev), "mdp") != NULL);
+}
+
+static int add_display_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device *mdp_dev;
+ int ret;
+
+ /*
+ * MDP5 based devices don't have a flat hierarchy. There is a top level
+ * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
+ * children devices, find the MDP5 node, and then add the interfaces
+ * to our components list.
+ */
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate children devices\n");
+ return ret;
+ }
- for (i = 0; ; i++) {
- struct device_node *node;
+ mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+ if (!mdp_dev) {
+ dev_err(dev, "failed to find MDSS MDP node\n");
+ of_platform_depopulate(dev);
+ return -ENODEV;
+ }
- node = of_parse_phandle(np, name, i);
- if (!node)
- break;
+ put_device(mdp_dev);
- component_match_add(dev, matchptr, compare_of, node);
+ /* add the MDP component itself */
+ component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
+ } else {
+ /* MDP4 */
+ mdp_dev = dev;
}
+ ret = add_components_mdp(mdp_dev, matchptr);
+ if (ret)
+ of_platform_depopulate(dev);
+
+ return ret;
+}
+
+/*
+ * We don't know what's the best binding to link the gpu with the drm device.
+ * Fow now, we just hunt for all the possible gpus that we support, and add them
+ * as components.
+ */
+static const struct of_device_id msm_gpu_match[] = {
+ { .compatible = "qcom,adreno-3xx" },
+ { .compatible = "qcom,kgsl-3d0" },
+ { },
+};
+
+static int add_gpu_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, msm_gpu_match);
+ if (!np)
+ return 0;
+
+ component_match_add(dev, matchptr, compare_of, np);
+
+ of_node_put(np);
+
return 0;
}
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
+ int ret;
- add_components(&pdev->dev, &match, "connectors");
- add_components(&pdev->dev, &match, "gpus");
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+
+ ret = add_gpu_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
static int msm_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
+ of_platform_depopulate(&pdev->dev);
return 0;
}
-static const struct platform_device_id msm_id[] = {
- { "mdp", 0 },
- { }
-};
-
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */
- { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */
- /* to support downstream DT files */
- { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
+ { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
+ { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
.of_match_table = dt_match,
.pm = &msm_pm_ops,
},
- .id_table = msm_id,
};
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_mdp_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_mdp_unregister();
}
module_init(msm_drm_register);
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
+struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_drm_private {
+ struct drm_device *dev;
+
struct msm_kms *kms;
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
+ /* top level MDSS wrapper device (for MDP5 only) */
+ struct msm_mdss *mdss;
+
/* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5..
*/
struct drm_mm mm;
} vram;
+ struct notifier_block vmap_notifier;
+ struct shrinker shrinker;
+
struct msm_vblank_ctrl vblank_ctrl;
};
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+void msm_gem_shrinker_init(struct drm_device *dev);
+void msm_gem_shrinker_cleanup(struct drm_device *dev);
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
-void *msm_gem_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
+void msm_gem_put_vaddr(struct drm_gem_object *obj);
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
+void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
}
#endif
+void __init msm_mdp_register(void);
+void __exit msm_mdp_unregister(void);
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i];
- if (bo)
- drm_gem_object_unreference_unlocked(bo);
+
+ drm_gem_object_unreference_unlocked(bo);
}
kfree(msm_fb);
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
+ if (IS_ERR(fbi->screen_base)) {
+ ret = PTR_ERR(fbi->screen_base);
+ goto fail_unlock;
+ }
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
/* this will free the backing object */
if (fbdev->fb) {
+ msm_gem_put_vaddr(fbdev->bo);
drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
}
return offset;
}
+static void
+put_iova(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int id;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ struct msm_mmu *mmu = priv->mmus[id];
+ if (mmu && msm_obj->domain[id].iova) {
+ uint32_t offset = msm_obj->domain[id].iova;
+ mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+ msm_obj->domain[id].iova = 0;
+ }
+ }
+}
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
return ret;
}
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
return ERR_CAST(pages);
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (msm_obj->vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
}
+ msm_obj->vmap_count++;
return msm_obj->vaddr;
}
-void *msm_gem_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
void *ret;
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_vaddr_locked(obj);
+ ret = msm_gem_get_vaddr_locked(obj);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ WARN_ON(msm_obj->vmap_count < 1);
+ msm_obj->vmap_count--;
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+ mutex_lock(&obj->dev->struct_mutex);
+ msm_gem_put_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+}
+
+/* Update madvise status, returns true if not purged, else
+ * false or -errno.
+ */
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ if (msm_obj->madv != __MSM_MADV_PURGED)
+ msm_obj->madv = madv;
+
+ return (msm_obj->madv != __MSM_MADV_PURGED);
+}
+
+void msm_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!is_purgeable(msm_obj));
+ WARN_ON(obj->import_attach);
+
+ put_iova(obj);
+
+ msm_gem_vunmap(obj);
+
+ put_pages(obj);
+
+ msm_obj->madv = __MSM_MADV_PURGED;
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+ drm_gem_free_mmap_offset(obj);
+
+ /* Our goal here is to return as much of the memory as
+ * is possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+ return;
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+}
+
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
reservation_object_add_excl_fence(msm_obj->resv, fence);
struct reservation_object_list *fobj;
struct fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
+ const char *madv;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
+ switch (msm_obj->madv) {
+ case __MSM_MADV_PURGED:
+ madv = " purged";
+ break;
+ case MSM_MADV_DONTNEED:
+ madv = " purgeable";
+ break;
+ case MSM_MADV_WILLNEED:
+ default:
+ madv = "";
+ break;
+ }
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size);
+ off, msm_obj->vaddr, obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
list_del(&msm_obj->mm_list);
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- }
- }
+ put_iova(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
- vunmap(msm_obj->vaddr);
+ msm_gem_vunmap(obj);
put_pages(obj);
}
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
+ msm_obj->madv = MSM_MADV_WILLNEED;
if (resv) {
msm_obj->resv = resv;
return obj;
fail:
- if (obj)
- drm_gem_object_unreference(obj);
-
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
-
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
uint32_t flags;
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ uint8_t madv;
+
+ /**
+ * count of active vmap'ing
+ */
+ uint8_t vmap_count;
+
/* And object is either:
* inactive - on priv->inactive_list
* active - on one one of the gpu's active_list.. well, at
return msm_obj->gpu != NULL;
}
-#define MAX_CMDS 4
+static inline bool is_purgeable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
+ !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
+}
+
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
uint32_t size; /* in dwords */
uint32_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
- } cmd[MAX_CMDS];
+ } *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
{
- return msm_gem_vaddr(obj);
+ return msm_gem_get_vaddr(obj);
}
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- /* TODO msm_gem_vunmap() */
+ msm_gem_put_vaddr(obj);
}
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
--- /dev/null
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+ *unlock = false;
+ } else {
+ *unlock = true;
+ }
+
+ return true;
+}
+
+
+static unsigned long
+msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned long count = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_purgeable(msm_obj))
+ count += msm_obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned long freed = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (freed >= sc->nr_to_scan)
+ break;
+ if (is_purgeable(msm_obj)) {
+ msm_gem_purge(&msm_obj->base);
+ freed += msm_obj->base.size >> PAGE_SHIFT;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed > 0)
+ pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+ return freed;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct msm_drm_private *priv =
+ container_of(nb, struct msm_drm_private, vmap_notifier);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned unmapped = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return NOTIFY_DONE;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_vunmapable(msm_obj)) {
+ msm_gem_vunmap(&msm_obj->base);
+ /* since we don't know any better, lets bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+ if (++unmapped >= 15)
+ break;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ *(unsigned long *)ptr += unmapped;
+
+ if (unmapped > 0)
+ pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * msm_gem_shrinker_init - Initialize msm shrinker
+ * @dev_priv: msm device
+ *
+ * This function registers and sets up the msm shrinker.
+ */
+void msm_gem_shrinker_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->shrinker.count_objects = msm_gem_shrinker_count;
+ priv->shrinker.scan_objects = msm_gem_shrinker_scan;
+ priv->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&priv->shrinker));
+
+ priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+}
+
+/**
+ * msm_gem_shrinker_cleanup - Clean up msm shrinker
+ * @dev_priv: msm device
+ *
+ * This function unregisters the msm shrinker.
+ */
+void msm_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+}
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr)
+ struct msm_gpu *gpu, int nr_bos, int nr_cmds)
{
struct msm_gem_submit *submit;
- int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+ int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+ (nr_cmds * sizeof(*submit->cmd));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (!submit)
submit->dev = dev;
submit->gpu = gpu;
+ submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
+ submit->cmd = (void *)&submit->bos[nr_bos];
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
+ INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ /* make sure we don't have garbage flags, in case we hit
+ * error path before flags is initialized:
+ */
+ submit->bos[i].flags = 0;
+
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
ret = -EFAULT;
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr_locked(&obj->base);
+ ptr = msm_gem_get_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
last_offset = off;
}
+ msm_gem_put_vaddr_locked(&obj->base);
+
return 0;
}
if (args->pipe != MSM_PIPE_3D0)
return -EINVAL;
- if (args->nr_cmds > MAX_CMDS)
- return -EINVAL;
-
- submit = submit_create(dev, gpu, args->nr_bos);
- if (!submit)
- return -ENOMEM;
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
+ submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
ret = submit_lookup_objects(submit, args, file);
if (ret)
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
+out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
+ dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
struct msm_kms {
const struct msm_kms_funcs *funcs;
- /* irq handling: */
- bool in_irq;
- struct list_head irq_list; /* list of mdp4_irq */
- uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ /* irq number to be passed on to drm_irq_install */
+ int irq;
};
static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+int msm_mdss_init(struct drm_device *dev);
+void msm_mdss_destroy(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
size_t sz, loff_t *ppos)
{
struct msm_perf_state *perf = file->private_data;
- int n = 0, ret;
+ int n = 0, ret = 0;
mutex_lock(&perf->read_lock);
}
n = min((int)sz, perf->buftot - perf->bufpos);
- ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
- if (ret)
+ if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
+ ret = -EFAULT;
goto out;
+ }
perf->bufpos += n;
*ppos += n;
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
*/
#ifdef CONFIG_DEBUG_FS
#include "msm_gpu.h"
#include "msm_gem.h"
+static bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
goto out;
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
- ret = copy_to_user(buf, fptr, n);
- if (ret)
+ if (copy_to_user(buf, fptr, n)) {
+ ret = -EFAULT;
goto out;
+ }
fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
*ppos += n;
kfree(rd);
}
+static void snapshot_buf(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit, int idx,
+ uint32_t iova, uint32_t size)
+{
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ const char *buf;
+
+ buf = msm_gem_get_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ return;
+
+ if (iova) {
+ buf += iova - submit->bos[idx].iova;
+ } else {
+ iova = submit->bos[idx].iova;
+ size = obj->base.size;
+ }
+
+ rd_write_section(rd, RD_GPUADDR,
+ (uint32_t[2]){ iova, size }, 8);
+ rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
+
+ msm_gem_put_vaddr_locked(&obj->base);
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit)
{
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- /* could be nice to have an option (module-param?) to snapshot
- * all the bo's associated with the submit. Handy to see vtx
- * buffers, etc. For now just the cmdstream bo's is enough.
- */
+ if (rd_full) {
+ for (i = 0; i < submit->nr_bos; i++) {
+ /* buffers that are written to probably don't start out
+ * with anything interesting:
+ */
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ continue;
+
+ snapshot_buf(rd, submit, i, 0, 0);
+ }
+ }
for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t idx = submit->cmd[i].idx;
uint32_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
- struct msm_gem_object *obj = submit->bos[idx].obj;
- const char *buf = msm_gem_vaddr_locked(&obj->base);
- buf += iova - submit->bos[idx].iova;
-
- rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
- rd_write_section(rd, RD_BUFFER_CONTENTS,
- buf, szd * 4);
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!rd_full) {
+ snapshot_buf(rd, submit, submit->cmd[i].idx,
+ submit->cmd[i].iova, szd * 4);
+ }
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
goto fail;
}
- ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->start = msm_gem_get_vaddr_locked(ring->bo);
+ if (IS_ERR(ring->start)) {
+ ret = PTR_ERR(ring->start);
+ goto fail;
+ }
ring->end = ring->start + (size / 4);
ring->cur = ring->start;
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
- if (ring->bo)
+ if (ring->bo) {
+ msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
+ }
kfree(ring);
}
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB
- select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
struct nv04_display *disp = nv04_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- /* Turn every CRTC off. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
-
/* Restore state */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
encoder->enc_restore(&encoder->base.base);
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
- if (crtc) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
+ if (crtc)
+ drm_crtc_force_disable(crtc);
}
return 0;
#define NV_DEVICE_INFO_V0_FERMI 0x07
#define NV_DEVICE_INFO_V0_KEPLER 0x08
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+#define NV_DEVICE_INFO_V0_PASCAL 0x0a
__u8 family;
__u8 pad06[2];
__u64 ram_size;
#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
+#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
#define GK110_DISP /* cl5070.h */ 0x00009270
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
+#define GP100_DISP /* cl5070.h */ 0x00009770
+#define GP104_DISP /* cl5070.h */ 0x00009870
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
+#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
+#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
#define MAXWELL_A /* cl9097.h */ 0x0000b097
#define MAXWELL_B /* cl9097.h */ 0x0000b197
+#define PASCAL_A /* cl9097.h */ 0x0000c097
+
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
#define FERMI_DMA 0x000090b5
#define KEPLER_DMA_COPY_A 0x0000a0b5
#define MAXWELL_DMA_COPY_A 0x0000b0b5
+#define PASCAL_DMA_COPY_A 0x0000c0b5
+#define PASCAL_DMA_COPY_B 0x0000c1b5
#define FERMI_DECOMPRESS 0x000090b8
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
+#define PASCAL_COMPUTE_A 0x0000c0c0
#define NV74_CIPHER 0x000074c1
#endif
NVKM_SUBDEV_MC,
NVKM_SUBDEV_BUS,
NVKM_SUBDEV_TIMER,
+ NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_FB,
NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,
NVKM_ENGINE_CE0,
NVKM_ENGINE_CE1,
NVKM_ENGINE_CE2,
- NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+ NVKM_ENGINE_CE3,
+ NVKM_ENGINE_CE4,
+ NVKM_ENGINE_CE5,
+ NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
NVKM_ENGINE_CIPHER,
NVKM_ENGINE_DISP,
NVKM_ENGINE_NVENC0,
NVKM_ENGINE_NVENC1,
- NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+ NVKM_ENGINE_NVENC2,
+ NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
NVKM_ENGINE_NVDEC,
NVKM_ENGINE_PM,
NV_C0 = 0xc0,
NV_E0 = 0xe0,
GM100 = 0x110,
+ GP100 = 0x130,
} card_type;
u32 chipset;
u8 chiprev;
struct nvkm_volt *volt;
struct nvkm_engine *bsp;
- struct nvkm_engine *ce[3];
+ struct nvkm_engine *ce[6];
struct nvkm_engine *cipher;
struct nvkm_disp *disp;
struct nvkm_dma *dma;
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
- struct nvkm_engine *nvenc[2];
+ struct nvkm_engine *nvenc[3];
struct nvkm_engine *nvdec;
struct nvkm_pm *pm;
struct nvkm_engine *sec;
void (*fini)(struct nvkm_device *, bool suspend);
resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
- bool cpu_coherent;
};
struct nvkm_device_quirk {
int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
} iommu;
int gpu_speedo;
+ int gpu_speedo_id;
};
struct nvkm_device_tegra_func {
int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
u32 size;
u8 *data;
+ u32 image0_size;
+ u32 imaged_addr;
+
u32 bmp_offset;
u32 bit_offset;
u8 nvbios_checksum(const u8 *data, int size);
u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
-
-#define nvbios_rd08(b,o) (b)->data[(o)]
-#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
-#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
+u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
+u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
#endif
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
struct nvbios_ocfg {
- u16 match;
+ u8 proto;
+ u8 flags;
u16 clkcmp[2];
};
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
#endif
int regions;
} tile;
+ u8 page;
+
struct nvkm_memory *mmu_rd;
struct nvkm_memory *mmu_wr;
};
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
struct nvkm_subdev subdev;
};
-void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
-void nvkm_mc_intr_unarm(struct nvkm_mc *);
-void nvkm_mc_intr_rearm(struct nvkm_mc *);
-void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
-void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_intr(struct nvkm_device *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_device *);
+void nvkm_mc_intr_rearm(struct nvkm_device *);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_unk260(struct nvkm_device *, u32 data);
int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
const struct nvkm_secboot_func *func;
struct nvkm_subdev subdev;
+ enum nvkm_devidx devidx;
u32 base;
- u32 irq_mask;
- u32 enable_mask;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
struct list_head device;
};
-u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
#endif
u32 uv;
u8 vid;
} vid[256];
+
+ u32 max_uv;
+ u32 min_uv;
};
int nvkm_volt_get(struct nvkm_volt *);
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
return NVIF_CLASS_SW_GF100;
}
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
- if (!nvxx_device(&drm->device)->func->cpu_coherent)
- nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+ nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
if (drm->client.vm) {
if (ret)
return ret;
- /*
- * TTM buffers allocated using the DMA API already have a mapping, let's
- * use it instead.
- */
- if (!nvbo->force_coherent)
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
- &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
if (!nvbo)
return;
- /*
- * TTM buffers allocated using the DMA API already had a coherent
- * mapping which we used, no need to unmap.
- */
- if (!nvbo->force_coherent)
- ttm_bo_kunmap(&nvbo->kmap);
+ ttm_bo_kunmap(&nvbo->kmap);
}
void
return 0;
}
-static inline void *
-_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
-{
- struct ttm_dma_tt *dma_tt;
- u8 *m = mem;
-
- index *= sz;
-
- if (m) {
- /* kmap'd address, return the corresponding offset */
- m += index;
- } else {
- /* DMA-API mapping, lookup the right address */
- dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
- m = dma_tt->cpu_address[index / PAGE_SIZE];
- m += index % PAGE_SIZE;
- }
-
- return m;
-}
-#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict,
- no_wait_gpu,
new_mem);
nouveau_fence_unref(&fence);
}
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
+ ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+ if (ret)
+ return ret;
+
if (nvbo->pin_refcnt)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
out:
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
dev = drm->dev;
pdev = device->dev;
- /*
- * Objects matching this condition have been marked as force_coherent,
- * so use the DMA API for them.
- */
- if (!nvxx_device(&drm->device)->func->cpu_coherent &&
- ttm->caching_state == tt_uncached)
- return ttm_dma_populate(ttm_dma, dev->dev);
-
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
return ttm_agp_tt_populate(ttm);
dev = drm->dev;
pdev = device->dev;
- /*
- * Objects matching this condition have been marked as force_coherent,
- * so use the DMA API for them.
- */
- if (!nvxx_device(&drm->device)->func->cpu_coherent &&
- ttm->caching_state == tt_uncached) {
- ttm_dma_unpopulate(ttm_dma, dev->dev);
- return;
- }
-
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
ttm_agp_tt_unpopulate(ttm);
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
+ MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
{
struct nouveau_crtc *nv_crtc =
container_of(notify, typeof(*nv_crtc), vblank);
- drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+ drm_crtc_handle_vblank(&nv_crtc->base);
return NVIF_NOTIFY_KEEP;
}
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
+ GP104_DISP,
+ GP100_DISP,
GM200_DISP,
GM107_DISP,
GK110_DISP,
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
+ drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
case KEPLER_CHANNEL_GPFIFO_A:
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
+ case PASCAL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
- /* remove conflicting drivers (vesafb, efifb etc) */
+ /* We need to check that the chipset is supported before booting
+ * fbdev off the hardware, as there's no way to put it back.
+ */
+ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ if (ret)
+ return ret;
+
+ nvkm_device_del(&device);
+
+ /* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;
nouveau_vga_init(drm);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ if (!nvxx_device(&drm->device)->mmu) {
+ ret = -ENOSYS;
+ goto fail_device;
+ }
+
ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
0x1000, NULL, &drm->client.vm);
if (ret)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- pm_runtime_get_sync(dev->dev);
+ if (nouveau_runtime_pm != 0) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
+
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
if (ret)
goto fini;
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
fini:
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
nouveau_hwmon_get_in0_input, NULL, 0);
+static ssize_t
+nouveau_hwmon_get_in0_min(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+ if (!volt || !volt->min_uv)
+ return -ENODEV;
+
+ return sprintf(buf, "%i\n", volt->min_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
+ nouveau_hwmon_get_in0_min, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_in0_max(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+ if (!volt || !volt->max_uv)
+ return -ENODEV;
+
+ return sprintf(buf, "%i\n", volt->max_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
+ nouveau_hwmon_get_in0_max, NULL, 0);
+
static ssize_t
nouveau_hwmon_get_in0_label(struct device *d,
struct device_attribute *a, char *buf)
static struct attribute *hwmon_in0_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
NULL
};
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
uint32_t fg;
uint32_t bg;
uint32_t dsize;
- uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
if (ret)
return ret;
- width = ALIGN(image->width, 8);
- dsize = ALIGN(width * image->height, 32) >> 5;
-
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ dsize = ALIGN(image->width * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
+ GP104_DISP_CORE_CHANNEL_DMA,
+ GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
- uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
if (ret)
return ret;
- width = ALIGN(image->width, 32);
- dwords = (width * image->height) >> 5;
-
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
+ dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
[NVKM_ENGINE_CE0 ] = "ce0",
[NVKM_ENGINE_CE1 ] = "ce1",
[NVKM_ENGINE_CE2 ] = "ce2",
+ [NVKM_ENGINE_CE3 ] = "ce3",
+ [NVKM_ENGINE_CE4 ] = "ce4",
+ [NVKM_ENGINE_CE5 ] = "ce5",
[NVKM_ENGINE_CIPHER ] = "cipher",
[NVKM_ENGINE_DISP ] = "disp",
[NVKM_ENGINE_DMAOBJ ] = "dma",
[NVKM_ENGINE_MSVLD ] = "msvld",
[NVKM_ENGINE_NVENC0 ] = "nvenc0",
[NVKM_ENGINE_NVENC1 ] = "nvenc1",
+ [NVKM_ENGINE_NVENC2 ] = "nvenc2",
[NVKM_ENGINE_NVDEC ] = "nvdec",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
}
}
- nvkm_mc_reset(device->mc, subdev->index);
+ nvkm_mc_reset(device, subdev->index);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
+nvkm-y += nvkm/engine/ce/gp100.o
+nvkm-y += nvkm/engine/ce/gp104.o
--- /dev/null
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_enum
+gp100_ce_launcherr_report[] = {
+ { 0x0, "NO_ERR" },
+ { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
+ { 0x2, "INVALID_ALIGNMENT" },
+ { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
+ { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
+ { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
+ { 0x6, "DST_LINE_EXCEEDS_PITCH" },
+ { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
+ { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
+ { 0x9, "INVALID_VALUE" },
+ { 0xa, "UNUSED_FIELD" },
+ { 0xb, "INVALID_OPERATION" },
+ { 0xc, "NO_RESOURCES" },
+ { 0xd, "INVALID_CONFIG" },
+ {}
+};
+
+static void
+gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
+{
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x104418 + base);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
+ nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
+}
+
+void
+gp100_ce_intr(struct nvkm_engine *ce)
+{
+ const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x10440c + base);
+ u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
+ if (intr & 0x00000001) { //XXX: guess
+ nvkm_warn(subdev, "BLOCKPIPE\n");
+ nvkm_wr32(device, 0x104410 + base, 0x00000001);
+ intr &= ~0x00000001;
+ }
+ if (intr & 0x00000002) { //XXX: guess
+ nvkm_warn(subdev, "NONBLOCKPIPE\n");
+ nvkm_wr32(device, 0x104410 + base, 0x00000002);
+ intr &= ~0x00000002;
+ }
+ if (intr & 0x00000004) {
+ gp100_ce_intr_launcherr(ce, base);
+ nvkm_wr32(device, 0x104410 + base, 0x00000004);
+ intr &= ~0x00000004;
+ }
+ if (intr) {
+ nvkm_warn(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x104410 + base, intr);
+ }
+}
+
+static const struct nvkm_engine_func
+gp100_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, PASCAL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gp100_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+}
--- /dev/null
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gp104_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, PASCAL_DMA_COPY_B },
+ { -1, -1, PASCAL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gp104_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+}
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
+void gp100_ce_intr(struct nvkm_engine *);
#endif
.sw = gf100_sw_new,
};
+static const struct nvkm_device_chip
+nv130_chipset = {
+ .name = "GP100",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gm200_secboot_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp100_ce_new,
+ .ce[1] = gp100_ce_new,
+ .ce[2] = gp100_ce_new,
+ .ce[3] = gp100_ce_new,
+ .ce[4] = gp100_ce_new,
+ .ce[5] = gp100_ce_new,
+ .dma = gf119_dma_new,
+ .disp = gp100_disp_new,
+ .fifo = gp100_fifo_new,
+ .gr = gp100_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv134_chipset = {
+ .name = "GP104",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp104_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp104_ce_new,
+ .ce[1] = gp104_ce_new,
+ .ce[2] = gp104_ce_new,
+ .ce[3] = gp104_ce_new,
+ .disp = gp104_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
_(CE0 , device->ce[0] , device->ce[0]);
_(CE1 , device->ce[1] , device->ce[1]);
_(CE2 , device->ce[2] , device->ce[2]);
+ _(CE3 , device->ce[3] , device->ce[3]);
+ _(CE4 , device->ce[4] , device->ce[4]);
+ _(CE5 , device->ce[5] , device->ce[5]);
_(CIPHER , device->cipher , device->cipher);
_(DISP , device->disp , &device->disp->engine);
_(DMAOBJ , device->dma , &device->dma->engine);
_(MSVLD , device->msvld , device->msvld);
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
+ _(NVENC2 , device->nvenc[2], device->nvenc[2]);
_(NVDEC , device->nvdec , device->nvdec);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
case 0x100: device->card_type = NV_E0; break;
case 0x110:
case 0x120: device->card_type = GM100; break;
+ case 0x130: device->card_type = GP100; break;
default:
break;
}
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
+ case 0x130: device->chip = &nv130_chipset; break;
+ case 0x134: device->chip = &nv134_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
_(NVKM_ENGINE_CE0 , ce[0]);
_(NVKM_ENGINE_CE1 , ce[1]);
_(NVKM_ENGINE_CE2 , ce[2]);
+ _(NVKM_ENGINE_CE3 , ce[3]);
+ _(NVKM_ENGINE_CE4 , ce[4]);
+ _(NVKM_ENGINE_CE5 , ce[5]);
_(NVKM_ENGINE_CIPHER , cipher);
_(NVKM_ENGINE_DISP , disp);
_(NVKM_ENGINE_DMAOBJ , dma);
_(NVKM_ENGINE_MSVLD , msvld);
_(NVKM_ENGINE_NVENC0 , nvenc[0]);
_(NVKM_ENGINE_NVENC1 , nvenc[1]);
+ _(NVKM_ENGINE_NVENC2 , nvenc[2]);
_(NVKM_ENGINE_NVDEC , nvdec);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
- .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
};
int
nvkm_device_tegra_intr(int irq, void *arg)
{
struct nvkm_device_tegra *tdev = arg;
- struct nvkm_mc *mc = tdev->device.mc;
+ struct nvkm_device *device = &tdev->device;
bool handled = false;
- if (likely(mc)) {
- nvkm_mc_intr_unarm(mc);
- nvkm_mc_intr(mc, &handled);
- nvkm_mc_intr_rearm(mc);
- }
+ nvkm_mc_intr_unarm(device);
+ nvkm_mc_intr(device, &handled);
+ nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
.fini = nvkm_device_tegra_fini,
.resource_addr = nvkm_device_tegra_resource_addr,
.resource_size = nvkm_device_tegra_resource_size,
- .cpu_coherent = false,
};
int
goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+ tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+ case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
default:
args->v0.family = 0;
break;
nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
+nvkm-y += nvkm/engine/disp/gp100.o
+nvkm-y += nvkm/engine/disp/gp104.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
nvkm-y += nvkm/engine/disp/sornv50.o
nvkm-y += nvkm/engine/disp/sorg94.o
nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/dport.o
nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
+nvkm-y += nvkm/engine/disp/rootgp100.o
+nvkm-y += nvkm/engine/disp/rootgp104.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
+nvkm-y += nvkm/engine/disp/dmacgp104.o
nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
+nvkm-y += nvkm/engine/disp/basegp104.o
nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
+nvkm-y += nvkm/engine/disp/coregp100.o
+nvkm-y += nvkm/engine/disp/coregp104.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
+nvkm-y += nvkm/engine/disp/ovlygp104.o
nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_base_oclass = {
+ .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &gp104_disp_dmac_func,
+ .mthd = &gf119_disp_base_chan_mthd,
+ .chid = 1,
+};
extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
}
};
-static void
+void
gf119_disp_core_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
--- /dev/null
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp100_disp_core_oclass = {
+ .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static int
+gp104_disp_core_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x611494, chan->push);
+ nvkm_wr32(device, 0x611498, 0x00010000);
+ nvkm_wr32(device, 0x61149c, 0x00000001);
+ nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core init: %08x\n",
+ nvkm_rd32(device, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_core_func = {
+ .init = gp104_disp_core_init,
+ .fini = gf119_disp_core_fini,
+ .bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_core_oclass = {
+ .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gp104_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
chan->base.chid << 27 | 0x00000001);
}
-static void
+void
gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static int
+gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+ nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_dmac_func = {
+ .init = gp104_disp_dmac_init,
+ .fini = gf119_disp_dmac_fini,
+ .bind = gf119_disp_dmac_bind,
+};
extern const struct nv50_disp_dmac_func nv50_disp_core_func;
extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+void gf119_disp_core_fini(struct nv50_disp_dmac *);
+
+extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
#endif
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
+ *data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
nvkm_wr32(device, 0x6101d0, 0x80000000);
}
-static void
+void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
u32 stat = nvkm_rd32(device, 0x61009c);
int chid = ffs(stat) - 1;
if (chid >= 0)
- gf119_disp_intr_error(disp, chid);
+ disp->func->intr_error(disp, chid);
intr &= ~0x00000002;
}
static const struct nv50_disp_func
gf119_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gf119_disp_root_oclass,
static const struct nv50_disp_func
gk104_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk104_disp_root_oclass,
static const struct nv50_disp_func
gk110_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk110_disp_root_oclass,
static const struct nv50_disp_func
gm107_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm107_disp_root_oclass,
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
- .outp.internal.dp = gf119_sor_dp_new,
+ .outp.internal.dp = gm107_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
static const struct nv50_disp_func
gm200_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm200_disp_root_oclass,
--- /dev/null
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gp100_disp = {
+ .intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gp100_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm200_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm200_sor_magic,
+};
+
+int
+gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gp100_disp, device, index, pdisp);
+}
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static void
+gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
+ u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
+ u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
+
+ nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+ if (chid < ARRAY_SIZE(disp->chan)) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nvkm_wr32(device, 0x61009c, (1 << chid));
+ nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
+}
+
+static const struct nv50_disp_func
+gp104_disp = {
+ .intr = gf119_disp_intr,
+ .intr_error = gp104_disp_intr_error,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gp104_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm200_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm200_sor_magic,
+};
+
+int
+gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+}
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
+#include <subdev/timer.h>
static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp *base)
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
+ *data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
if (!outp)
return NULL;
+ *conf = (ctrl & 0x00000f00) >> 8;
if (outp->info.location == 0) {
switch (outp->info.type) {
case DCB_OUTPUT_TMDS:
- *conf = (ctrl & 0x00000f00) >> 8;
if (*conf == 5)
*conf |= 0x0100;
break;
case DCB_OUTPUT_LVDS:
- *conf = disp->sor.lvdsconf;
+ *conf |= disp->sor.lvdsconf;
break;
- case DCB_OUTPUT_DP:
- *conf = (ctrl & 0x00000f00) >> 8;
- break;
- case DCB_OUTPUT_ANALOG:
default:
- *conf = 0x00ff;
break;
}
} else {
pclk = pclk / 2;
}
- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+ &ver, &hdr, &cnt, &len, &info2);
if (data && id < 0xff) {
data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
if (data) {
return outp;
}
+static bool
+nv50_disp_dptmds_war(struct nvkm_device *device)
+{
+ switch (device->chipset) {
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+ if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
+ switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
+ case 0x00000000:
+ case 0x00030000:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+
+}
+
+static void
+nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+
+ if (!nv50_disp_dptmds_war_needed(disp, outp))
+ return;
+
+ nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
+ nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
+
+ nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
+ nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
+ nvkm_usec(device, 400, NVKM_DELAY);
+ nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
+ nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
+
+ if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
+ u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+ u32 pu_pc = seqctl & 0x0000000f;
+ nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
+ }
+}
+
+static void
+nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+ u32 sorpwr;
+
+ if (!nv50_disp_dptmds_war_needed(disp, outp))
+ return;
+
+ sorpwr = nvkm_rd32(device, 0x61c004 + soff);
+ if (sorpwr & 0x00000001) {
+ u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+ u32 pd_pc = (seqctl & 0x00000f00) >> 8;
+ u32 pu_pc = seqctl & 0x0000000f;
+
+ nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+
+ nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
+ nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
+ }
+
+ nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
+
+ if (sorpwr & 0x00000001) {
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
+ }
+}
+
+static void
+nv50_disp_update_sppll1(struct nv50_disp *disp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ bool used = false;
+ int sor;
+
+ if (!nv50_disp_dptmds_war(device))
+ return;
+
+ for (sor = 0; sor < disp->func->sor.nr; sor++) {
+ u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
+ switch (clksor & 0x03000000) {
+ case 0x02000000:
+ case 0x03000000:
+ used = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (used)
+ return;
+
+ nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
+}
+
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
nvkm_mask(device, hreg, 0x0000000f, hval);
nvkm_mask(device, oreg, mask, oval);
+
+ nv50_disp_dptmds_war_2(disp, &outp->info);
}
/* If programming a TMDS output on a SOR that can also be configured for
if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
+ nv50_disp_dptmds_war_3(disp, &outp->info);
}
void
continue;
nv50_disp_intr_unk40_0(disp, head);
}
+ nv50_disp_update_sppll1(disp);
}
nvkm_wr32(device, 0x610030, 0x80000000);
struct nv50_disp_func {
void (*intr)(struct nv50_disp *);
+ void (*intr_error)(struct nv50_disp *, int chid);
const struct nvkm_event_func *uevent;
void (*super)(struct work_struct *);
void gf119_disp_vblank_fini(struct nv50_disp *, int);
void gf119_disp_intr(struct nv50_disp *);
void gf119_disp_intr_supervisor(struct work_struct *);
+void gf119_disp_intr_error(struct nv50_disp *, int);
#endif
int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
-int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
- struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ struct nvkm_output **);
#endif
}
};
-static const struct nv50_disp_chan_mthd
+const struct nv50_disp_chan_mthd
gk104_disp_ovly_chan_mthd = {
.name = "Overlay",
.addr = 0x001000,
--- /dev/null
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_ovly_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &gp104_disp_dmac_func,
+ .mthd = &gk104_disp_ovly_chan_mthd,
+ .chid = 5,
+};
--- /dev/null
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp100_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gp100_disp_core_oclass,
+ &gk110_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp100_disp_root_oclass = {
+ .base.oclass = GP100_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gp100_disp_root_new,
+};
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp104_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gp104_disp_core_oclass,
+ &gp104_disp_base_oclass,
+ &gp104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp104_disp_root_oclass = {
+ .base.oclass = GP104_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gp104_disp_root_new,
+};
extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
#endif
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 loff = gf119_sor_loff(outp);
- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+ const u32 soff = gf119_sor_soff(outp);
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
return 0;
}
-static int
+int
gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+ const u32 soff = outp->base.or * 0x800;
+ const u32 data = 0x01010101 * pattern;
+ if (outp->base.info.sorconf.link & 1)
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+ else
+ nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+ return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+ .pattern = gm107_sor_dp_pattern,
+ .lnk_pwr = g94_sor_dp_lnk_pwr,
+ .lnk_ctl = gf119_sor_dp_lnk_ctl,
+ .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+ struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+ return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
return lane * 0x08;
}
-static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
- struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 soff = gm200_sor_soff(outp);
- const u32 data = 0x01010101 * pattern;
- if (outp->base.info.sorconf.link & 1)
- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
- else
- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
- return 0;
-}
-
static int
gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
static const struct nvkm_output_dp_func
gm200_sor_dp_func = {
- .pattern = gm200_sor_dp_pattern,
+ .pattern = gm107_sor_dp_pattern,
.lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
.drv_ctl = gm200_sor_dp_drv_ctl,
nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
+nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogk110.o
nvkm-y += nvkm/engine/fifo/gpfifogm200.o
+nvkm-y += nvkm/engine/fifo/gpfifogp100.o
extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
#endif
}
if (eu == NULL) {
- enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+ enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
if (engidx < NVKM_SUBDEV_NR) {
const char *src = nvkm_subdev_name[engidx];
char *dst = en;
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_top *top = device->top;
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
/* Determine runlist configuration from topology device info. */
i = 0;
- while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
+ while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
/* Determine which PBDMA handles requests for this engine. */
for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
if (map[j] & (1 << runl)) {
}
}
- nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
- engn, runl, pbid);
+ nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+ engn, runl, pbid, nvkm_subdev_name[engidx]);
fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
fifo->engine[engn].runl = runl;
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_enum
+gp100_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "HOST0" },
+ { 0x07, "HOST1" },
+ { 0x08, "HOST2" },
+ { 0x09, "HOST3" },
+ { 0x0a, "HOST4" },
+ { 0x0b, "HOST5" },
+ { 0x0c, "HOST6" },
+ { 0x0d, "HOST7" },
+ { 0x0e, "HOST8" },
+ { 0x0f, "HOST9" },
+ { 0x10, "HOST10" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x1f, "PHYSICAL" },
+ {}
+};
+
+static const struct gk104_fifo_func
+gp100_fifo = {
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .chan = {
+ &gp100_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+}
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gp100_fifo_gpfifo_oclass = {
+ .base.oclass = PASCAL_CHANNEL_GPFIFO_A,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
+};
nvkm-y += nvkm/engine/gr/gm107.o
nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
+nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
+nvkm-y += nvkm/engine/gr/ctxgp100.o
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 idle_timeout;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
}
int
extern const struct gf100_grctx_func gm20b_grctx;
+extern const struct gf100_grctx_func gp100_grctx;
+
/* context init value lists */
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
u32 idle_timeout;
int i;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
}
const struct gf100_grctx_func
u32 idle_timeout;
int i;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static void
+gp100_grctx_generate_pagepool(struct gf100_grctx *info)
+{
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 attrib = grctx->attrib_nr;
+ const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+ const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size, (1 << s), access);
+ const int max_batches = 0xffff;
+ u32 ao = 0;
+ u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+ int gpc, ppc, n = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+ mmio_refn(info, 0x419b00, 0x00000000, s, b);
+ mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+ mmio_wr32(info, 0x405830, attrib);
+ mmio_wr32(info, 0x40585c, alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 u = 0x418ea0 + (n * 0x04);
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+ continue;
+ mmio_wr32(info, o + 0xc0, bs);
+ mmio_wr32(info, o + 0xf4, bo);
+ mmio_wr32(info, o + 0xf0, bs);
+ bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, as);
+ mmio_wr32(info, o + 0xf8, ao);
+ ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, u, bs);
+ }
+ }
+
+ mmio_wr32(info, 0x418eec, 0x00000000);
+ mmio_wr32(info, 0x41befc, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_405b60(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+ u32 dist[TPC_MAX / 4] = {};
+ u32 gpcs[GPC_MAX * 2] = {};
+ u8 tpcnr[GPC_MAX];
+ int tpc, gpc, i;
+
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+ /* won't result in the same distribution as the binary driver where
+ * some of the gpcs have more tpcs than others, but this shall do
+ * for the moment. the code for earlier gpus has this issue too.
+ */
+ for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while(!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+ gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+ }
+
+ for (i = 0; i < dist_nr; i++)
+ nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < gr->gpc_nr * 2; i++)
+ nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+static void
+gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout, tmp;
+ int i;
+
+ gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
+
+ grctx->pagepool(info);
+ grctx->bundle(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
+
+ gm200_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x406500, 0x00000000);
+
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+ for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+ tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
+ nvkm_wr32(device, 0x4041c4, tmp);
+
+ gp100_grctx_generate_405b60(gr);
+
+ gf100_gr_icmd(gr, gr->fuc_bundle);
+ nvkm_wr32(device, 0x404154, idle_timeout);
+ gf100_gr_mthd(gr, gr->fuc_method);
+}
+
+const struct gf100_grctx_func
+gp100_grctx = {
+ .main = gp100_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x1080,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gp100_grctx_generate_attrib,
+ .attrib_nr_max = 0x660,
+ .attrib_nr = 0x440,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
}
static const struct nvkm_enum gf100_mp_warp_error[] = {
- { 0x00, "NO_ERROR" },
- { 0x01, "STACK_MISMATCH" },
+ { 0x01, "STACK_ERROR" },
+ { 0x02, "API_STACK_ERROR" },
+ { 0x03, "RET_EMPTY_STACK_ERROR" },
+ { 0x04, "PC_WRAP" },
{ 0x05, "MISALIGNED_PC" },
- { 0x08, "MISALIGNED_GPR" },
- { 0x09, "INVALID_OPCODE" },
- { 0x0d, "GPR_OUT_OF_BOUNDS" },
- { 0x0e, "MEM_OUT_OF_BOUNDS" },
- { 0x0f, "UNALIGNED_MEM_ACCESS" },
+ { 0x06, "PC_OVERFLOW" },
+ { 0x07, "MISALIGNED_IMMC_ADDR" },
+ { 0x08, "MISALIGNED_REG" },
+ { 0x09, "ILLEGAL_INSTR_ENCODING" },
+ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+ { 0x0b, "ILLEGAL_INSTR_PARAM" },
+ { 0x0c, "INVALID_CONST_ADDR" },
+ { 0x0d, "OOR_REG" },
+ { 0x0e, "OOR_ADDR" },
+ { 0x0f, "MISALIGNED_ADDR" },
{ 0x10, "INVALID_ADDR_SPACE" },
- { 0x11, "INVALID_PARAM" },
+ { 0x11, "ILLEGAL_INSTR_PARAM2" },
+ { 0x12, "INVALID_CONST_ADDR_LDC" },
+ { 0x13, "GEOMETRY_SM_ERROR" },
+ { 0x14, "DIVERGENT" },
+ { 0x15, "WARP_EXIT" },
{}
};
static const struct nvkm_bitfield gf100_mp_global_error[] = {
+ { 0x00000001, "SM_TO_SM_FAULT" },
+ { 0x00000002, "L1_ERROR" },
{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
- { 0x00000008, "OUT_OF_STACK_SPACE" },
+ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+ { 0x00000010, "BPT_INT" },
+ { 0x00000020, "BPT_PAUSE" },
+ { 0x00000040, "SINGLE_STEP_COMPLETE" },
+ { 0x20000000, "ECC_SEC_ERROR" },
+ { 0x40000000, "ECC_DED_ERROR" },
+ { 0x80000000, "TIMEOUT" },
{}
};
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
int i;
+ int ret = 0;
if (gr->firmware) {
/* load fuc microcode */
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
&gr->fuc409d);
+ if (ret)
+ return ret;
+
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
&gr->fuc41ad);
+ if (ret)
+ return ret;
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
/* start both of them running */
nvkm_wr32(device, 0x409840, 0xffffffff);
}
/* load HUB microcode */
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
nvkm_wr32(device, 0x4091c0, 0x01000000);
for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
nvkm_wr32(device, 0x41a188, i >> 6);
nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
}
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
/* load register lists */
gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
+
+void gm200_gr_init_gpc_mmu(struct gf100_gr *);
#endif
if (ret)
return ret;
-
return 0;
}
return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}
-static void
+void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
+ const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+static int
+gp100_gr_init(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, rop;
+ int i;
+
+ gr->func->init_gpc_mmu(gr);
+
+ gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+ gr->func->init_rop_active_fbps(gr);
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400124, 0x00000002);
+ nvkm_wr32(device, 0x409c24, 0x000f0002);
+ nvkm_wr32(device, 0x405848, 0xc0000000);
+ nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+ nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
+ nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+
+ gr->func->init_ppc_exceptions(gr);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+ }
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
+
+ gf100_gr_zbc_init(gr);
+
+ return gf100_gr_init_ctxctl(gr);
+}
+
+static const struct gf100_gr_func
+gp100_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 2,
+ .grctx = &gp100_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_A, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp100_gr, device, index, pgr);
+}
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+
+static bool
+nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
+{
+ u32 p = *addr;
+
+ if (*addr > bios->image0_size && bios->imaged_addr) {
+ *addr -= bios->image0_size;
+ *addr += bios->imaged_addr;
+ }
+
+ if (unlikely(*addr + size >= bios->size)) {
+ nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
+ return false;
+ }
+
+ return true;
+}
+
+u8
+nvbios_rd08(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 1)))
+ return bios->data[addr];
+ return 0x00;
+}
+
+u16
+nvbios_rd16(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 2)))
+ return get_unaligned_le16(&bios->data[addr]);
+ return 0x0000;
+}
+
+u32
+nvbios_rd32(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 4)))
+ return get_unaligned_le32(&bios->data[addr]);
+ return 0x00000000;
+}
u8
nvbios_checksum(const u8 *data, int size)
nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
{
struct nvkm_bios *bios;
+ struct nvbios_image image;
struct bit_entry bit_i;
- int ret;
+ int ret, idx = 0;
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
return -ENOMEM;
if (ret)
return ret;
+ /* Some tables have weird pointers that need adjustment before
+ * they're dereferenced. I'm not entirely sure why...
+ */
+ if (nvbios_image(bios, idx++, &image)) {
+ bios->image0_size = image.size;
+ while (nvbios_image(bios, idx++, &image)) {
+ if (image.type == 0xe0) {
+ bios->imaged_addr = image.base;
+ break;
+ }
+ }
+ }
+
/* detect type of vbios we're dealing with */
bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
"\xff\x7f""NV\0", 5);
{
u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
if (data) {
- info->match = nvbios_rd16(bios, data + 0x00);
+ info->proto = nvbios_rd08(bios, data + 0x00);
+ info->flags = nvbios_rd16(bios, data + 0x01);
info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
}
}
u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
u16 data, idx = 0;
while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
- if (info->match == type)
+ if ((info->proto == proto || info->proto == 0xff) &&
+ (info->flags == flags))
break;
}
return data;
case 0x30:
case 0x40:
case 0x41:
+ case 0x42:
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
*cnt = nvbios_rd08(bios, data + 0x03);
break;
case 0x40:
case 0x41:
+ case 0x42:
*hdr = nvbios_rd08(bios, data + 0x04);
*cnt = 0;
*len = 0;
break;
case 0x40:
case 0x41:
+ case 0x42:
info->flags = nvbios_rd08(bios, data + 0x04);
info->script[0] = nvbios_rd16(bios, data + 0x05);
info->script[1] = nvbios_rd16(bios, data + 0x07);
info->pe = nvbios_rd08(bios, data + 0x02);
info->tx_pu = nvbios_rd08(bios, data + 0x03);
break;
+ case 0x42:
+ info->dc = nvbios_rd08(bios, data + 0x00);
+ info->pe = nvbios_rd08(bios, data + 0x01);
+ info->tx_pu = nvbios_rd08(bios, data + 0x02);
+ break;
default:
data = 0x0000;
break;
bool
nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
{
+ u32 imaged_addr = bios->imaged_addr;
memset(image, 0x00, sizeof(*image));
+ bios->imaged_addr = 0;
do {
image->base += image->size;
- if (image->last || !nvbios_imagen(bios, image))
+ if (image->last || !nvbios_imagen(bios, image)) {
+ bios->imaged_addr = imaged_addr;
return false;
+ }
} while(idx--);
+ bios->imaged_addr = imaged_addr;
return true;
}
{}
};
-static u16
+static u32
pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_C;
- u16 data = 0x0000;
+ u32 data = 0x0000;
if (!bit_entry(bios, 'C', &bit_C)) {
if (bit_C.version == 1 && bit_C.length >= 10)
data = nvbios_rd16(bios, bit_C.offset + 8);
+ if (bit_C.version == 2 && bit_C.length >= 4)
+ data = nvbios_rd32(bios, bit_C.offset + 0);
if (data) {
*ver = nvbios_rd08(bios, data + 0);
*hdr = nvbios_rd08(bios, data + 1);
}
}
-static u16
+static u32
pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
- u16 data;
+ u32 data;
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
if (data && *ver >= 0x30) {
map = pll_map(bios);
while (map && map->reg) {
if (map->reg == reg && *ver >= 0x20) {
- u16 addr = (data += hdr);
+ u32 addr = (data += hdr);
*type = map->type;
while (cnt--) {
if (nvbios_rd32(bios, data) == map->reg)
return 0x0000;
}
-static u16
+static u32
pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
- u16 data;
+ u32 data;
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
if (data && *ver >= 0x30) {
map = pll_map(bios);
while (map && map->reg) {
if (map->type == type && *ver >= 0x20) {
- u16 addr = (data += hdr);
+ u32 addr = (data += hdr);
*reg = map->reg;
while (cnt--) {
if (nvbios_rd32(bios, data) == map->reg)
struct nvkm_device *device = subdev->device;
u8 ver, len;
u32 reg = type;
- u16 data;
+ u32 data;
if (type > PLL_MAX) {
reg = type;
#include <subdev/bios/image.h>
#include <subdev/bios/pmu.h>
-static u32
-weirdo_pointer(struct nvkm_bios *bios, u32 data)
-{
- struct nvbios_image image;
- int idx = 0;
- if (nvbios_image(bios, idx++, &image)) {
- data -= image.size;
- while (nvbios_image(bios, idx++, &image)) {
- if (image.type == 0xe0)
- return image.base + data;
- }
- }
- return 0;
-}
-
u32
nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
if (!bit_entry(bios, 'p', &bit_p)) {
if (bit_p.version == 2 && bit_p.length >= 4)
data = nvbios_rd32(bios, bit_p.offset + 0x00);
- if ((data = weirdo_pointer(bios, data))) {
+ if (data) {
*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
u32 data;
memset(info, 0x00, sizeof(*info));
while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
- if ( pmuE.type == type &&
- (data = weirdo_pointer(bios, pmuE.data))) {
+ if (pmuE.type == type && (data = pmuE.data)) {
info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
info->boot_addr = data + 0x30;
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 rammap = 0x0000;
+ u32 rammap = 0x0000;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- rammap = nvbios_rd16(bios, bit_P.offset + 4);
+ rammap = nvbios_rd32(bios, bit_P.offset + 4);
if (rammap) {
*ver = nvbios_rd08(bios, rammap + 0);
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (rammap && idx < *cnt) {
rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
{
struct nvkm_device *device = clk->base.subdev.device;
u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
- u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
+ u32 sclk, sctl, sdiv = 2;
switch (ssrc & 0x00000003) {
case 0:
case 2:
return 100000;
case 3:
- if (sctl & 0x80000000) {
- u32 sclk = read_vco(clk, dsrc + (doff * 4));
- u32 sdiv = (sctl & 0x0000003f) + 2;
- return (sclk * 2) / sdiv;
+ sclk = read_vco(clk, dsrc + (doff * 4));
+
+ /* Memclk has doff of 0 despite its alt. location */
+ if (doff <= 2) {
+ sctl = nvkm_rd32(device, dctl + (doff * 4));
+
+ if (sctl & 0x80000000) {
+ if (ssrc & 0x100)
+ sctl >>= 8;
+
+ sdiv = (sctl & 0x3f) + 2;
+ }
}
- return read_vco(clk, dsrc + (doff * 4));
+ return (sclk * 2) / sdiv;
default:
return 0;
}
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+ /* Test PLL lock */
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
break;
);
- nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+ /* Enable sync mode */
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
}
}
}
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+ /* Test PLL lock */
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
break;
);
- nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+ /* Enable sync mode */
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
}
}
#include <core/tegra.h>
#include <subdev/timer.h>
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w) ((1 << w) - 1)
-
-#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
-#define GPCPLL_CFG_ENABLE BIT(0)
-#define GPCPLL_CFG_IDDQ BIT(1)
-#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
-#define GPCPLL_CFG_LOCK BIT(17)
-
-#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
-#define GPCPLL_COEFF_M_SHIFT 0
-#define GPCPLL_COEFF_M_WIDTH 8
-#define GPCPLL_COEFF_N_SHIFT 8
-#define GPCPLL_COEFF_N_WIDTH 8
-#define GPCPLL_COEFF_P_SHIFT 16
-#define GPCPLL_COEFF_P_WIDTH 6
-
-#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
-#define GPCPLL_CFG2_SETUP2_SHIFT 16
-#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
-
-#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
-#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
-
-#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
-#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
-#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
-#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
-#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
-
-#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
-#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
-
-#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
-#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
-#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
-#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
-#define GPC2CLK_OUT_VCODIV_WIDTH 6
-#define GPC2CLK_OUT_VCODIV_SHIFT 8
-#define GPC2CLK_OUT_VCODIV1 0
-#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
- GPC2CLK_OUT_VCODIV_SHIFT)
-#define GPC2CLK_OUT_BYPDIV_WIDTH 6
-#define GPC2CLK_OUT_BYPDIV_SHIFT 0
-#define GPC2CLK_OUT_BYPDIV31 0x3c
-#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
- GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
- | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
- | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
-#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
- GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
- | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
- | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
-
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
- (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-
static const u8 _pl_to_div[] = {
/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
.min_pl = 1, .max_pl = 32,
};
-static void
+void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
struct nvkm_device *device = clk->base.subdev.device;
pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
-static u32
-gk20a_pllg_calc_rate(struct gk20a_clk *clk)
+void
+gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
+{
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
+
+ val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
+ val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
+ nvkm_wr32(device, GPCPLL_COEFF, val);
+}
+
+u32
+gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
u32 rate;
u32 divider;
- rate = clk->parent_rate * clk->pll.n;
- divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
+ rate = clk->parent_rate * pll->n;
+ divider = pll->m * clk->pl_to_div(pll->pl);
return rate / divider / 2;
}
-static int
-gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
+int
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
+ struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
u32 target_clk_f, ref_clk_f, target_freq;
target_clk_f = rate * 2 / KHZ;
ref_clk_f = clk->parent_rate / KHZ;
- max_vco_f = clk->params->max_vco;
+ target_vco_f = target_clk_f + target_clk_f / 50;
+ max_vco_f = max(clk->params->max_vco, target_vco_f);
min_vco_f = clk->params->min_vco;
best_m = clk->params->max_m;
best_n = clk->params->min_n;
best_pl = clk->params->min_pl;
- target_vco_f = target_clk_f + target_clk_f / 50;
- if (max_vco_f < target_vco_f)
- max_vco_f = target_vco_f;
-
/* min_pl <= high_pl <= max_pl */
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
high_pl = min(high_pl, clk->params->max_pl);
target_vco_f = target_clk_f * clk->pl_to_div(pl);
for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
- u32 u_f, vco_f;
-
- u_f = ref_clk_f / m;
+ u32 u_f = ref_clk_f / m;
if (u_f < clk->params->min_u)
break;
break;
for (; n <= n2; n++) {
+ u32 vco_f;
+
if (n < clk->params->min_n)
continue;
if (n > clk->params->max_n)
"no best match for target @ %dMHz on gpc_pll",
target_clk_f / KHZ);
- clk->pll.m = best_m;
- clk->pll.n = best_n;
- clk->pll.pl = best_pl;
+ pll->m = best_m;
+ pll->n = best_n;
+ pll->pl = best_pl;
- target_freq = gk20a_pllg_calc_rate(clk);
+ target_freq = gk20a_pllg_calc_rate(clk, pll);
nvkm_debug(subdev,
- "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
- target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
- clk->pl_to_div(clk->pll.pl));
+ "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq / KHZ, pll->m, pll->n, pll->pl,
+ clk->pl_to_div(pll->pl));
return 0;
}
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
- u32 val;
- int ramp_timeout;
+ struct gk20a_pll pll;
+ int ret = 0;
/* get old coefficients */
- val = nvkm_rd32(device, GPCPLL_COEFF);
+ gk20a_pllg_read_mnp(clk, &pll);
/* do nothing if NDIV is the same */
- if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+ if (n == pll.n)
return 0;
- /* setup */
- nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
- 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
- nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
- 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
-
/* pll slowdown mode */
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
/* new ndiv ready for ramp */
- val = nvkm_rd32(device, GPCPLL_COEFF);
- val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
- val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ pll.n = n;
udelay(1);
- nvkm_wr32(device, GPCPLL_COEFF, val);
+ gk20a_pllg_write_mnp(clk, &pll);
/* dynamic ramp to new ndiv */
- val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
- val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
udelay(1);
- nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
- for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
- udelay(1);
- val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
- if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
- break;
- }
+ /* wait for ramping to complete */
+ if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+ ret = -ETIMEDOUT;
/* exit slowdown mode */
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
- if (ramp_timeout <= 0) {
- nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
+ return ret;
}
-static void
+static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
nvkm_rd32(device, GPCPLL_CFG);
+
+ /* enable lock detection */
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+ val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+ nvkm_wr32(device, GPCPLL_CFG, val);
+ }
+
+ /* wait for lock */
+ if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+ GPCPLL_CFG_LOCK) < 0)
+ return -ETIMEDOUT;
+
+ /* switch to VCO mode */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ return 0;
}
static void
{
struct nvkm_device *device = clk->base.subdev.device;
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
nvkm_rd32(device, GPCPLL_CFG);
}
static int
-_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
- u32 val, cfg;
- struct gk20a_pll old_pll;
- u32 n_lo;
-
- /* get old coefficients */
- gk20a_pllg_read_mnp(clk, &old_pll);
-
- /* do NDIV slide if there is no change in M and PL */
- cfg = nvkm_rd32(device, GPCPLL_CFG);
- if (allow_slide && clk->pll.m == old_pll.m &&
- clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
- return gk20a_pllg_slide(clk, clk->pll.n);
- }
-
- /* slide down to NDIV_LO */
- if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
- int ret;
-
- n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
- ret = gk20a_pllg_slide(clk, n_lo);
+ struct gk20a_pll cur_pll;
+ int ret;
- if (ret)
- return ret;
- }
+ gk20a_pllg_read_mnp(clk, &cur_pll);
- /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
+ /* split VCO-to-bypass jump in half by setting out divider 1:2 */
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
- 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
-
- /* put PLL in bypass before programming it */
- val = nvkm_rd32(device, SEL_VCO);
- val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
udelay(2);
- nvkm_wr32(device, SEL_VCO, val);
-
- /* get out from IDDQ */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_IDDQ) {
- val &= ~GPCPLL_CFG_IDDQ;
- nvkm_wr32(device, GPCPLL_CFG, val);
- nvkm_rd32(device, GPCPLL_CFG);
- udelay(2);
- }
gk20a_pllg_disable(clk);
- nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
- clk->pll.m, clk->pll.n, clk->pll.pl);
-
- n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
- val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
- val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
- val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
- nvkm_wr32(device, GPCPLL_COEFF, val);
+ gk20a_pllg_write_mnp(clk, pll);
- gk20a_pllg_enable(clk);
-
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_LOCK_DET_OFF) {
- val &= ~GPCPLL_CFG_LOCK_DET_OFF;
- nvkm_wr32(device, GPCPLL_CFG, val);
- }
-
- if (nvkm_usec(device, 300,
- if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
- break;
- ) < 0)
- return -ETIMEDOUT;
-
- /* switch to VCO mode */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
- BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ ret = gk20a_pllg_enable(clk);
+ if (ret)
+ return ret;
/* restore out divider 1:1 */
- val = nvkm_rd32(device, GPC2CLK_OUT);
- if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
- (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
- val &= ~GPC2CLK_OUT_VCODIV_MASK;
- val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
- udelay(2);
- nvkm_wr32(device, GPC2CLK_OUT, val);
- /* Intentional 2nd write to assure linear divider operation */
- nvkm_wr32(device, GPC2CLK_OUT, val);
- nvkm_rd32(device, GPC2CLK_OUT);
- }
+ udelay(2);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
- /* slide up to new NDIV */
- return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
+ return 0;
}
static int
-gk20a_pllg_program_mnp(struct gk20a_clk *clk)
+gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
- int err;
+ struct gk20a_pll cur_pll;
+ int ret;
- err = _gk20a_pllg_program_mnp(clk, true);
- if (err)
- err = _gk20a_pllg_program_mnp(clk, false);
+ if (gk20a_pllg_is_enabled(clk)) {
+ gk20a_pllg_read_mnp(clk, &cur_pll);
+
+ /* just do NDIV slide if there is no change to M and PL */
+ if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+ return gk20a_pllg_slide(clk, pll->n);
+
+ /* slide down to current NDIV_LO */
+ cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+ ret = gk20a_pllg_slide(clk, cur_pll.n);
+ if (ret)
+ return ret;
+ }
+
+ /* program MNP with the new clock parameters and new NDIV_LO */
+ cur_pll = *pll;
+ cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+ ret = gk20a_pllg_program_mnp(clk, &cur_pll);
+ if (ret)
+ return ret;
- return err;
+ /* slide up to new NDIV */
+ return gk20a_pllg_slide(clk, pll->n);
}
static struct nvkm_pstate
struct gk20a_clk *clk = gk20a_clk(base);
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
+ struct gk20a_pll pll;
switch (src) {
case nv_clk_src_crystal:
return device->crystal;
case nv_clk_src_gpc:
- gk20a_pllg_read_mnp(clk, &clk->pll);
- return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
+ gk20a_pllg_read_mnp(clk, &pll);
+ return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
default:
nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
struct gk20a_clk *clk = gk20a_clk(base);
return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
- GK20A_CLK_GPC_MDIV);
+ GK20A_CLK_GPC_MDIV, &clk->pll);
}
int
gk20a_clk_prog(struct nvkm_clk *base)
{
struct gk20a_clk *clk = gk20a_clk(base);
+ int ret;
+
+ ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
+ if (ret)
+ ret = gk20a_pllg_program_mnp(clk, &clk->pll);
- return gk20a_pllg_program_mnp(clk);
+ return ret;
}
void
{
}
+int
+gk20a_clk_setup_slide(struct gk20a_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 step_a, step_b;
+
+ switch (clk->parent_rate) {
+ case 12000000:
+ case 12800000:
+ case 13000000:
+ step_a = 0x2b;
+ step_b = 0x0b;
+ break;
+ case 19200000:
+ step_a = 0x12;
+ step_b = 0x08;
+ break;
+ case 38400000:
+ step_a = 0x04;
+ step_b = 0x05;
+ break;
+ default:
+ nvkm_error(subdev, "invalid parent clock rate %u KHz",
+ clk->parent_rate / KHZ);
+ return -EINVAL;
+ }
+
+ nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+ nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+ return 0;
+}
+
void
gk20a_clk_fini(struct nvkm_clk *base)
{
struct nvkm_device *device = base->subdev.device;
struct gk20a_clk *clk = gk20a_clk(base);
- u32 val;
/* slide to VCO min */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_ENABLE) {
+ if (gk20a_pllg_is_enabled(clk)) {
struct gk20a_pll pll;
u32 n_lo;
gk20a_pllg_read_mnp(clk, &pll);
- n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
+ n_lo = gk20a_pllg_n_lo(clk, &pll);
gk20a_pllg_slide(clk, n_lo);
}
- /* put PLL in bypass before disabling it */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
gk20a_pllg_disable(clk);
+
+ /* set IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
static int
struct nvkm_device *device = subdev->device;
int ret;
+ /* get out from IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+ udelay(5);
+
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
GPC2CLK_OUT_INIT_VAL);
+ ret = gk20a_clk_setup_slide(clk);
+ if (ret)
+ return ret;
+
/* Start with lowest frequency */
base->func->calc(base, &base->func->pstates[0].base);
ret = base->func->prog(&clk->base);
};
int
-_gk20a_clk_ctor(struct nvkm_device *device, int index,
+gk20a_clk_ctor(struct nvkm_device *device, int index,
const struct nvkm_clk_func *func,
const struct gk20a_clk_pllg_params *params,
struct gk20a_clk *clk)
return -ENOMEM;
*pclk = &clk->base;
- ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+ ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
clk);
clk->pl_to_div = pl_to_div;
#ifndef __NVKM_CLK_GK20A_H__
#define __NVKM_CLK_GK20A_H__
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w) ((1 << (w)) - 1)
+
#define GK20A_CLK_GPC_MDIV 1000
#define SYS_GPCPLL_CFG_BASE 0x00137000
+#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE BIT(0)
+#define GPCPLL_CFG_IDDQ BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
+#define GPCPLL_CFG_LOCK BIT(17)
+
+#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0
+#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9
+#define GPCPLL_CFG3_VCO_CTRL_MASK \
+ (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
+#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8
+
+#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT 0
+#define GPCPLL_COEFF_M_WIDTH 8
+#define GPCPLL_COEFF_N_SHIFT 8
+#define GPCPLL_COEFF_N_WIDTH 8
+#define GPCPLL_COEFF_N_MASK \
+ (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
+#define GPCPLL_COEFF_P_SHIFT 16
+#define GPCPLL_COEFF_P_WIDTH 6
+
+#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
+
+#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+ (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
+
+#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH 6
+#define GPC2CLK_OUT_VCODIV_SHIFT 8
+#define GPC2CLK_OUT_VCODIV1 0
+#define GPC2CLK_OUT_VCODIV2 2
+#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+ GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH 6
+#define GPC2CLK_OUT_BYPDIV_SHIFT 0
+#define GPC2CLK_OUT_BYPDIV31 0x3c
+#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+ | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+ | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+ | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+ | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
/* All frequencies in Khz */
struct gk20a_clk_pllg_params {
};
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
-int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
+int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
+void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
+void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
+
+static inline bool
+gk20a_pllg_is_enabled(struct gk20a_clk *clk)
+{
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
+
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ return val & GPCPLL_CFG_ENABLE;
+}
+
+static inline u32
+gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
+{
+ return DIV_ROUND_UP(pll->m * clk->params->min_vco,
+ clk->parent_rate / KHZ);
+}
+
+int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
void gk20a_clk_fini(struct nvkm_clk *);
int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
int gk20a_clk_prog(struct nvkm_clk *);
void gk20a_clk_tidy(struct nvkm_clk *);
+int gk20a_clk_setup_slide(struct gk20a_clk *);
+
#endif
*/
#include <subdev/clk.h>
+#include <subdev/volt.h>
+#include <subdev/timer.h>
#include <core/device.h>
+#include <core/tegra.h>
#include "priv.h"
#include "gk20a.h"
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w) ((1 << w) - 1)
+#define GPCPLL_CFG_SYNC_MODE BIT(2)
#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
+#define GPCPLL_CFG2_SDM_DIN_SHIFT 0
+#define GPCPLL_CFG2_SDM_DIN_WIDTH 8
+#define GPCPLL_CFG2_SDM_DIN_MASK \
+ (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
+#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8
+#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15
+#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
+ (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10)
+#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0
+#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7
+#define GPCPLL_DVFS0_DFS_COEFF_MASK \
+ (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
+#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
+#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
+#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
+ (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
+
+#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14)
+#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0
+#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1
+#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8
+#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7
+#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15
+#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1
+#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16
+#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12
+#define GPCPLL_DVFS1_EN_SDM_SHIFT 28
+#define GPCPLL_DVFS1_EN_SDM_WIDTH 1
+#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28)
+#define GPCPLL_DVFS1_EN_DFS_SHIFT 29
+#define GPCPLL_DVFS1_EN_DFS_WIDTH 1
+#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29)
+#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30
+#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1
+#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30)
+#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31
+#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1
+#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31)
+
+#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
+#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16)
+
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7
+
+#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
+
+struct gm20b_clk_dvfs_params {
+ s32 coeff_slope;
+ s32 coeff_offs;
+ u32 vco_ctrl;
+};
+
+static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
+ .coeff_slope = -165230,
+ .coeff_offs = 214007,
+ .vco_ctrl = 0x7 << 3,
+};
+
+/*
+ * base.n is now the *integer* part of the N factor.
+ * sdm_din contains n's decimal part.
+ */
+struct gm20b_pll {
+ struct gk20a_pll base;
+ u32 sdm_din;
+};
+
+struct gm20b_clk_dvfs {
+ u32 dfs_coeff;
+ s32 dfs_det_max;
+ s32 dfs_ext_cal;
+};
+
+struct gm20b_clk {
+ /* currently applied parameters */
+ struct gk20a_clk base;
+ struct gm20b_clk_dvfs dvfs;
+ u32 uv;
+
+ /* new parameters to apply */
+ struct gk20a_pll new_pll;
+ struct gm20b_clk_dvfs new_dvfs;
+ u32 new_uv;
+
+ const struct gm20b_clk_dvfs_params *dvfs_params;
+
+ /* fused parameters */
+ s32 uvdet_slope;
+ s32 uvdet_offs;
+
+ /* safe frequency we can use at minimum voltage */
+ u32 safe_fmax_vmin;
+};
+#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
+
static u32 pl_to_div(u32 pl)
{
return pl;
.min_pl = 1, .max_pl = 31,
};
+static void
+gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 val;
+
+ gk20a_pllg_read_mnp(&clk->base, &pll->base);
+ val = nvkm_rd32(device, GPCPLL_CFG2);
+ pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
+ MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+}
+
+static void
+gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+ pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+ gk20a_pllg_write_mnp(&clk->base, &pll->base);
+}
+
+/*
+ * Determine DFS_COEFF for the requested voltage. Always select external
+ * calibration override equal to the voltage, and set maximum detection
+ * limit "0" (to make sure that PLL output remains under F/V curve when
+ * voltage increases).
+ */
+static void
+gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
+ struct gm20b_clk_dvfs *dvfs)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
+ u32 coeff;
+ /* Work with mv as uv would likely trigger an overflow */
+ s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
+
+ /* coeff = slope * voltage + offset */
+ coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
+ coeff = DIV_ROUND_CLOSEST(coeff, 1000);
+ dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
+
+ dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
+ clk->uvdet_slope);
+ /* should never happen */
+ if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
+ nvkm_error(subdev, "dfs_ext_cal overflow!\n");
+
+ dvfs->dfs_det_max = 0;
+
+ nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
+ __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
+ dvfs->dfs_det_max);
+}
+
+/*
+ * Solve equation for integer and fractional part of the effective NDIV:
+ *
+ * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
+ * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
+ *
+ * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
+ */
+static void
+gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ const struct gk20a_clk_pllg_params *p = clk->base.params;
+ u32 n;
+ s32 det_delta;
+ u32 rem, rem_range;
+
+ /* calculate current ext_cal and subtract previous one */
+ det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
+ clk->uvdet_slope);
+ det_delta -= clk->dvfs.dfs_ext_cal;
+ det_delta = min(det_delta, clk->dvfs.dfs_det_max);
+ det_delta *= clk->dvfs.dfs_coeff;
+
+ /* integer part of n */
+ n = (n_eff << DFS_DET_RANGE) - det_delta;
+ /* should never happen! */
+ if (n <= 0) {
+ nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
+ n = 1 << DFS_DET_RANGE;
+ }
+ if (n >> DFS_DET_RANGE > p->max_n) {
+ nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
+ n = p->max_n << DFS_DET_RANGE;
+ }
+ *n_int = n >> DFS_DET_RANGE;
+
+ /* fractional part of n */
+ rem = ((u32)n) & MASK(DFS_DET_RANGE);
+ rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
+ /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
+ rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
+ /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
+ *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+
+ nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
+ n_eff, *n_int, *sdm_din);
+}
+
+static int
+gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct gm20b_pll pll;
+ u32 n_int, sdm_din;
+ int ret = 0;
+
+ /* calculate the new n_int/sdm_din for this n/uv */
+ gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
+
+ /* get old coefficients */
+ gm20b_pllg_read_mnp(clk, &pll);
+ /* do nothing if NDIV is the same */
+ if (n_int == pll.base.n && sdm_din == pll.sdm_din)
+ return 0;
+
+ /* pll slowdown mode */
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+ /* new ndiv ready for ramp */
+ /* in DVFS mode SDM is updated via "new" field */
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
+ sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
+ pll.base.n = n_int;
+ udelay(1);
+ gk20a_pllg_write_mnp(&clk->base, &pll.base);
+
+ /* dynamic ramp to new ndiv */
+ udelay(1);
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
+
+ /* wait for ramping to complete */
+ if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+ ret = -ETIMEDOUT;
+
+ /* in DVFS mode complete SDM update */
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+ sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+
+ /* exit slowdown mode */
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+ nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
+
+ return ret;
+}
+
+static int
+gm20b_pllg_enable(struct gm20b_clk *clk)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nvkm_rd32(device, GPCPLL_CFG);
+
+ /* In DVFS mode lock cannot be used - so just delay */
+ udelay(40);
+
+ /* set SYNC_MODE for glitchless switch out of bypass */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
+ GPCPLL_CFG_SYNC_MODE);
+ nvkm_rd32(device, GPCPLL_CFG);
+
+ /* switch to VCO mode */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ return 0;
+}
+
+static void
+gm20b_pllg_disable(struct gm20b_clk *clk)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+ /* clear SYNC_MODE before disabling PLL */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
+
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+}
+
+static int
+gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct gm20b_pll cur_pll;
+ u32 n_int, sdm_din;
+ /* if we only change pdiv, we can do a glitchless transition */
+ bool pdiv_only;
+ int ret;
+
+ gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
+ gm20b_pllg_read_mnp(clk, &cur_pll);
+ pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
+ cur_pll.base.m == pll->m;
+
+ /* need full sequence if clock not enabled yet */
+ if (!gk20a_pllg_is_enabled(&clk->base))
+ pdiv_only = false;
+
+ /* split VCO-to-bypass jump in half by setting out divider 1:2 */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
+ udelay(2);
+
+ if (pdiv_only) {
+ u32 old = cur_pll.base.pl;
+ u32 new = pll->pl;
+
+ /*
+ * we can do a glitchless transition only if the old and new PL
+ * parameters share at least one bit set to 1. If this is not
+ * the case, calculate and program an interim PL that will allow
+ * us to respect that rule.
+ */
+ if ((old & new) == 0) {
+ cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
+ new | BIT(ffs(old) - 1));
+ gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+ }
+
+ cur_pll.base.pl = new;
+ gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+ } else {
+ /* disable before programming if more than pdiv changes */
+ gm20b_pllg_disable(clk);
+
+ cur_pll.base = *pll;
+ cur_pll.base.n = n_int;
+ cur_pll.sdm_din = sdm_din;
+ gm20b_pllg_write_mnp(clk, &cur_pll);
+
+ ret = gm20b_pllg_enable(clk);
+ if (ret)
+ return ret;
+ }
+
+ /* restore out divider 1:1 */
+ udelay(2);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
+
+ return 0;
+}
+
+static int
+gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+ struct gk20a_pll cur_pll;
+ int ret;
+
+ if (gk20a_pllg_is_enabled(&clk->base)) {
+ gk20a_pllg_read_mnp(&clk->base, &cur_pll);
+
+ /* just do NDIV slide if there is no change to M and PL */
+ if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+ return gm20b_pllg_slide(clk, pll->n);
+
+ /* slide down to current NDIV_LO */
+ cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+ ret = gm20b_pllg_slide(clk, cur_pll.n);
+ if (ret)
+ return ret;
+ }
+
+ /* program MNP with the new clock parameters and new NDIV_LO */
+ cur_pll = *pll;
+ cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+ ret = gm20b_pllg_program_mnp(clk, &cur_pll);
+ if (ret)
+ return ret;
+
+ /* slide up to new NDIV */
+ return gm20b_pllg_slide(clk, pll->n);
+}
+
+static int
+gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+ struct gm20b_clk *clk = gm20b_clk(base);
+ struct nvkm_subdev *subdev = &base->subdev;
+ struct nvkm_volt *volt = base->subdev.device->volt;
+ int ret;
+
+ ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
+ GK20A_CLK_GPC_MDIV, &clk->new_pll);
+ if (ret)
+ return ret;
+
+ clk->new_uv = volt->vid[cstate->voltage].uv;
+ gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
+
+ nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
+
+ return 0;
+}
+
+/*
+ * Compute PLL parameters that are always safe for the current voltage
+ */
+static void
+gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
+{
+ u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
+ u32 parent_rate = clk->base.parent_rate / KHZ;
+ u32 nmin, nsafe;
+
+ /* remove a safe margin of 10% */
+ if (rate > clk->safe_fmax_vmin)
+ rate = rate * (100 - 10) / 100;
+
+ /* gpc2clk */
+ rate *= 2;
+
+ nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
+ nsafe = pll->m * rate / (clk->base.parent_rate);
+
+ if (nsafe < nmin) {
+ pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
+ nsafe = nmin;
+ }
+
+ pll->n = nsafe;
+}
+
+static void
+gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* strobe to read external DFS coefficient */
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+ nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
+ coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
+
+ udelay(1);
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+}
+
+static void
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+ u32 val;
+
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
+ dfs_det_cal);
+ udelay(1);
+
+ val = nvkm_rd32(device, GPCPLL_DVFS1);
+ if (!(val & BIT(25))) {
+ /* Use external value to overwrite calibration value */
+ val |= BIT(25) | BIT(16);
+ nvkm_wr32(device, GPCPLL_DVFS1, val);
+ }
+}
+
+static void
+gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
+ struct gm20b_clk_dvfs *dvfs)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* strobe to read external DFS coefficient */
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+ nvkm_mask(device, GPCPLL_DVFS0,
+ GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
+ dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
+ dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
+
+ udelay(1);
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+
+ gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
+}
+
+static int
+gm20b_clk_prog(struct nvkm_clk *base)
+{
+ struct gm20b_clk *clk = gm20b_clk(base);
+ u32 cur_freq;
+ int ret;
+
+ /* No change in DVFS settings? */
+ if (clk->uv == clk->new_uv)
+ goto prog;
+
+ /*
+ * Interim step for changing DVFS detection settings: low enough
+ * frequency to be safe at at DVFS coeff = 0.
+ *
+ * 1. If voltage is increasing:
+ * - safe frequency target matches the lowest - old - frequency
+ * - DVFS settings are still old
+ * - Voltage already increased to new level by volt, but maximum
+ * detection limit assures PLL output remains under F/V curve
+ *
+ * 2. If voltage is decreasing:
+ * - safe frequency target matches the lowest - new - frequency
+ * - DVFS settings are still old
+ * - Voltage is also old, it will be lowered by volt afterwards
+ *
+ * Interim step can be skipped if old frequency is below safe minimum,
+ * i.e., it is low enough to be safe at any voltage in operating range
+ * with zero DVFS coefficient.
+ */
+ cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
+ if (cur_freq > clk->safe_fmax_vmin) {
+ struct gk20a_pll pll_safe;
+
+ if (clk->uv < clk->new_uv)
+ /* voltage will raise: safe frequency is current one */
+ pll_safe = clk->base.pll;
+ else
+ /* voltage will drop: safe frequency is new one */
+ pll_safe = clk->new_pll;
+
+ gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
+ ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * DVFS detection settings transition:
+ * - Set DVFS coefficient zero
+ * - Set calibration level to new voltage
+ * - Set DVFS coefficient to match new voltage
+ */
+ gm20b_dvfs_program_coeff(clk, 0);
+ gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
+ gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
+ gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+prog:
+ clk->uv = clk->new_uv;
+ clk->dvfs = clk->new_dvfs;
+ clk->base.pll = clk->new_pll;
+
+ return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
+}
+
static struct nvkm_pstate
gm20b_pstates[] = {
{
.voltage = 12,
},
},
-
};
+static void
+gm20b_clk_fini(struct nvkm_clk *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gm20b_clk *clk = gm20b_clk(base);
+
+ /* slide to VCO min */
+ if (gk20a_pllg_is_enabled(&clk->base)) {
+ struct gk20a_pll pll;
+ u32 n_lo;
+
+ gk20a_pllg_read_mnp(&clk->base, &pll);
+ n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
+ gm20b_pllg_slide(clk, n_lo);
+ }
+
+ gm20b_pllg_disable(clk);
+
+ /* set IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
+}
+
+static int
+gm20b_clk_init_dvfs(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ bool fused = clk->uvdet_offs && clk->uvdet_slope;
+ static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
+ u32 data;
+ int ret;
+
+ /* Enable NA DVFS */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
+ GPCPLL_DVFS1_EN_DFS_BIT);
+
+ /* Set VCO_CTRL */
+ if (clk->dvfs_params->vco_ctrl)
+ nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
+ clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
+
+ if (fused) {
+ /* Start internal calibration, but ignore results */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+ GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+ /* got uvdev parameters from fuse, skip calibration */
+ goto calibrated;
+ }
+
+ /*
+ * If calibration parameters are not fused, start internal calibration,
+ * wait for completion, and use results along with default slope to
+ * calculate ADC offset during boot.
+ */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+ GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+ /* Wait for internal calibration done (spec < 2us). */
+ ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
+ GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
+ GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
+ if (ret < 0) {
+ nvkm_error(subdev, "GPCPLL calibration timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ data = nvkm_rd32(device, GPCPLL_CFG3) >>
+ GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
+ data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
+
+ clk->uvdet_slope = ADC_SLOPE_UV;
+ clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
+
+ nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
+ clk->uvdet_offs, clk->uvdet_slope);
+
+calibrated:
+ /* Compute and apply initial DVFS parameters */
+ gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
+ gm20b_dvfs_program_coeff(clk, 0);
+ gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
+ gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
+ gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+ return 0;
+}
+
+/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
+static const struct nvkm_clk_func gm20b_clk;
+
static int
gm20b_clk_init(struct nvkm_clk *base)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
int ret;
+ u32 data;
+
+ /* get out from IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+ udelay(5);
+
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+ GPC2CLK_OUT_INIT_VAL);
/* Set the global bypass control to VCO */
nvkm_mask(device, BYPASSCTRL_SYS,
MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
0);
+ ret = gk20a_clk_setup_slide(clk);
+ if (ret)
+ return ret;
+
+ /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
+ data = nvkm_rd32(device, 0x021944);
+ if (!(data & 0x3)) {
+ data |= 0x2;
+ nvkm_wr32(device, 0x021944, data);
+
+ data = nvkm_rd32(device, 0x021948);
+ data |= 0x1;
+ nvkm_wr32(device, 0x021948, data);
+ }
+
+ /* Disable idle slow down */
+ nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
+
+ /* speedo >= 1? */
+ if (clk->base.func == &gm20b_clk) {
+ struct gm20b_clk *_clk = gm20b_clk(base);
+ struct nvkm_volt *volt = device->volt;
+
+ /* Get current voltage */
+ _clk->uv = nvkm_volt_get(volt);
+
+ /* Initialize DVFS */
+ ret = gm20b_clk_init_dvfs(_clk);
+ if (ret)
+ return ret;
+ }
+
/* Start with lowest frequency */
base->func->calc(base, &base->func->pstates[0].base);
- ret = base->func->prog(&clk->base);
+ ret = base->func->prog(base);
if (ret) {
nvkm_error(subdev, "cannot initialize clock\n");
return ret;
.prog = gk20a_clk_prog,
.tidy = gk20a_clk_tidy,
.pstates = gm20b_pstates,
+ /* Speedo 0 only supports 12 voltages */
.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
.domains = {
{ nv_clk_src_crystal, 0xff },
},
};
-int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+static const struct nvkm_clk_func
+gm20b_clk = {
+ .init = gm20b_clk_init,
+ .fini = gm20b_clk_fini,
+ .read = gk20a_clk_read,
+ .calc = gm20b_clk_calc,
+ .prog = gm20b_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gm20b_pstates,
+ .nr_pstates = ARRAY_SIZE(gm20b_pstates),
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max },
+ },
+};
+
+static int
+gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+ struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
int ret;
return -ENOMEM;
*pclk = &clk->base;
- ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
- &gm20b_pllg_params, clk);
+ ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+ &gm20b_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
return ret;
}
+
+/* FUSE register */
+#define FUSE_RESERVED_CALIB0 0x204
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
+#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
+#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
+
+static int
+gm20b_clk_init_fused_params(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ u32 val = 0;
+ u32 rev = 0;
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA)
+ tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
+ rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
+#endif
+
+ /* No fused parameters, we will calibrate later */
+ if (rev == 0)
+ return -EINVAL;
+
+ /* Integer part in mV + fractional part in uV */
+ clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
+ ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
+
+ /* Integer part in mV + fractional part in 100uV */
+ clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
+ ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
+
+ nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
+ clk->uvdet_slope, clk->uvdet_offs);
+ return 0;
+}
+
+static int
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_volt *volt = subdev->device->volt;
+ struct nvkm_pstate *pstates = clk->base.base.func->pstates;
+ int nr_pstates = clk->base.base.func->nr_pstates;
+ int vmin, id = 0;
+ u32 fmax = 0;
+ int i;
+
+ /* find lowest voltage we can use */
+ vmin = volt->vid[0].uv;
+ for (i = 1; i < volt->vid_nr; i++) {
+ if (volt->vid[i].uv <= vmin) {
+ vmin = volt->vid[i].uv;
+ id = volt->vid[i].vid;
+ }
+ }
+
+ /* find max frequency at this voltage */
+ for (i = 0; i < nr_pstates; i++)
+ if (pstates[i].base.voltage == id)
+ fmax = max(fmax,
+ pstates[i].base.domain[nv_clk_src_gpc]);
+
+ if (!fmax) {
+ nvkm_error(subdev, "failed to evaluate safe fmax\n");
+ return -EINVAL;
+ }
+
+ /* we are safe at 90% of the max frequency */
+ clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
+ nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);
+
+ return 0;
+}
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gm20b_clk *clk;
+ struct nvkm_subdev *subdev;
+ struct gk20a_clk_pllg_params *clk_params;
+ int ret;
+
+ /* Speedo 0 GPUs cannot use noise-aware PLL */
+ if (tdev->gpu_speedo_id == 0)
+ return gm20b_clk_new_speedo0(device, index, pclk);
+
+ /* Speedo >= 1, use NAPLL */
+ clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base.base;
+ subdev = &clk->base.base.subdev;
+
+ /* duplicate the clock parameters since we will patch them below */
+ clk_params = (void *) (clk + 1);
+ *clk_params = gm20b_pllg_params;
+ ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
+ &clk->base);
+ if (ret)
+ return ret;
+
+ /*
+ * NAPLL can only work with max_u, clamp the m range so
+ * gk20a_pllg_calc_mnp always uses it
+ */
+ clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
+ (clk->base.parent_rate / KHZ));
+ if (clk_params->max_m == 0) {
+ nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
+ kfree(clk);
+ return gm20b_clk_new_speedo0(device, index, pclk);
+ }
+
+ clk->base.pl_to_div = pl_to_div;
+ clk->base.div_to_pl = div_to_pl;
+
+ clk->dvfs_params = &gm20b_dvfs_params;
+
+ ret = gm20b_clk_init_fused_params(clk);
+ /*
+ * we will calibrate during init - should never happen on
+ * prod parts
+ */
+ if (ret)
+ nvkm_warn(subdev, "no fused calibration parameters\n");
+
+ ret = gm20b_clk_init_safe_fmax(clk);
+ if (ret)
+ return ret;
+
+ return 0;
+}
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gp100.o
+nvkm-y += nvkm/subdev/fb/gp104.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgp100.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
#include "ram.h"
#include <core/memory.h>
+#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_page)
+ fb->func->init_page(fb);
+ if (fb->func->init_unkn)
+ fb->func->init_unkn(fb);
return 0;
}
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
}
int
return 0;
}
+void
+gf100_fb_init_page(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ switch (fb->page) {
+ case 16:
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
+ break;
+ case 17:
+ default:
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
+ fb->page = 17;
+ break;
+ }
+}
+
void
gf100_fb_init(struct nvkm_fb *base)
{
if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
}
void *
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
void *gf100_fb_dtor(struct nvkm_fb *);
void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
+
+void gp100_fb_init(struct nvkm_fb *);
#endif
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
gk20a_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
}
gk20a_fb = {
.oneinit = gf100_fb_oneinit,
.init = gk20a_fb_init,
+ .init_page = gf100_fb_init_page,
.memtype_valid = gf100_fb_memtype_valid,
};
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
#include <core/memory.h>
+void
+gm200_fb_init_page(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ switch (fb->page) {
+ case 16:
+ nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
+ break;
+ case 17:
+ nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
+ break;
+ default:
+ nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
+ fb->page = 0;
+ break;
+ }
+}
+
static void
gm200_fb_init(struct nvkm_fb *base)
{
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gp100_fb_init_unkn(struct nvkm_fb *base)
+{
+ struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
+ nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
+ nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
+ nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
+}
+
+void
+gp100_fb_init(struct nvkm_fb *base)
+{
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ if (fb->r100c10_page)
+ nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+ nvkm_mask(device, 0x100cc4, 0x00060000,
+ max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
+
+static const struct nvkm_fb_func
+gp100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gm200_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp100_fb, device, index, pfb);
+}
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static const struct nvkm_fb_func
+gp104_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gm200_fb_init_page,
+ .ram_new = gp100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp104_fb, device, index, pfb);
+}
void *(*dtor)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_page)(struct nvkm_fb *);
+ void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
struct {
u32 pitch, u32 flags, struct nvkm_fb_tile *);
int gf100_fb_oneinit(struct nvkm_fb *);
+void gf100_fb_init_page(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+
+void gm200_fb_init_page(struct nvkm_fb *);
#endif
int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
--- /dev/null
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static int
+gp100_ram_init(struct nvkm_ram *ram)
+{
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, hdr, cnt, len, snr, ssz;
+ u32 data;
+ int i;
+
+ /* run a bunch of tables from rammap table. there's actually
+ * individual pointers for each rammap entry too, but, nvidia
+ * seem to just run the last two entries' scripts early on in
+ * their init, and never again.. we'll just run 'em all once
+ * for now.
+ *
+ * i strongly suspect that each script is for a separate mode
+ * (likely selected by 0x9a065c's lower bits?), and the
+ * binary driver skips the one that's already been setup by
+ * the init tables.
+ */
+ data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ if (!data || hdr < 0x15)
+ return -EINVAL;
+
+ cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
+ data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+ if (cnt) {
+ u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
+ for (i = 0; i < cnt; i++, data += 4) {
+ if (i != save >> 4) {
+ nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
+ nvbios_exec(&(struct nvbios_init) {
+ .subdev = subdev,
+ .bios = bios,
+ .offset = nvbios_rd32(bios, data),
+ .execute = 1,
+ });
+ }
+ }
+ nvkm_mask(device, 0x9a065c, 0x000000f0, save);
+ }
+
+ nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
+ nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+ nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
+ return 0;
+}
+
+static const struct nvkm_ram_func
+gp100_ram_func = {
+ .init = gp100_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+};
+
+int
+gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_ram *ram;
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+ const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+ u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+ u32 fbio_opt = nvkm_rd32(device, 0x021c14);
+ u64 part, size = 0, comm = ~0ULL;
+ bool mixed = false;
+ int ret;
+
+ nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+ nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
+ for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
+ if (!(fbio_opt & (1 << fbpa))) {
+ part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+ nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
+ part = part << 20;
+ if (part != comm) {
+ if (comm != ~0ULL)
+ mixed = true;
+ comm = min(comm, part);
+ }
+ size = size + part;
+ }
+ }
+
+ ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
+ *pram = ram;
+ if (ret)
+ return ret;
+
+ nvkm_mm_fini(&ram->vram);
+
+ if (mixed) {
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ((comm * fbpa_num) - rsvd_head) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+
+ ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
+ NVKM_RAM_MM_SHIFT,
+ (size - (comm * fbpa_num) - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
struct pwr_rail_t *r = &stbl.rail[i];
struct nvkm_iccsense_rail *rail;
struct nvkm_iccsense_sensor *sensor;
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
if (!r->mode || r->resistor_mohm == 0)
continue;
if (!sensor)
continue;
- rail = kmalloc(sizeof(*rail), GFP_KERNEL);
- if (!rail)
- return -ENOMEM;
-
switch (sensor->type) {
case NVBIOS_EXTDEV_INA209:
if (r->rail != 0)
continue;
- rail->read = nvkm_iccsense_ina209_read;
+ read = nvkm_iccsense_ina209_read;
break;
case NVBIOS_EXTDEV_INA219:
if (r->rail != 0)
continue;
- rail->read = nvkm_iccsense_ina219_read;
+ read = nvkm_iccsense_ina219_read;
break;
case NVBIOS_EXTDEV_INA3221:
if (r->rail >= 3)
continue;
- rail->read = nvkm_iccsense_ina3221_read;
+ read = nvkm_iccsense_ina3221_read;
break;
default:
continue;
}
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
sensor->rail_mask |= 1 << r->rail;
+ rail->read = read;
rail->sensor = sensor;
rail->idx = r->rail;
rail->mohm = r->resistor_mohm;
nvkm-y += nvkm/subdev/ltc/gk104.o
nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
+nvkm-y += nvkm/subdev/ltc/gp100.o
*/
#include "priv.h"
-#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
nvkm_wr32(device, 0x17ea58, depth);
}
-static const struct nvkm_bitfield
+const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
{ 0x00000001, "IDLE_ERROR_IQ" },
{ 0x00000002, "IDLE_ERROR_CBC" },
nvkm_wr32(device, 0x17e34c, depth);
}
-static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+void
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = <c->subdev;
struct nvkm_device *device = subdev->device;
- u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
- u32 stat = nvkm_rd32(device, base + 0x00c);
+ u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
+ u32 intr = nvkm_rd32(device, base + 0x00c);
+ u16 stat = intr & 0x0000ffff;
+ char msg[128];
if (stat) {
- nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
- nvkm_wr32(device, base + 0x00c, stat);
+ nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+ nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
}
+
+ nvkm_wr32(device, base + 0x00c, intr);
}
void
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
- gm107_ltc_lts_isr(ltc, c, s);
+ gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}
gm200_ltc = {
.oneinit = gm200_ltc_oneinit,
.init = gm200_ltc_init,
- .intr = gm107_ltc_intr, /*XXX: not validated */
+ .intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc = 16,
--- /dev/null
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static void
+gp100_ltc_intr(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ u32 mask;
+
+ mask = nvkm_rd32(device, 0x0001c0);
+ while (mask) {
+ u32 s, c = __ffs(mask);
+ for (s = 0; s < ltc->lts_nr; s++)
+ gm107_ltc_intr_lts(ltc, c, s);
+ mask &= ~(1 << c);
+ }
+}
+
+static int
+gp100_ltc_oneinit(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
+ ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
+ /*XXX: tagram allocation - TBD */
+ return nvkm_mm_init(<c->tags, 0, 0, 1);
+}
+
+static void
+gp100_ltc_init(struct nvkm_ltc *ltc)
+{
+ /*XXX: PMU LS call to setup tagram address */
+}
+
+static const struct nvkm_ltc_func
+gp100_ltc = {
+ .oneinit = gp100_ltc_oneinit,
+ .init = gp100_ltc_init,
+ .intr = gp100_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+ .invalidate = gf100_ltc_invalidate,
+ .flush = gf100_ltc_flush,
+};
+
+int
+gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+}
#define __NVKM_LTC_PRIV_H__
#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
#include <subdev/ltc.h>
+#include <core/enum.h>
int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
int index, struct nvkm_ltc **);
void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
void gf100_ltc_invalidate(struct nvkm_ltc *);
void gf100_ltc_flush(struct nvkm_ltc *);
+extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[];
void gm107_ltc_intr(struct nvkm_ltc *);
+void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts);
void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
void gm107_ltc_cbc_wait(struct nvkm_ltc *);
void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
nvkm-y += nvkm/subdev/mc/gf100.o
nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
+nvkm-y += nvkm/subdev/mc/gp100.o
#include <subdev/top.h>
void
-nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
+nvkm_mc_unk260(struct nvkm_device *device, u32 data)
{
- if (mc->func->unk260)
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc) && mc->func->unk260)
mc->func->unk260(mc, data);
}
void
-nvkm_mc_intr_unarm(struct nvkm_mc *mc)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
{
- return mc->func->intr_unarm(mc);
+ struct nvkm_mc *mc = device->mc;
+ const struct nvkm_mc_map *map;
+ if (likely(mc) && mc->func->intr_mask) {
+ u32 mask = nvkm_top_intr_mask(device, devidx);
+ for (map = mc->func->intr; !mask && map->stat; map++) {
+ if (map->unit == devidx)
+ mask = map->stat;
+ }
+ mc->func->intr_mask(mc, mask, en ? mask : 0);
+ }
+}
+
+void
+nvkm_mc_intr_unarm(struct nvkm_device *device)
+{
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc))
+ mc->func->intr_unarm(mc);
}
void
-nvkm_mc_intr_rearm(struct nvkm_mc *mc)
+nvkm_mc_intr_rearm(struct nvkm_device *device)
{
- return mc->func->intr_rearm(mc);
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc))
+ mc->func->intr_rearm(mc);
}
static u32
-nvkm_mc_intr_mask(struct nvkm_mc *mc)
+nvkm_mc_intr_stat(struct nvkm_mc *mc)
{
- u32 intr = mc->func->intr_mask(mc);
+ u32 intr = mc->func->intr_stat(mc);
if (WARN_ON_ONCE(intr == 0xffffffff))
intr = 0; /* likely fallen off the bus */
return intr;
}
void
-nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
- struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_mc *mc = device->mc;
struct nvkm_subdev *subdev;
- const struct nvkm_mc_map *map = mc->func->intr;
- u32 stat, intr = nvkm_mc_intr_mask(mc);
+ const struct nvkm_mc_map *map;
+ u32 stat, intr;
u64 subdevs;
- stat = nvkm_top_intr(device->top, intr, &subdevs);
+ if (unlikely(!mc))
+ return;
+
+ intr = nvkm_mc_intr_stat(mc);
+ stat = nvkm_top_intr(device, intr, &subdevs);
while (subdevs) {
enum nvkm_devidx subidx = __ffs64(subdevs);
subdev = nvkm_device_subdev(device, subidx);
subdevs &= ~BIT_ULL(subidx);
}
- while (map->stat) {
+ for (map = mc->func->intr; map->stat; map++) {
if (intr & map->stat) {
subdev = nvkm_device_subdev(device, map->unit);
if (subdev)
nvkm_subdev_intr(subdev);
stat &= ~map->stat;
}
- map++;
}
if (stat)
*handled = intr != 0;
}
-static void
-nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+static u32
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
+ enum nvkm_devidx devidx)
{
- struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
- u64 pmc_enable;
-
- if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
- for (map = mc->func->reset; map && map->stat; map++) {
- if (map->unit == devidx) {
- pmc_enable = map->stat;
- break;
+ u64 pmc_enable = 0;
+ if (likely(mc)) {
+ if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+ for (map = mc->func->reset; map && map->stat; map++) {
+ if (!isauto || !map->noauto) {
+ if (map->unit == devidx) {
+ pmc_enable = map->stat;
+ break;
+ }
+ }
}
}
}
+ return pmc_enable;
+}
+void
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
}
void
-nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
{
- if (likely(mc))
- nvkm_mc_reset_(mc, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ if (pmc_enable)
+ nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+}
+
+void
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ if (pmc_enable) {
+ nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+ nvkm_rd32(device, 0x000200);
+ }
}
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_mc *mc = nvkm_mc(subdev);
- nvkm_mc_intr_unarm(mc);
+ nvkm_mc_intr_unarm(subdev->device);
return 0;
}
struct nvkm_mc *mc = nvkm_mc(subdev);
if (mc->func->init)
mc->func->init(mc);
- nvkm_mc_intr_rearm(mc);
+ nvkm_mc_intr_rearm(subdev->device);
return 0;
}
.fini = nvkm_mc_fini,
};
+void
+nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc *mc)
+{
+ nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+ mc->func = func;
+}
+
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
int index, struct nvkm_mc **pmc)
{
struct nvkm_mc *mc;
-
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
-
- nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
- mc->func = func;
+ nvkm_mc_ctor(func, device, index, *pmc);
return 0;
}
.intr = g84_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = g84_mc_reset,
};
.intr = g98_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = g98_mc_reset,
};
}
u32
-gf100_mc_intr_mask(struct nvkm_mc *mc)
+gf100_mc_intr_stat(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
u32 intr0 = nvkm_rd32(device, 0x000100);
return intr0 | intr1;
}
+void
+gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_mask(device, 0x000640, mask, stat);
+ nvkm_mask(device, 0x000644, mask, stat);
+}
+
void
gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
{
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gf100_mc_reset,
.unk260 = gf100_mc_unk260,
};
const struct nvkm_mc_map
gk104_mc_reset[] = {
{ 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00002000, NVKM_SUBDEV_PMU, true },
{}
};
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
.unk260 = gf100_mc_unk260,
};
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
};
--- /dev/null
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gp100_mc(p) container_of((p), struct gp100_mc, base)
+#include "priv.h"
+
+struct gp100_mc {
+ struct nvkm_mc base;
+ spinlock_t lock;
+ bool intr;
+ u32 mask;
+};
+
+static void
+gp100_mc_intr_update(struct gp100_mc *mc)
+{
+ struct nvkm_device *device = mc->base.subdev.device;
+ u32 mask = mc->intr ? mc->mask : 0, i;
+ for (i = 0; i < 2; i++) {
+ nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+ nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
+ }
+}
+
+static void
+gp100_mc_intr_unarm(struct nvkm_mc *base)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = false;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_rearm(struct nvkm_mc *base)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = true;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->mask = (mc->mask & ~mask) | intr;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static const struct nvkm_mc_func
+gp100_mc = {
+ .init = nv50_mc_init,
+ .intr = gk104_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ struct gp100_mc *mc;
+
+ if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+ *pmc = &mc->base;
+
+ spin_lock_init(&mc->lock);
+ mc->intr = false;
+ mc->mask = 0x7fffffff;
+ return 0;
+}
{},
};
+static void
+gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+ nvkm_mask(mc->subdev.device, 0x000640, mask, stat);
+}
+
static const struct nvkm_mc_func
gt215_mc = {
.init = nv50_mc_init,
.intr = gt215_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_mask = gt215_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = gt215_mc_reset,
};
}
u32
-nv04_mc_intr_mask(struct nvkm_mc *mc)
+nv04_mc_intr_stat(struct nvkm_mc *mc)
{
return nvkm_rd32(mc->subdev.device, 0x000100);
}
.intr = nv04_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv04_mc_reset,
};
.intr = nv11_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv04_mc_reset,
};
.intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
.intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
.intr = nv50_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
#include <subdev/mc.h>
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
+ int index, struct nvkm_mc *);
int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
int index, struct nvkm_mc **);
struct nvkm_mc_map {
u32 stat;
u32 unit;
+ bool noauto;
};
struct nvkm_mc_func {
void (*intr_unarm)(struct nvkm_mc *);
/* enable reporting of interrupts to host */
void (*intr_rearm)(struct nvkm_mc *);
+ /* (un)mask delivery of specific interrupts */
+ void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
- u32 (*intr_mask)(struct nvkm_mc *);
+ u32 (*intr_stat)(struct nvkm_mc *);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
void nv04_mc_init(struct nvkm_mc *);
void nv04_mc_intr_unarm(struct nvkm_mc *);
void nv04_mc_intr_rearm(struct nvkm_mc *);
-u32 nv04_mc_intr_mask(struct nvkm_mc *);
+u32 nv04_mc_intr_stat(struct nvkm_mc *);
extern const struct nvkm_mc_map nv04_mc_reset[];
extern const struct nvkm_mc_map nv17_mc_intr[];
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
-u32 gf100_mc_intr_mask(struct nvkm_mc *);
+void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+u32 gf100_mc_intr_stat(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
extern const struct nvkm_mc_map gk104_mc_intr[];
nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
+nvkm-y += nvkm/subdev/pci/gp100.o
nvkm_pci_intr(int irq, void *arg)
{
struct nvkm_pci *pci = arg;
- struct nvkm_mc *mc = pci->subdev.device->mc;
+ struct nvkm_device *device = pci->subdev.device;
bool handled = false;
- if (likely(mc)) {
- nvkm_mc_intr_unarm(mc);
- if (pci->msi)
- pci->func->msi_rearm(pci);
- nvkm_mc_intr(mc, &handled);
- nvkm_mc_intr_rearm(mc);
- }
+ nvkm_mc_intr_unarm(device);
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
+ nvkm_mc_intr(device, &handled);
+ nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
--- /dev/null
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ nvkm_pci_wr32(pci, 0x0704, 0x00000000);
+}
+
+static const struct nvkm_pci_func
+gp100_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = gp100_pci_msi_rearm,
+};
+
+int
+gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+}
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+
+#include <subdev/mc.h>
#include <subdev/timer.h>
static const char *
int ret;
/* enable engine */
- nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
- nvkm_rd32(device, 0x200);
+ nvkm_mc_enable(device, sb->devidx);
ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
if (ret < 0) {
- nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+ nvkm_mc_disable(device, sb->devidx);
return ret;
}
/* enable IRQs */
nvkm_wr32(device, sb->base + 0x010, 0xff);
- nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
- nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+ nvkm_mc_intr_mask(device, sb->devidx, true);
return 0;
}
struct nvkm_device *device = sb->subdev.device;
/* disable IRQs and wait for any previous code to complete */
- nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
- nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+ nvkm_mc_intr_mask(device, sb->devidx, false);
nvkm_wr32(device, sb->base + 0x014, 0xff);
falcon_wait_idle(device, sb->base);
/* disable engine */
- nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+ nvkm_mc_disable(device, sb->devidx);
return 0;
}
return ret;
}
- /*
- * Build all blobs - the same blobs can be used to perform secure boot
- * multiple times
- */
- if (sb->func->prepare_blobs)
- ret = sb->func->prepare_blobs(sb);
-
- return ret;
+ return 0;
}
static int
/* setup the performing falcon's base address and masks */
switch (func->boot_falcon) {
case NVKM_SECBOOT_FALCON_PMU:
+ sb->devidx = NVKM_SUBDEV_PMU;
sb->base = 0x10a000;
- sb->irq_mask = 0x1000000;
- sb->enable_mask = 0x2000;
break;
default:
nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
/* Write LS blob */
ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+ if (ret)
+ nvkm_gpuobj_del(&gsb->ls_blob);
cleanup:
ls_ucode_mgr_cleanup(&mgr);
int ret;
/* Load and prepare the managed falcon's firmwares */
- ret = gm200_secboot_prepare_ls_blob(gsb);
- if (ret)
- return ret;
+ if (!gsb->ls_blob) {
+ ret = gm200_secboot_prepare_ls_blob(gsb);
+ if (ret)
+ return ret;
+ }
/* Load the HS firmware that will load the LS firmwares */
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
- &gsb->acr_load_blob,
- &gsb->acr_load_bl_desc, true);
- if (ret)
- return ret;
+ if (!gsb->acr_load_blob) {
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+ &gsb->acr_load_blob,
+ &gsb->acr_load_bl_desc, true);
+ if (ret)
+ return ret;
+ }
/* Load the HS firmware bootloader */
- ret = gm200_secboot_prepare_hsbl_blob(gsb);
- if (ret)
- return ret;
+ if (!gsb->hsbl_blob) {
+ ret = gm200_secboot_prepare_hsbl_blob(gsb);
+ if (ret)
+ return ret;
+ }
return 0;
}
static int
-gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
{
- struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20x_secboot_prepare_blobs(gsb);
return ret;
/* dGPU only: load the HS firmware that unprotects the WPR region */
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
- &gsb->acr_unload_blob,
- &gsb->acr_unload_bl_desc, false);
- if (ret)
- return ret;
+ if (!gsb->acr_unload_blob) {
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+ &gsb->acr_unload_blob,
+ &gsb->acr_unload_bl_desc, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
+static int
+gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ int ret;
+
+ /* firmware already loaded, nothing to do... */
+ if (gsb->firmware_ok)
+ return 0;
+
+ ret = gsb->func->prepare_blobs(gsb);
+ if (ret) {
+ nvkm_error(subdev, "failed to load secure firmware\n");
+ return ret;
+ }
+
+ gsb->firmware_ok = true;
+
+ return 0;
+}
/*
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
+ /* Make sure all blobs are ready */
+ ret = gm200_secboot_blobs_ready(gsb);
+ if (ret)
+ return ret;
+
/*
* Dummy GM200 implementation: perform secure boot each time we are
* called on FECS. Since only FECS and GPCCS are managed and started
.dtor = gm200_secboot_dtor,
.init = gm200_secboot_init,
.fini = gm200_secboot_fini,
- .prepare_blobs = gm200_secboot_prepare_blobs,
.reset = gm200_secboot_reset,
.start = gm200_secboot_start,
.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
.bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
.fixup_bl_desc = gm200_secboot_fixup_bl_desc,
.fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+ .prepare_blobs = gm200_secboot_prepare_blobs,
};
int
MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
u32 data_size;
};
+static int
+gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ int acr_size;
+ int ret;
+
+ ret = gm20x_secboot_prepare_blobs(gsb);
+ if (ret)
+ return ret;
+
+ acr_size = gsb->acr_load_blob->size;
+ /*
+ * On Tegra the WPR region is set by the bootloader. It is illegal for
+ * the HS blob to be larger than this region.
+ */
+ if (acr_size > gsb->wpr_size) {
+ nvkm_error(subdev, "WPR region too small for FW blob!\n");
+ nvkm_error(subdev, "required: %dB\n", acr_size);
+ nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
/**
* gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
*
.bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
.fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
.fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+ .prepare_blobs = gm20b_secboot_prepare_blobs,
};
}
#endif
-static int
-gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int acr_size;
- int ret;
-
- ret = gm20x_secboot_prepare_blobs(gsb);
- if (ret)
- return ret;
-
- acr_size = gsb->acr_load_blob->size;
- /*
- * On Tegra the WPR region is set by the bootloader. It is illegal for
- * the HS blob to be larger than this region.
- */
- if (acr_size > gsb->wpr_size) {
- nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
- nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
- nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
- return -ENOSPC;
- }
-
- return 0;
-}
-
static int
gm20b_secboot_init(struct nvkm_secboot *sb)
{
gm20b_secboot = {
.dtor = gm200_secboot_dtor,
.init = gm20b_secboot_init,
- .prepare_blobs = gm20b_secboot_prepare_blobs,
.reset = gm200_secboot_reset,
.start = gm200_secboot_start,
.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
int (*init)(struct nvkm_secboot *);
int (*fini)(struct nvkm_secboot *, bool suspend);
void *(*dtor)(struct nvkm_secboot *);
- int (*prepare_blobs)(struct nvkm_secboot *);
int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
* @inst: instance block for HS falcon
* @pgd: page directory for the HS falcon
* @vm: address space used by the HS falcon
- * @bl_desc_size: size of the BL descriptor used by this chip.
- * @fixup_bl_desc: hook that generates the proper BL descriptor format from
- * the generic GM200 format into a data array of size
- * bl_desc_size
+ * @falcon_state: current state of the managed falcons
+ * @firmware_ok: whether the firmware blobs have been created
*/
struct gm200_secboot {
struct nvkm_secboot base;
RUNNING,
} falcon_state[NVKM_SECBOOT_FALCON_END];
+ bool firmware_ok;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
+/**
+ * Contains functions we wish to abstract between GM200-like implementations
+ * @bl_desc_size: size of the BL descriptor used by this chip.
+ * @fixup_bl_desc: hook that generates the proper BL descriptor format from
+ * the generic GM200 format into a data array of size
+ * bl_desc_size
+ * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used
+ * @prepare_blobs: prepares the various blobs needed for secure booting
+ */
struct gm200_secboot_func {
/*
* Size of the bootloader descriptor for this chip. A block of this
* we want the HS FW to set up.
*/
void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+ int (*prepare_blobs)(struct gm200_secboot *);
};
int gm200_secboot_init(struct nvkm_secboot *);
}
u32
-nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
}
u32
-nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
{
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *info;
+
+ if (top) {
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index == devidx && info->intr >= 0)
+ return BIT(info->intr);
+ }
+ }
+
+ return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
+{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
u64 subdevs = 0;
u32 handled = 0;
}
enum nvkm_devidx
-nvkm_top_fault(struct nvkm_top *top, int fault)
+nvkm_top_fault(struct nvkm_device *device, int fault)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
}
enum nvkm_devidx
-nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
int n = 0;
struct nvkm_subdev *subdev = &top->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_top_device *info = NULL;
- u32 data, type;
+ u32 data, type, inst;
int i;
for (i = 0; i < 64; i++) {
if (!(info = nvkm_top_device_new(top)))
return -ENOMEM;
type = ~0;
+ inst = 0;
}
data = nvkm_rd32(device, 0x022700 + (i * 0x04));
case 0x00000000: /* NOT_VALID */
continue;
case 0x00000001: /* DATA */
+ inst = (data & 0x3c000000) >> 26;
info->addr = (data & 0x00fff000);
info->fault = (data & 0x000000f8) >> 3;
break;
continue;
/* Translate engine type to NVKM engine identifier. */
+#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
+#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
+ info->index = NVKM_ENGINE_##A##0 + inst
switch (type) {
- case 0x00000000: info->index = NVKM_ENGINE_GR; break;
- case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
- case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
- case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
- case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
- case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
- case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
- case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
- case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
- case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
- case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
- case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
- case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+ case 0x00000000: A_(GR ); break;
+ case 0x00000001: A_(CE0 ); break;
+ case 0x00000002: A_(CE1 ); break;
+ case 0x00000003: A_(CE2 ); break;
+ case 0x00000008: A_(MSPDEC); break;
+ case 0x00000009: A_(MSPPP ); break;
+ case 0x0000000a: A_(MSVLD ); break;
+ case 0x0000000b: A_(MSENC ); break;
+ case 0x0000000c: A_(VIC ); break;
+ case 0x0000000d: A_(SEC ); break;
+ case 0x0000000e: B_(NVENC ); break;
+ case 0x0000000f: A_(NVENC1); break;
+ case 0x00000010: A_(NVDEC ); break;
+ case 0x00000013: B_(CE ); break;
break;
default:
break;
}
- nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
- "runlist %2d intr %2d reset %2d\n", type,
+ nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+ "engine %2d runlist %2d intr %2d "
+ "reset %2d\n", type, inst,
info->index == NVKM_SUBDEV_NR ? NULL :
nvkm_subdev_name[info->index],
info->addr, info->fault, info->engine, info->runlist,
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
if (data && info.vidmask && info.base && info.step) {
+ volt->min_uv = info.min;
+ volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
if (info.base >= info.min &&
info.base <= info.max) {
}
volt->vid_mask = info.vidmask;
} else if (data && info.vidmask) {
+ volt->min_uv = 0xffffffff;
+ volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
&ivid);
volt->vid[volt->vid_nr].uv = ivid.voltage;
volt->vid[volt->vid_nr].vid = ivid.vid;
volt->vid_nr++;
+ volt->min_uv = min(volt->min_uv, ivid.voltage);
+ volt->max_uv = max(volt->max_uv, ivid.voltage);
}
}
volt->vid_mask = info.vidmask;
+ } else if (data && info.type == NVBIOS_VOLT_PWM) {
+ volt->min_uv = info.base;
+ volt->max_uv = info.base + info.pwm_range;
}
}
volt->func = func;
/* Assuming the non-bios device should build the voltage table later */
- if (bios)
+ if (bios) {
nvkm_volt_parse_bios(bios, volt);
+ nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
+ volt->min_uv, volt->max_uv);
+ }
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
return mv;
}
-int
+static int
gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
{
+ static const int v_scale = 1000;
int mv;
mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
- mv = DIV_ROUND_UP(mv, 1000);
+ mv = DIV_ROUND_UP(mv, v_scale);
return mv * 1000;
}
-int
+static int
gk20a_volt_vid_get(struct nvkm_volt *base)
{
struct gk20a_volt *volt = gk20a_volt(base);
return -EINVAL;
}
-int
+static int
gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
struct gk20a_volt *volt = gk20a_volt(base);
return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
}
-int
+static int
gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
struct gk20a_volt *volt = gk20a_volt(base);
};
int
-_gk20a_volt_ctor(struct nvkm_device *device, int index,
- const struct cvb_coef *coefs, int nb_coefs,
- struct gk20a_volt *volt)
+gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ int vmin, struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int i, uv;
volt->base.vid_nr = nb_coefs;
for (i = 0; i < volt->base.vid_nr; i++) {
volt->base.vid[i].vid = i;
- volt->base.vid[i].uv =
- gk20a_volt_calc_voltage(&coefs[i],
- tdev->gpu_speedo);
+ volt->base.vid[i].uv = max(
+ gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
+ vmin);
nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
volt->base.vid[i].vid, volt->base.vid[i].uv);
}
return -ENOMEM;
*pvolt = &volt->base;
- return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
- ARRAY_SIZE(gk20a_cvb_coef), volt);
+ return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+ ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
}
struct regulator *vdd;
};
-int _gk20a_volt_ctor(struct nvkm_device *device, int index,
- const struct cvb_coef *coefs, int nb_coefs,
- struct gk20a_volt *volt);
-
-int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
-int gk20a_volt_vid_get(struct nvkm_volt *volt);
-int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
-int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+int gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ int vmin, struct gk20a_volt *volt);
#endif
/* 921600 */ { 2647676, -106455, 1632 },
};
+static const struct cvb_coef gm20b_na_cvb_coef[] = {
+ /* KHz, c0, c1, c2, c3, c4, c5 */
+ /* 76800 */ { 814294, 8144, -940, 808, -21583, 226 },
+ /* 153600 */ { 856185, 8144, -940, 808, -21583, 226 },
+ /* 230400 */ { 898077, 8144, -940, 808, -21583, 226 },
+ /* 307200 */ { 939968, 8144, -940, 808, -21583, 226 },
+ /* 384000 */ { 981860, 8144, -940, 808, -21583, 226 },
+ /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
+ /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
+ /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
+ /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
+ /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
+ /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
+ /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
+ /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
+};
+
+const u32 speedo_to_vmin[] = {
+ /* 0, 1, 2, 3, 4, */
+ 950000, 840000, 818750, 840000, 810000,
+};
+
int
gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
+ u32 vmin;
+
+ if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
+ nvdev_error(device, "unsupported speedo %d\n",
+ tdev->gpu_speedo_id);
+ return -EINVAL;
+ }
volt = kzalloc(sizeof(*volt), GFP_KERNEL);
if (!volt)
return -ENOMEM;
*pvolt = &volt->base;
- return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
- ARRAY_SIZE(gm20b_cvb_coef), volt);
+ vmin = speedo_to_vmin[tdev->gpu_speedo_id];
+
+ if (tdev->gpu_speedo_id >= 1)
+ return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
+ ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+ else
+ return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+ ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
}
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
default n
help
DRM display driver for OMAP2/3/4 based boards.
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
+ of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
- int r;
if (dsi->vdds_dsi_reg != NULL)
return 0;
static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
{
- struct device_node *np;
+ struct device_node *np, *np_parent;
np = of_parse_phandle(node, "remote-endpoint", 0);
if (!np)
return NULL;
- np = of_get_next_parent(np);
+ np_parent = of_get_next_parent(np);
+ of_node_put(np);
- return np;
+ return np_parent;
}
struct device_node *
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
if (p->backlight) {
p->backlight->props.power = FB_BLANK_POWERDOWN;
+ p->backlight->props.state |= BL_CORE_FBBLANK;
backlight_update_status(p->backlight);
}
msleep(p->desc->delay.enable);
if (p->backlight) {
+ p->backlight->props.state &= ~BL_CORE_FBBLANK;
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
}
.num_modes = 1,
.bpc = 6,
.size = {
- .width = 1024,
- .height = 600,
+ .width = 154,
+ .height = 90,
},
};
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+ .clock = 200000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 12,
+ .hsync_end = 1536 + 12 + 16,
+ .htotal = 1536 + 12 + 16 + 48,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 8,
+ .vsync_end = 2048 + 8 + 4,
+ .vtotal = 2048 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+ .modes = &lg_lp079qx1_sp0v_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 129,
+ .height = 171,
+ },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+ .clock = 205210,
+ .hdisplay = 2048,
+ .hsync_start = 2048 + 150,
+ .hsync_end = 2048 + 150 + 5,
+ .htotal = 2048 + 150 + 5 + 5,
+ .vdisplay = 1536,
+ .vsync_start = 1536 + 3,
+ .vsync_end = 1536 + 3 + 1,
+ .vtotal = 1536 + 3 + 1 + 9,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+ .modes = &lg_lp097qx1_spa1_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 208,
+ .height = 147,
+ },
+};
+
static const struct drm_display_mode lg_lp120up1_mode = {
.clock = 162300,
.hdisplay = 1920,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+ .clock = 271560,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 2,
+ .vsync_end = 1600 + 2 + 5,
+ .vtotal = 1600 + 2 + 5 + 57,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+ .modes = &samsung_lsn122dl01_c01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
.num_modes = 1,
.bpc = 6,
.size = {
- .width = 1024,
- .height = 600,
+ .width = 223,
+ .height = 125,
},
};
},
};
+static const struct display_timing sharp_lq101k1ly04_timing = {
+ .pixelclock = { 60000000, 65000000, 80000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 20, 20, 20 },
+ .hback_porch = { 20, 20, 20 },
+ .hsync_len = { 10, 10, 10 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 4, 4, 4 },
+ .vsync_len = { 4, 4, 4 },
+ .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc sharp_lq101k1ly04 = {
+ .timings = &sharp_lq101k1ly04_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct drm_display_mode sharp_lq123p1jx31_mode = {
+ .clock = 252750,
+ .hdisplay = 2400,
+ .hsync_start = 2400 + 48,
+ .hsync_end = 2400 + 48 + 32,
+ .htotal = 2400 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 3,
+ .vsync_end = 1600 + 3 + 10,
+ .vtotal = 1600 + 3 + 10 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+ .modes = &sharp_lq123p1jx31_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+ .clock = 147000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 15,
+ .vsync_end = 1200 + 15 + 2,
+ .vtotal = 1200 + 15 + 2 + 18,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+ .modes = &starry_kr122ea0sra_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
static const struct drm_display_mode tpk_f07a_0102_mode = {
.clock = 33260,
.hdisplay = 800,
}, {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
+ }, {
+ .compatible = "lg,lp079qx1-sp0v",
+ .data = &lg_lp079qx1_sp0v,
+ }, {
+ .compatible = "lg,lp097qx1-spa1",
+ .data = &lg_lp097qx1_spa1,
}, {
.compatible = "lg,lp120up1",
.data = &lg_lp120up1,
}, {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
+ }, {
+ .compatible = "samsung,lsn122dl01-c01",
+ .data = &samsung_lsn122dl01_c01,
}, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "sharp,lq101k1ly04",
+ .data = &sharp_lq101k1ly04,
+ }, {
+ .compatible = "sharp,lq123p1jx31",
+ .data = &sharp_lq123p1jx31,
}, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
+ }, {
+ .compatible = "starry,kr122ea0sra",
+ .data = &starry_kr122ea0sra,
}, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
.lanes = 4,
};
-
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
config DRM_QXL
tristate "QXL virtual GPU"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select CRC32
help
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
if (!qxl_check_idle(qdev->release_ring)) {
- queue_work(qdev->gc_queue, &qdev->gc_work);
+ schedule_work(&qdev->gc_work);
if (flush)
flush_work(&qdev->gc_work);
return true;
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
- struct qxl_drawable *drawable,
unsigned num_clips,
struct qxl_bo *clips_bo)
{
* correctly globaly, since that would require
* tracking all of our palettes. */
ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+ if (ret)
+ return ret;
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
if (ret)
goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ rects = drawable_set_clipping(qdev, num_clips, clips_bo);
if (!rects)
goto out_release_backoff;
struct qxl_bo *current_release_bo[3];
int current_release_bo_offset[3];
- struct workqueue_struct *gc_queue;
struct work_struct gc_work;
struct drm_property *hotplug_mode_update_property;
(unsigned long)qdev->surfaceram_size);
- qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
qxl_bo_unref(&qdev->current_release_bo[0]);
if (qdev->current_release_bo[1])
qxl_bo_unref(&qdev->current_release_bo[1]);
- flush_workqueue(qdev->gc_queue);
- destroy_workqueue(qdev->gc_queue);
- qdev->gc_queue = NULL;
-
+ flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
struct qxl_device *qdev;
int r;
- /* require kms */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
if (qdev == NULL)
return -ENOMEM;
struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ return ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
- if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ && !radeon_crtc->ss_enabled)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
- if (ASIC_IS_AVIVO(rdev))
+ if (rdev->family >= CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
int new_fw = 0;
int err;
int num_fw;
+ bool new_smc = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ if ((rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x665f))
+ new_smc = true;
new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ if (rdev->pdev->revision == 0x80)
+ new_smc = true;
new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+ if (new_smc)
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
}
}
rdev->rlc.cs_data = ci_cs_data;
- rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
}
break;
}
+ case PACKET3_PFP_SYNC_ME:
+ if (pkt->count) {
+ DRM_ERROR("bad PFP_SYNC_ME\n");
+ return -EINVAL;
+ }
+ break;
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
case PACKET3_MPEG_INDEX:
case PACKET3_WAIT_REG_MEM:
case PACKET3_MEM_WRITE:
+ case PACKET3_PFP_SYNC_ME:
case PACKET3_SURFACE_SYNC:
case PACKET3_EVENT_WRITE:
case PACKET3_EVENT_WRITE_EOP:
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "radeon_acpi.h"
struct radeon_atpx {
acpi_handle handle;
struct radeon_atpx_functions functions;
+ bool is_hybrid;
};
static struct radeon_atpx_priv {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool radeon_is_atpx_hybrid(void) {
+ return radeon_atpx_priv.atpx.is_hybrid;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ /* some bioses set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+#if 1
+ /* This is a temporary hack until the D3 cold support
+ * makes it upstream. The ATPX power_control method seems
+ * to still work on even if the system should be using
+ * the new standardized hybrid D3 cold ACPI interface.
+ */
+ atpx->functions.power_cntl = true;
+#else
+ atpx->functions.power_cntl = false;
+#endif
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
/*
* GPU helpers function.
*/
+
+/**
+ * radeon_device_is_virtual - check if we are running is a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
/**
* radeon_card_posted - check if the hw has already been initialized
*
{
uint32_t reg;
+ /* for pass through, always force asic_init */
+ if (radeon_device_is_virtual())
+ return false;
+
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
return 0;
failed:
+ /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+ if (radeon_is_px(ddev))
+ pm_runtime_put_noidle(ddev->dev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
return r;
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (freeze && rdev->family >= CHIP_R600) {
+ if (freeze && rdev->family >= CHIP_CEDAR) {
rdev->asic->asic_reset(rdev, true);
pci_restore_state(dev->pdev);
} else if (suspend) {
radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
+ drm_crtc_force_disable_all(rdev->ddev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
* 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
* 2.44.0 - SET_APPEND_CNT packet3 support
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
+ * 2.46.0 - Add PFP_SYNC_ME support on evergreen
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 45
+#define KMS_DRIVER_MINOR 46
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
#else
static inline void radeon_register_atpx_handler(void) {}
static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
int radeon_no_wb;
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (radeon_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!radeon_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (radeon_is_atpx_hybrid() ||
+ !radeon_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
if (rdev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (radeon_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
radeon_kfd_device_fini(rdev);
if (IS_ERR(fence))
return PTR_ERR(fence);
- r = ttm_bo_move_accel_cleanup(bo, &fence->base,
- evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
radeon_fence_unref(&fence);
return r;
}
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (r)
+ return r;
+
/* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
if (WARN_ON_ONCE(rbo->pin_count > 0))
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
if (r) {
return r;
}
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
char fw_name[30];
int err;
int new_fw = 0;
+ bool new_smc = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
+ /* XXX: figure out which Tahitis need the new ucode */
+ if (0)
+ new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
break;
case CHIP_VERDE:
chip_name = "VERDE";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B))
+ new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
break;
case CHIP_OLAND:
chip_name = "OLAND";
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605))
+ new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667))
+ new_smc = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+ if (new_smc)
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
rcar_du_plane.o \
rcar_du_vgacon.o
-rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
- rcar_du_hdmienc.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o
+
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
if (status & DSSR_FRM) {
- drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+ drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
}
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
break;
case DRM_MODE_ENCODER_TMDS:
- ret = rcar_du_hdmi_connector_init(rcdu, renc);
+ /* connector managed by the bridge driver */
break;
default:
#define __RCAR_DU_ENCODER_H__
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder_slave.h>
struct rcar_du_device;
struct rcar_du_hdmienc;
};
struct rcar_du_encoder {
- struct drm_encoder_slave slave;
+ struct drm_encoder base;
enum rcar_du_output output;
struct rcar_du_hdmienc *hdmi;
struct rcar_du_lvdsenc *lvds;
};
#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, slave.base)
+ container_of(e, struct rcar_du_encoder, base)
-#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
+#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
struct rcar_du_connector {
struct drm_connector connector;
+++ /dev/null
-/*
- * R-Car Display Unit HDMI Connector
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
-#include "rcar_du_kms.h"
-
-#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
-
-static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct rcar_du_connector *con = to_rcar_connector(connector);
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->get_modes == NULL)
- return 0;
-
- return sfuncs->get_modes(encoder, connector);
-}
-
-static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct rcar_du_connector *con = to_rcar_connector(connector);
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->mode_valid == NULL)
- return MODE_OK;
-
- return sfuncs->mode_valid(encoder, mode);
-}
-
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = rcar_du_hdmi_connector_get_modes,
- .mode_valid = rcar_du_hdmi_connector_mode_valid,
-};
-
-static enum drm_connector_status
-rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct rcar_du_connector *con = to_rcar_connector(connector);
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->detect == NULL)
- return connector_status_unknown;
-
- return sfuncs->detect(encoder, connector);
-}
-
-static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = rcar_du_hdmi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_connector *rcon;
- struct drm_connector *connector;
- int ret;
-
- rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
- if (rcon == NULL)
- return -ENOMEM;
-
- connector = &rcon->connector;
- connector->display_info.width_mm = 0;
- connector->display_info.height_mm = 0;
- connector->interlace_allowed = true;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0)
- return ret;
-
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_object_property_set_value(&connector->base,
- rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
-
- ret = drm_mode_connector_attach_encoder(connector, encoder);
- if (ret < 0)
- return ret;
-
- rcon->encoder = renc;
-
- return 0;
-}
+++ /dev/null
-/*
- * R-Car Display Unit HDMI Connector
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_HDMICON_H__
-#define __RCAR_DU_HDMICON_H__
-
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
-int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc);
-#else
-static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
-{
- return -ENOSYS;
-}
-#endif
-
-#endif /* __RCAR_DU_HDMICON_H__ */
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
- struct device *dev;
bool enabled;
};
#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
-#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
true);
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
-
hdmienc->enabled = true;
}
struct drm_connector_state *conn_state)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- const struct drm_display_mode *mode = &crtc_state->mode;
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
adjusted_mode);
- if (sfuncs->mode_fixup == NULL)
- return 0;
-
- return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
+ return 0;
}
+
static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->mode_set)
- sfuncs->mode_set(encoder, mode, adjusted_mode);
rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
}
rcar_du_hdmienc_disable(encoder);
drm_encoder_cleanup(encoder);
- put_device(hdmienc->dev);
}
static const struct drm_encoder_funcs encoder_funcs = {
struct rcar_du_encoder *renc, struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct drm_i2c_encoder_driver *driver;
- struct i2c_client *i2c_slave;
+ struct drm_bridge *bridge;
struct rcar_du_hdmienc *hdmienc;
int ret;
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate the slave I2C device and driver. */
- i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
- dev_dbg(rcdu->dev,
- "can't get I2C slave for %s, deferring probe\n",
- of_node_full_name(np));
+ /* Locate drm bridge from the hdmi encoder DT node */
+ bridge = of_drm_find_bridge(np);
+ if (!bridge)
return -EPROBE_DEFER;
- }
-
- hdmienc->dev = &i2c_slave->dev;
-
- if (hdmienc->dev->driver == NULL) {
- dev_dbg(rcdu->dev,
- "I2C slave %s not probed yet, deferring probe\n",
- dev_name(hdmienc->dev));
- ret = -EPROBE_DEFER;
- goto error;
- }
-
- /* Initialize the slave encoder. */
- driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
- ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
- if (ret < 0)
- goto error;
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret < 0)
- goto error;
+ return ret;
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- return 0;
+ /* Link drm_bridge to encoder */
+ bridge->encoder = encoder;
+
+ ret = drm_bridge_attach(rcdu->ddev, bridge);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
-error:
- put_device(hdmienc->dev);
- return ret;
+ return 0;
}
depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_PANEL
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Rockchip soc chipset.
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
+#define RK3288_GRF_SOC_CON6 0x25c
+#define RK3288_EDP_LCDC_SEL BIT(5)
+#define RK3399_GRF_SOC_CON20 0x6250
+#define RK3399_EDP_LCDC_SEL BIT(5)
+
+#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
+
#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm)
-/* dp grf register offset */
-#define GRF_SOC_CON6 0x025c
-#define GRF_EDP_LCD_SEL_MASK BIT(5)
-#define GRF_EDP_SEL_VOP_LIT BIT(5)
-#define GRF_EDP_SEL_VOP_BIG 0
+/**
+ * struct rockchip_dp_chip_data - splite the grf setting of kind of chips
+ * @lcdsel_grf_reg: grf register offset of lcdc select
+ * @lcdsel_big: reg value of selecting vop big for eDP
+ * @lcdsel_lit: reg value of selecting vop little for eDP
+ * @chip_type: specific chip type
+ */
+struct rockchip_dp_chip_data {
+ u32 lcdsel_grf_reg;
+ u32 lcdsel_big;
+ u32 lcdsel_lit;
+ u32 chip_type;
+};
struct rockchip_dp_device {
struct drm_device *drm_dev;
struct drm_display_mode mode;
struct clk *pclk;
+ struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ const struct rockchip_dp_chip_data *data;
+
struct analogix_dp_plat_data plat_data;
};
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+ clk_disable_unprepare(dp->pclk);
return ret;
}
return 0;
}
+static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+ struct drm_connector *connector)
+{
+ struct drm_display_info *di = &connector->display_info;
+ /* VOP couldn't output YUV video format for eDP rightly */
+ u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
+
+ if ((di->color_formats & mask)) {
+ DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
+ di->color_formats &= ~mask;
+ di->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ di->bpc = 8;
+ }
+
+ return 0;
+}
+
static bool
rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
return;
if (ret)
- val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+ val = dp->data->lcdsel_lit;
else
- val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+ val = dp->data->lcdsel_big;
dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
- ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
- if (ret != 0) {
- dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+ ret = clk_prepare_enable(dp->grfclk);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
+
+ ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+ if (ret != 0)
+ dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+
+ clk_disable_unprepare(dp->grfclk);
}
static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ int ret;
/*
- * FIXME(Yakir): driver should configure the CRTC output video
- * mode with the display information which indicated the monitor
- * support colorimetry.
- *
- * But don't know why the CRTC driver seems could only output the
- * RGBaaa rightly. For example, if connect the "innolux,n116bge"
- * eDP screen, EDID would indicated that screen only accepted the
- * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
- * screen would show a blue picture (RGB888 show a green picture).
- * But if I configure CTRC to RGBaaa, and eDP driver still keep
- * RGB666 input video mode, then screen would works prefect.
+ * The hardware IC designed that VOP must output the RGB10 video
+ * format to eDP controller, and if eDP panel only support RGB8,
+ * then eDP controller should cut down the video data, not via VOP
+ * controller, that's why we need to hardcode the VOP output mode
+ * to RGA10 here.
*/
+
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_eDP;
+ if (dp->data->chip_type == RK3399_EDP) {
+ /*
+ * For RK3399, VOP Lit must code the out mode to RGB888,
+ * VOP Big must code the out mode to RGB10.
+ */
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
+ encoder);
+ if (ret > 0)
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ }
return 0;
}
return PTR_ERR(dp->grf);
}
+ dp->grfclk = devm_clk_get(dev, "grf");
+ if (PTR_ERR(dp->grfclk) == -ENOENT) {
+ dp->grfclk = NULL;
+ } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(dp->grfclk)) {
+ dev_err(dev, "failed to get grf clock\n");
+ return PTR_ERR(dp->grfclk);
+ }
+
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
dev_err(dev, "failed to get pclk property\n");
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to pre init %d\n", ret);
+ clk_disable_unprepare(dp->pclk);
return ret;
}
void *data)
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+ const struct rockchip_dp_chip_data *dp_data;
struct drm_device *drm_dev = data;
int ret;
*/
dev_set_drvdata(dev, NULL);
+ dp_data = of_device_get_match_data(dev);
+ if (!dp_data)
+ return -ENODEV;
+
ret = rockchip_dp_init(dp);
if (ret < 0)
return ret;
+ dp->data = dp_data;
dp->drm_dev = drm_dev;
ret = rockchip_dp_drm_create_encoder(dp);
dp->plat_data.encoder = &dp->encoder;
- dp->plat_data.dev_type = RK3288_DP;
+ dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
+ dp->plat_data.get_modes = rockchip_dp_get_modes;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
}
{
struct device *dev = &pdev->dev;
struct device_node *panel_node, *port, *endpoint;
+ struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
- struct drm_panel *panel;
port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- dev_err(dev, "can't find output port\n");
- return -EINVAL;
- }
-
- endpoint = of_get_child_by_name(port, "endpoint");
- of_node_put(port);
- if (!endpoint) {
- dev_err(dev, "no output endpoint found\n");
- return -EINVAL;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no output node found\n");
- return -EINVAL;
- }
-
- panel = of_drm_find_panel(panel_node);
- if (!panel) {
- DRM_ERROR("failed to find panel\n");
+ if (port) {
+ endpoint = of_get_child_by_name(port, "endpoint");
+ of_node_put(port);
+ if (!endpoint) {
+ dev_err(dev, "no output endpoint found\n");
+ return -EINVAL;
+ }
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!panel_node) {
+ dev_err(dev, "no output node found\n");
+ return -EINVAL;
+ }
+
+ panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- return -EPROBE_DEFER;
+ if (!panel) {
+ DRM_ERROR("failed to find panel\n");
+ return -EPROBE_DEFER;
+ }
}
- of_node_put(panel_node);
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
#endif
};
+static const struct rockchip_dp_chip_data rk3399_edp = {
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
+ .chip_type = RK3399_EDP,
+};
+
+static const struct rockchip_dp_chip_data rk3288_dp = {
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
+ .chip_type = RK3288_DP,
+};
+
static const struct of_device_id rockchip_dp_dt_ids[] = {
- {.compatible = "rockchip,rk3288-dp",},
+ {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
+ {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/bridge/dw_hdmi.h>
#include "rockchip_drm_drv.h"
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return -EINVAL;
priv->crtc_funcs[pipe] = crtc_funcs;
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return;
priv->crtc_funcs[pipe] = NULL;
dev_set_drvdata(dev, NULL);
}
-void rockchip_drm_lastclose(struct drm_device *dev)
+static void rockchip_drm_lastclose(struct drm_device *dev)
{
struct rockchip_drm_private *priv = dev->dev_private;
is_support_iommu = false;
}
+ of_node_put(iommu);
component_match_add(dev, &match, compare_of, port->parent);
of_node_put(port);
}
#include <drm/drm_crtc_helper.h>
#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- struct drm_gem_object *obj;
int i;
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
- obj = rockchip_fb->obj[i];
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
- }
+ for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
+ drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(rockchip_fb);
drm_atomic_helper_cleanup_planes(dev, state);
}
-struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
+static struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
.atomic_commit_tail = rockchip_atomic_commit_tail,
};
scl_cal_scale2(src_h, dst_h));
if (is_yuv) {
VOP_SCL_SET(vop, win, scale_cbcr_x,
- scl_cal_scale2(src_w, dst_w));
+ scl_cal_scale2(cbcr_src_w, dst_w));
VOP_SCL_SET(vop, win, scale_cbcr_y,
- scl_cal_scale2(src_h, dst_h));
+ scl_cal_scale2(cbcr_src_h, dst_h));
}
return;
}
.atomic_disable = vop_plane_atomic_disable,
};
-void vop_atomic_plane_reset(struct drm_plane *plane)
+static void vop_atomic_plane_reset(struct drm_plane *plane)
{
struct vop_plane_state *vop_plane_state =
to_vop_plane_state(plane->state);
plane->state->plane = plane;
}
-struct drm_plane_state *
+static struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
struct vop_plane_state *old_vop_plane_state;
drm_crtc_cleanup(crtc);
}
+static void vop_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ kfree(crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+
static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *rockchip_state;
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = vop_crtc_destroy,
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = vop_crtc_reset,
.atomic_duplicate_state = vop_crtc_duplicate_state,
.atomic_destroy_state = vop_crtc_destroy_state,
};
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
};
-static const struct vop_scl_regs rk3066_win_scl = {
+static const struct vop_scl_regs rk3036_win_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
};
static const struct vop_win_phy rk3036_win0_data = {
- .scl = &rk3066_win_scl,
+ .scl = &rk3036_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+ .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
};
static const struct vop_win_phy rk3036_win1_data = {
return 0;
}
-struct platform_driver vop_platform_driver = {
+static struct platform_driver vop_platform_driver = {
.probe = vop_probe,
.remove = vop_remove,
.driver = {
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
select DRM_KMS_CMA_HELPER
select DRM_PANEL
select FW_LOADER
+ select SND_SOC_HDMI_CODEC if SND_SOC
help
Choose this option to enable DRM on STM stiH41x chipset
#include "sti_awg_utils.h"
+#define AWG_DELAY (-5)
+
#define AWG_OPCODE_OFFSET 10
#define AWG_MAX_ARG 0x3ff
val = timing->blanking_level;
ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
- val = timing->trailing_pixels - 1;
+ val = timing->trailing_pixels - 1 + AWG_DELAY;
ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
}
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
compo->vtg_main = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
if (vtg_np)
compo->vtg_aux = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
platform_set_drvdata(pdev, compo);
static void sti_crtc_enable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct device *dev = mixer->dev;
- struct sti_compositor *compo = dev_get_drvdata(dev);
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_READY;
- /* Prepare and enable the compo IP clock */
- if (mixer->id == STI_MIXER_MAIN) {
- if (clk_prepare_enable(compo->clk_compo_main))
- DRM_INFO("Failed to prepare/enable compo_main clk\n");
- } else {
- if (clk_prepare_enable(compo->clk_compo_aux))
- DRM_INFO("Failed to prepare/enable compo_aux clk\n");
- }
-
drm_crtc_vblank_on(crtc);
}
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct clk *clk;
+ struct clk *compo_clk, *pix_clk;
int rate = mode->clock * 1000;
- int res;
DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
mode->vsync_start, mode->vsync_end,
mode->vtotal, mode->type, mode->flags);
- /* Set rate and prepare/enable pixel clock */
- if (mixer->id == STI_MIXER_MAIN)
- clk = compo->clk_pix_main;
- else
- clk = compo->clk_pix_aux;
+ if (mixer->id == STI_MIXER_MAIN) {
+ compo_clk = compo->clk_compo_main;
+ pix_clk = compo->clk_pix_main;
+ } else {
+ compo_clk = compo->clk_compo_aux;
+ pix_clk = compo->clk_pix_aux;
+ }
+
+ /* Prepare and enable the compo IP clock */
+ if (clk_prepare_enable(compo_clk)) {
+ DRM_INFO("Failed to prepare/enable compositor clk\n");
+ goto compo_error;
+ }
- res = clk_set_rate(clk, rate);
- if (res < 0) {
+ /* Set rate and prepare/enable pixel clock */
+ if (clk_set_rate(pix_clk, rate) < 0) {
DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
- return -EINVAL;
+ goto pix_error;
}
- if (clk_prepare_enable(clk)) {
+ if (clk_prepare_enable(pix_clk)) {
DRM_ERROR("Failed to prepare/enable pix clk\n");
- return -EINVAL;
+ goto pix_error;
}
sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &crtc->mode);
- res = sti_mixer_active_video_area(mixer, &crtc->mode);
- if (res) {
+ if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
DRM_ERROR("Can't set active video area\n");
- return -EINVAL;
+ goto mixer_error;
}
- return res;
+ return 0;
+
+mixer_error:
+ clk_disable_unprepare(pix_clk);
+pix_error:
+ clk_disable_unprepare(compo_clk);
+compo_error:
+ return -EINVAL;
}
static void sti_crtc_disable(struct drm_crtc *crtc)
static void
sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- sti_crtc_enable(crtc);
sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = sti_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.atomic_begin = sti_crtc_atomic_begin,
.atomic_flush = sti_crtc_atomic_flush,
};
dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
if (!dvo->panel_node)
DRM_ERROR("No panel associated to the dvo output\n");
+ of_node_put(dvo->panel_node);
platform_set_drvdata(pdev, dvo);
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <sound/hdmi-codec.h>
+
#include "sti_hdmi.h"
#include "sti_hdmi_tx3g4c28phy.h"
#include "sti_hdmi_tx3g0c55phy.h"
#define HDMI_DFLT_CHL0_DAT 0x0110
#define HDMI_DFLT_CHL1_DAT 0x0114
#define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_AUDIO_CFG 0x0200
+#define HDMI_SPDIF_FIFO_STATUS 0x0204
#define HDMI_SW_DI_1_HEAD_WORD 0x0210
#define HDMI_SW_DI_1_PKT_WORD0 0x0214
#define HDMI_SW_DI_1_PKT_WORD1 0x0218
#define HDMI_SW_DI_1_PKT_WORD5 0x0228
#define HDMI_SW_DI_1_PKT_WORD6 0x022C
#define HDMI_SW_DI_CFG 0x0230
+#define HDMI_SAMPLE_FLAT_MASK 0x0244
+#define HDMI_AUDN 0x0400
+#define HDMI_AUD_CTS 0x0404
#define HDMI_SW_DI_2_HEAD_WORD 0x0600
#define HDMI_SW_DI_2_PKT_WORD0 0x0604
#define HDMI_SW_DI_2_PKT_WORD1 0x0608
#define HDMI_INT_DLL_LCK BIT(5)
#define HDMI_INT_NEW_FRAME BIT(6)
#define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8)
#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
| HDMI_INT_GLOBAL)
#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_AUDIO_FIFO_XRUN \
| HDMI_INT_GENCTRL_PKT \
| HDMI_INT_NEW_FRAME \
| HDMI_INT_DLL_LCK \
#define HDMI_STA_SW_RST BIT(1)
+#define HDMI_AUD_CFG_8CH BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO 0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+ HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
wake_up_interruptible(&hdmi->wait_event);
}
+ /* Audio FIFO underrun IRQ */
+ if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+ DRM_INFO("Warning: audio FIFO underrun occurs!");
+
return IRQ_HANDLED;
}
*/
static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
{
- struct hdmi_audio_infoframe infofame;
+ struct hdmi_audio_params *audio = &hdmi->audio;
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
- int ret;
-
- ret = hdmi_audio_infoframe_init(&infofame);
- if (ret < 0) {
- DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
- return ret;
- }
-
- infofame.channels = 2;
-
- ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
- if (ret < 0) {
- DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
- return ret;
+ int ret, val;
+
+ DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+ audio->enabled ? "enable" : "disable");
+ if (audio->enabled) {
+ /* set audio parameters stored*/
+ ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+ sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+ return ret;
+ }
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+ } else {
+ /*disable audio info frame transmission */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+ HDMI_IFRAME_SLOT_AUDIO);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
- hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
return 0;
}
DBGFS_DUMP("", HDMI_SW_DI_CFG);
hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
+ DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+ DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+ DBGFS_DUMP("\n", HDMI_AUDN);
+
seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
return count;
return NULL;
}
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. None-coherent clocks means that audio and TMDS clocks have not the
+ * same source (drifts between clocks). In this case assumption is that CTS is
+ * automatically calculated by hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * Values computed are based on table described in HDMI specification 1.4b
+ *
+ * Returns n value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+ unsigned int n;
+
+ switch (audio_fs) {
+ case 32000:
+ n = 4096;
+ break;
+ case 44100:
+ n = 6272;
+ break;
+ case 48000:
+ n = 6144;
+ break;
+ case 88200:
+ n = 6272 * 2;
+ break;
+ case 96000:
+ n = 6144 * 2;
+ break;
+ case 176400:
+ n = 6272 * 4;
+ break;
+ case 192000:
+ n = 6144 * 4;
+ break;
+ default:
+ /* Not pre-defined, recommended value: 128 * fs / 1000 */
+ n = (audio_fs * 128) / 1000;
+ }
+
+ return n;
+}
+
+static int hdmi_audio_configure(struct sti_hdmi *hdmi,
+ struct hdmi_audio_params *params)
+{
+ int audio_cfg, n;
+ struct hdmi_audio_infoframe *info = ¶ms->cea;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
+ /* update N parameter */
+ n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+ DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+ params->sample_rate, hdmi->mode.clock * 1000, n);
+ hdmi_write(hdmi, n, HDMI_AUDN);
+
+ /* update HDMI registers according to configuration */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+ switch (info->channels) {
+ case 8:
+ audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+ case 6:
+ audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+ case 4:
+ audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+ case 2:
+ audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+ break;
+ default:
+ DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
+ info->channels);
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ hdmi->audio = *params;
+
+ return hdmi_audio_infoframe_config(hdmi);
+}
+
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ int audio_cfg;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* disable audio */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ hdmi->audio.enabled = 0;
+ hdmi_audio_infoframe_config(hdmi);
+}
+
+static int hdmi_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
+ struct hdmi_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
+ if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
+ daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+ daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+
+ audio.enabled = 1;
+
+ ret = hdmi_audio_configure(hdmi, &audio);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
+
+ if (enable)
+ hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
+ else
+ hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
+
+ return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_connector *connector = hdmi->drm_connector;
+
+ DRM_DEBUG_DRIVER("\n");
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = hdmi_audio_hw_params,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .digital_mute = hdmi_audio_digital_mute,
+ .get_eld = hdmi_audio_get_eld,
+};
+
+static int sti_hdmi_register_audio_driver(struct device *dev,
+ struct sti_hdmi *hdmi)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+ };
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->audio.enabled = 0;
+
+ hdmi->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
+
+ return 0;
+}
+
static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
/* initialise property */
sti_hdmi_connector_init_property(drm_dev, drm_connector);
+ hdmi->drm_connector = drm_connector;
+
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
+ err = sti_hdmi_register_audio_driver(dev, hdmi);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec\n");
+ goto err_sysfs;
+ }
+
+ /* Initialize audio infoframe */
+ err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+ if (err) {
+ DRM_ERROR("Failed to init audio infoframe\n");
+ goto err_sysfs;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
err_sysfs:
drm_bridge_remove(bridge);
+ hdmi->drm_connector = NULL;
return -EINVAL;
}
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
i2c_put_adapter(hdmi->ddc_adapt);
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
return 0;
void (*stop)(struct sti_hdmi *hdmi);
};
+struct hdmi_audio_params {
+ bool enabled;
+ unsigned int sample_width;
+ unsigned int sample_rate;
+ struct hdmi_audio_infoframe cea;
+};
+
/* values for the framing mode property */
enum sti_hdmi_modes {
HDMI_MODE_HDMI,
* @ddc_adapt: i2c ddc adapter
* @colorspace: current colorspace selected
* @hdmi_mode: select framing for HDMI or DVI
+ * @audio_pdev: ASoC hdmi-codec platform device
+ * @audio: hdmi audio parameters.
+ * @drm_connector: hdmi connector
*/
struct sti_hdmi {
struct device dev;
struct i2c_adapter *ddc_adapt;
enum hdmi_colorspace colorspace;
enum sti_hdmi_modes hdmi_mode;
+ struct platform_device *audio_pdev;
+ struct hdmi_audio_params audio;
+ struct drm_connector *drm_connector;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
hqvdp->vtg = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
platform_set_drvdata(pdev, hqvdp);
#define STI_FPS_INTERVAL_MS 3000
-static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
-{
- struct timespec tmp_ts = timespec_sub(lhs, rhs);
- u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
-
- do_div(tmp_ns, NSEC_PER_MSEC);
-
- return (u32)tmp_ns;
-}
-
void sti_plane_update_fps(struct sti_plane *plane,
bool new_frame,
bool new_field)
{
- struct timespec now;
+ ktime_t now;
struct sti_fps_info *fps;
int fpks, fipks, ms_since_last, num_frames, num_fields;
- getrawmonotonic(&now);
+ now = ktime_get();
/* Compute number of frame updates */
fps = &plane->fps_info;
return;
fps->curr_frame_counter++;
- ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+ ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
num_frames = fps->curr_frame_counter - fps->last_frame_counter;
if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
unsigned int last_frame_counter;
unsigned int curr_field_counter;
unsigned int last_field_counter;
- struct timespec last_timestamp;
+ ktime_t last_timestamp;
char fps_str[FPS_LENGTH];
char fips_str[FPS_LENGTH];
};
#define HDMI_DELAY (5)
/* Delay introduced by the DVO in nb of pixel */
-#define DVO_DELAY (2)
+#define DVO_DELAY (7)
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
if (np) {
vtg->slave = of_vtg_find(np);
+ of_node_put(np);
if (!vtg->slave)
return -EPROBE_DEFER;
config DRM_SUN4I
tristate "DRM Support for Allwinner A10 Display Engine"
- depends on DRM && ARM
+ depends on DRM && ARM && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
/* Get the physical address of the buffer in memory */
gem = drm_fb_cma_get_gem_obj(fb, 0);
- DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
/* Compute the start of the displayed memory */
bpp = drm_format_plane_cpp(fb->pixel_format, 0);
paddr += (state->src_x >> 16) * bpp;
paddr += (state->src_y >> 16) * fb->pitches[0];
- DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = paddr << 3;
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
sun4i_tcon_disable(drv->tcon);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
}
static void sun4i_crtc_enable(struct drm_crtc *crtc)
static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate);
+ unsigned long best_parent = 0;
+ u8 best_div = 1;
+ int i;
+
+ for (i = 6; i < 127; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
+ ideal);
+
+ if (rounded == ideal) {
+ best_parent = rounded;
+ best_div = i;
+ goto out;
+ }
+
+ if ((rounded < ideal) && (rounded > best_parent)) {
+ best_parent = rounded;
+ best_div = i;
+ }
+ }
+
+out:
+ *parent_rate = best_parent;
+
+ return best_parent / best_div;
}
static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
- int div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ u8 div = parent_rate / rate;
return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
GENMASK(6, 0), div);
const char *clk_name, *parent_name;
struct clk_init_data init;
struct sun4i_dclk *dclk;
+ int ret;
parent_name = __clk_get_name(tcon->sclk0);
- of_property_read_string_index(dev->of_node, "clock-output-names", 0,
- &clk_name);
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", 0,
+ &clk_name);
+ if (ret)
+ return ret;
dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
if (!dclk)
init.ops = &sun4i_dclk_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
dclk->regmap = tcon->regs;
dclk->hw.init = &init;
/* Frame Buffer Operations */
/* VBlank Operations */
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = sun4i_drv_enable_vblank,
.disable_vblank = sun4i_drv_disable_vblank,
};
+static void sun4i_remove_framebuffers(void)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ /* The framebuffer can be located anywhere in RAM */
+ ap->ranges[0].base = 0;
+ ap->ranges[0].size = ~0;
+
+ remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+ kfree(ap);
+}
+
static int sun4i_drv_bind(struct device *dev)
{
struct drm_device *drm;
}
drm->irq_enabled = true;
+ /* Remove early framebuffers (ie. simplefb) */
+ sun4i_remove_framebuffers();
+
/* Create our framebuffer */
drv->fbdev = sun4i_framebuffer_init(drm);
if (IS_ERR(drv->fbdev)) {
count += sun4i_drv_add_endpoints(&pdev->dev, &match,
pipeline);
+ of_node_put(pipeline);
DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
count, i);
{
struct sun4i_drv *drv = drm->dev_private;
- if (drv->fbdev)
- drm_fbdev_cma_hotplug_event(drv->fbdev);
+ drm_fbdev_cma_hotplug_event(drv->fbdev);
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
static int sun4i_rgb_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
+ unsigned long rate = mode->clock * 1000;
+ long rounded_rate;
DRM_DEBUG_DRIVER("Validating modes...\n");
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ rounded_rate = clk_round_rate(tcon->dclk, rate);
+ if (rounded_rate < rate)
+ return MODE_CLOCK_LOW;
+
+ if (rounded_rate > rate)
+ return MODE_CLOCK_HIGH;
+
+ DRM_DEBUG_DRIVER("Clock rate OK\n");
+
return MODE_OK;
}
int ret;
/* If we don't have a panel, there's no point in going on */
- if (!tcon->panel)
+ if (IS_ERR(tcon->panel))
return -ENODEV;
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
remote = of_graph_get_remote_port_parent(end_node);
if (!remote) {
- DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+ DRM_DEBUG_DRIVER("Unable to parse remote node\n");
return ERR_PTR(-EINVAL);
}
- return of_drm_find_panel(remote);
+ return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
}
static int sun4i_tcon_bind(struct device *dev, struct device *master,
return 0;
}
- return sun4i_rgb_init(drm);
+ ret = sun4i_rgb_init(drm);
+ if (ret < 0)
+ goto err_free_clocks;
+
+ return 0;
err_free_clocks:
sun4i_tcon_free_clocks(tcon);
* Defer the probe.
*/
panel = sun4i_tcon_find_panel(node);
- if (IS_ERR(panel)) {
- /*
- * If we don't have a panel endpoint, just go on
- */
- if (PTR_ERR(panel) != -ENODEV)
- return -EPROBE_DEFER;
+
+ /*
+ * If we don't have a panel endpoint, just go on
+ */
+ if (PTR_ERR(panel) == -EPROBE_DEFER) {
+ DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+ return -EPROBE_DEFER;
}
return component_add(&pdev->dev, &sun4i_tcon_ops);
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
tegra_dc_stats_reset(&dc->stats);
drm_crtc_vblank_off(crtc);
+
+ pm_runtime_put_sync(dc->dev);
}
static void tegra_crtc_enable(struct drm_crtc *crtc)
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ pm_runtime_get_sync(dc->dev);
+
+ /* initialize display controller */
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt);
+
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = SYNCPT_VSYNC_ENABLE | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ if (dc->soc->supports_border_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
+ /* apply PLL and pixel clock changes */
tegra_dc_commit_state(dc, state);
/* program display mode */
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- u32 value;
int err;
dc->syncpt = host1x_syncpt_request(dc->dev, flags);
goto cleanup;
}
- /* initialize display controller */
- if (dc->syncpt) {
- u32 syncpt = host1x_syncpt_id(dc->syncpt);
-
- value = SYNCPT_CNTRL_NO_STALL;
- tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-
- value = SYNCPT_VSYNC_ENABLE | syncpt;
- tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
- }
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
- /* initialize timer */
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
- WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
- WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- if (dc->soc->supports_border_color)
- tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
- tegra_dc_stats_reset(&dc->stats);
-
return 0;
cleanup:
return PTR_ERR(dc->rst);
}
+ reset_control_assert(dc->rst);
+
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
dc->powergate = TEGRA_POWERGATE_DIS;
else
dc->powergate = TEGRA_POWERGATE_DISB;
- err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
- dc->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to power partition: %d\n",
- err);
- return err;
- }
- } else {
- err = clk_prepare_enable(dc->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n",
- err);
- return err;
- }
-
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to deassert reset: %d\n",
- err);
- return err;
- }
+ tegra_powergate_power_off(dc->powergate);
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
return -ENXIO;
}
- INIT_LIST_HEAD(&dc->client.list);
- dc->client.ops = &dc_client_ops;
- dc->client.dev = &pdev->dev;
-
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
return err;
}
+ platform_set_drvdata(pdev, dc);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
err = host1x_client_register(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
return err;
}
- platform_set_drvdata(pdev, dc);
-
return 0;
}
return err;
}
- reset_control_assert(dc->rst);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct device *dev)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
if (dc->soc->has_powergate)
tegra_powergate_power_off(dc->powergate);
return 0;
}
+static int tegra_dc_resume(struct device *dev)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ int err;
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ return err;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dc_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
+};
+
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
+ .pm = &tegra_dc_pm_ops,
},
.probe = tegra_dc_probe,
.remove = tegra_dc_remove,
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
struct completion complete;
struct work_struct work;
struct list_head list;
+
+#ifdef CONFIG_GENERIC_PINCONF
+ struct pinctrl_dev *pinctrl;
+ struct pinctrl_desc desc;
+#endif
};
static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
return ret;
}
+enum tegra_dpaux_functions {
+ DPAUX_PADCTL_FUNC_AUX,
+ DPAUX_PADCTL_FUNC_I2C,
+ DPAUX_PADCTL_FUNC_OFF,
+};
+
+static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
+{
+ u32 value;
+
+ switch (function) {
+ case DPAUX_PADCTL_FUNC_AUX:
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_AUX;
+ break;
+
+ case DPAUX_PADCTL_FUNC_I2C:
+ value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_I2C;
+ break;
+
+ case DPAUX_PADCTL_FUNC_OFF:
+ tegra_dpaux_pad_power_down(dpaux);
+ return 0;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ tegra_dpaux_pad_power_up(dpaux);
+
+ return 0;
+}
+
+#ifdef CONFIG_GENERIC_PINCONF
+static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
+ PINCTRL_PIN(0, "DP_AUX_CHx_P"),
+ PINCTRL_PIN(1, "DP_AUX_CHx_N"),
+};
+
+static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
+
+static const char * const tegra_dpaux_groups[] = {
+ "dpaux-io",
+};
+
+static const char * const tegra_dpaux_functions[] = {
+ "aux",
+ "i2c",
+ "off",
+};
+
+static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_groups);
+}
+
+static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
+ unsigned int group)
+{
+ return tegra_dpaux_groups[group];
+}
+
+static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = tegra_dpaux_pin_numbers;
+ *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
+
+ return 0;
+}
+
+static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
+ .get_groups_count = tegra_dpaux_get_groups_count,
+ .get_group_name = tegra_dpaux_get_group_name,
+ .get_group_pins = tegra_dpaux_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_functions);
+}
+
+static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
+ unsigned int function)
+{
+ return tegra_dpaux_functions[function];
+}
+
+static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
+ *groups = tegra_dpaux_groups;
+
+ return 0;
+}
+
+static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
+ unsigned int function, unsigned int group)
+{
+ struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
+
+ return tegra_dpaux_pad_config(dpaux, function);
+}
+
+static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
+ .get_functions_count = tegra_dpaux_get_functions_count,
+ .get_function_name = tegra_dpaux_get_function_name,
+ .get_function_groups = tegra_dpaux_get_function_groups,
+ .set_mux = tegra_dpaux_set_mux,
+};
+#endif
+
static int tegra_dpaux_probe(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux;
return -ENXIO;
}
- dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
- if (IS_ERR(dpaux->rst)) {
- dev_err(&pdev->dev, "failed to get reset control: %ld\n",
- PTR_ERR(dpaux->rst));
- return PTR_ERR(dpaux->rst);
+ if (!pdev->dev.pm_domain) {
+ dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+ if (IS_ERR(dpaux->rst)) {
+ dev_err(&pdev->dev,
+ "failed to get reset control: %ld\n",
+ PTR_ERR(dpaux->rst));
+ return PTR_ERR(dpaux->rst);
+ }
}
dpaux->clk = devm_clk_get(&pdev->dev, NULL);
return err;
}
- reset_control_deassert(dpaux->rst);
+ if (dpaux->rst)
+ reset_control_deassert(dpaux->rst);
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dpaux->clk_parent)) {
dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
PTR_ERR(dpaux->clk_parent));
- return PTR_ERR(dpaux->clk_parent);
+ err = PTR_ERR(dpaux->clk_parent);
+ goto assert_reset;
}
err = clk_prepare_enable(dpaux->clk_parent);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
err);
- return err;
+ goto assert_reset;
}
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
err);
- return err;
+ goto disable_parent_clk;
}
dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
PTR_ERR(dpaux->vdd));
- return PTR_ERR(dpaux->vdd);
+ err = PTR_ERR(dpaux->vdd);
+ goto disable_parent_clk;
}
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- return err;
+ goto disable_parent_clk;
}
disable_irq(dpaux->irq);
err = drm_dp_aux_register(&dpaux->aux);
if (err < 0)
- return err;
+ goto disable_parent_clk;
/*
* Assume that by default the DPAUX/I2C pads will be used for HDMI,
* is no possibility to perform the I2C mode configuration in the
* HDMI path.
*/
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
-
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
- value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_MODE_I2C;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ err = tegra_dpaux_pad_config(dpaux, DPAUX_HYBRID_PADCTL_MODE_I2C);
+ if (err < 0)
+ return err;
+#ifdef CONFIG_GENERIC_PINCONF
+ dpaux->desc.name = dev_name(&pdev->dev);
+ dpaux->desc.pins = tegra_dpaux_pins;
+ dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
+ dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
+ dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
+ dpaux->desc.owner = THIS_MODULE;
+
+ dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+ if (!dpaux->pinctrl) {
+ dev_err(&pdev->dev, "failed to register pincontrol\n");
+ return -ENODEV;
+ }
+#endif
/* enable and clear all interrupts */
value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
platform_set_drvdata(pdev, dpaux);
return 0;
+
+disable_parent_clk:
+ clk_disable_unprepare(dpaux->clk_parent);
+assert_reset:
+ if (dpaux->rst)
+ reset_control_assert(dpaux->rst);
+
+ clk_disable_unprepare(dpaux->clk);
+
+ return err;
}
static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
- u32 value;
/* make sure pads are powered down when not in use */
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+ tegra_dpaux_pad_power_down(dpaux);
drm_dp_aux_unregister(&dpaux->aux);
cancel_work_sync(&dpaux->work);
clk_disable_unprepare(dpaux->clk_parent);
- reset_control_assert(dpaux->rst);
+
+ if (dpaux->rst)
+ reset_control_assert(dpaux->rst);
+
clk_disable_unprepare(dpaux->clk);
return 0;
int drm_dp_aux_enable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
- u32 value;
-
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
- DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_MODE_AUX;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
-
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
- return 0;
+ return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
}
int drm_dp_aux_disable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
- u32 value;
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+ tegra_dpaux_pad_power_down(dpaux);
return 0;
}
*/
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, false);
drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state, true);
drm_atomic_helper_wait_for_vblanks(drm, state);
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
}
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+ return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ /*
+ * XXX Is this still needed? The module reset is deasserted right
+ * before this function is called.
+ */
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ /* start calibration */
+ tegra_dsi_pad_enable(dsi);
+
+ value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+ DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+ DSI_PAD_OUT_CLK(0x0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+ value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+ DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
+ return tegra_mipi_calibrate(dsi->mipi);
+}
+
static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
unsigned int vrefresh)
{
tegra_dsi_disable(dsi);
- return;
+ pm_runtime_put(dsi->dev);
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
+ int err;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
state = tegra_dsi_get_state(dsi);
if (output->panel)
drm_panel_enable(output->panel);
-
- return;
}
static int
.atomic_check = tegra_dsi_encoder_atomic_check,
};
-static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
-{
- u32 value;
-
- value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
-
- return 0;
-}
-
-static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
-{
- u32 value;
-
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
-
- /* start calibration */
- tegra_dsi_pad_enable(dsi);
-
- value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
- DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
- DSI_PAD_OUT_CLK(0x0);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
-
- value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
- DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
-
- return tegra_mipi_calibrate(dsi->mipi);
-}
-
static int tegra_dsi_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
- reset_control_deassert(dsi->rst);
-
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0) {
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
- goto reset;
- }
-
/* Gangsters must not register their own outputs. */
if (!dsi->master) {
dsi->output.dev = client->dev;
drm_connector_register(&dsi->output.connector);
err = tegra_output_init(drm, &dsi->output);
- if (err < 0) {
- dev_err(client->dev,
- "failed to initialize output: %d\n",
+ if (err < 0)
+ dev_err(dsi->dev, "failed to initialize output: %d\n",
err);
- goto reset;
- }
dsi->output.encoder.possible_crtcs = 0x3;
}
}
return 0;
-
-reset:
- reset_control_assert(dsi->rst);
- return err;
}
static int tegra_dsi_exit(struct host1x_client *client)
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_dsi_debugfs_exit(dsi);
- reset_control_assert(dsi->rst);
+ regulator_disable(dsi->vdd);
return 0;
}
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
- dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (!pdev->dev.pm_domain) {
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+ if (IS_ERR(dsi->rst))
+ return PTR_ERR(dsi->rst);
+ }
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
dev_err(&pdev->dev, "cannot get DSI clock\n");
- err = PTR_ERR(dsi->clk);
- goto reset;
- }
-
- err = clk_prepare_enable(dsi->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable DSI clock\n");
- goto reset;
+ return PTR_ERR(dsi->clk);
}
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp)) {
dev_err(&pdev->dev, "cannot get low-power clock\n");
- err = PTR_ERR(dsi->clk_lp);
- goto disable_clk;
- }
-
- err = clk_prepare_enable(dsi->clk_lp);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable low-power clock\n");
- goto disable_clk;
+ return PTR_ERR(dsi->clk_lp);
}
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent)) {
dev_err(&pdev->dev, "cannot get parent clock\n");
- err = PTR_ERR(dsi->clk_parent);
- goto disable_clk_lp;
+ return PTR_ERR(dsi->clk_parent);
}
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd)) {
dev_err(&pdev->dev, "cannot get VDD supply\n");
- err = PTR_ERR(dsi->vdd);
- goto disable_clk_lp;
- }
-
- err = regulator_enable(dsi->vdd);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable VDD supply\n");
- goto disable_clk_lp;
+ return PTR_ERR(dsi->vdd);
}
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- goto disable_vdd;
+ return err;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs)) {
- err = PTR_ERR(dsi->regs);
- goto disable_vdd;
- }
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
dsi->mipi = tegra_mipi_request(&pdev->dev);
- if (IS_ERR(dsi->mipi)) {
- err = PTR_ERR(dsi->mipi);
- goto disable_vdd;
- }
+ if (IS_ERR(dsi->mipi))
+ return PTR_ERR(dsi->mipi);
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
goto mipi_free;
}
+ platform_set_drvdata(pdev, dsi);
+ pm_runtime_enable(&pdev->dev);
+
INIT_LIST_HEAD(&dsi->client.list);
dsi->client.ops = &dsi_client_ops;
dsi->client.dev = &pdev->dev;
goto unregister;
}
- platform_set_drvdata(pdev, dsi);
-
return 0;
unregister:
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
-disable_vdd:
- regulator_disable(dsi->vdd);
-disable_clk_lp:
- clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
- clk_disable_unprepare(dsi->clk);
-reset:
- reset_control_assert(dsi->rst);
return err;
}
struct tegra_dsi *dsi = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&dsi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
mipi_dsi_host_unregister(&dsi->host);
tegra_mipi_free(dsi->mipi);
- regulator_disable(dsi->vdd);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dsi_suspend(struct device *dev)
+{
+ struct tegra_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
- reset_control_assert(dsi->rst);
+
+ regulator_disable(dsi->vdd);
return 0;
}
+static int tegra_dsi_resume(struct device *dev)
+{
+ struct tegra_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
+
+ return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+ return err;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
+};
+
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
+ .pm = &tegra_dsi_pm_ops,
},
.probe = tegra_dsi_probe,
.remove = tegra_dsi_remove,
struct tegra_bo *bo = fb->planes[i];
if (bo) {
- if (bo->pages && bo->vaddr)
+ if (bo->pages)
vunmap(bo->vaddr);
drm_gem_object_unreference_unlocked(&bo->gem);
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <sound/hda_verbs.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
+#define HDMI_ELD_BUFFER_SIZE 96
+
struct tmds_config {
unsigned int pclk;
u32 pll0;
u32 fuse_override_value;
bool has_sor_io_peak_current;
+ bool has_hda;
+ bool has_hbr;
};
struct tegra_hdmi {
const struct tegra_hdmi_config *config;
unsigned int audio_source;
- unsigned int audio_freq;
+ unsigned int audio_sample_rate;
+ unsigned int audio_channels;
+
+ unsigned int pixel_clock;
bool stereo;
bool dvi;
};
static const struct tegra_hdmi_audio_config *
-tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
{
const struct tegra_hdmi_audio_config *table;
- switch (audio_freq) {
+ switch (sample_rate) {
case 32000:
table = tegra_hdmi_audio_32k;
break;
}
}
-static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
+{
+ static const struct {
+ unsigned int sample_rate;
+ unsigned int offset;
+ } regs[] = {
+ { 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
+ { 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
+ { 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
+ { 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
+ { 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
+ { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
+ { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i].sample_rate == hdmi->audio_sample_rate) {
+ tegra_hdmi_writel(hdmi, value, regs[i].offset);
+ break;
+ }
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
{
- struct device_node *node = hdmi->dev->of_node;
const struct tegra_hdmi_audio_config *config;
- unsigned int offset = 0;
- u32 value;
+ u32 source, value;
switch (hdmi->audio_source) {
case HDA:
- value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ else
+ return -EINVAL;
+
break;
case SPDIF:
- value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
break;
default:
- value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
break;
}
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
- AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
- } else {
- value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+ /*
+ * Tegra30 and later use a slightly modified version of the register
+ * layout to accomodate for changes related to supporting HDA as the
+ * audio input source for HDMI. The source select field has moved to
+ * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
+ * per block fields remain in the AUDIO_CNTRL0 register.
+ */
+ if (hdmi->config->has_hda) {
+ /*
+ * Inject null samples into the audio FIFO for every frame in
+ * which the codec did not receive any samples. This applies
+ * to stereo LPCM only.
+ *
+ * XXX: This seems to be a remnant of MCP days when this was
+ * used to work around issues with monitors not being able to
+ * play back system startup sounds early. It is possibly not
+ * needed on Linux at all.
+ */
+ if (hdmi->audio_channels == 2)
+ value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
+ else
+ value = 0;
+
+ value |= source;
+
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ }
- value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
- AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ /*
+ * On Tegra20, HDA is not a supported audio source and the source
+ * select field is part of the AUDIO_CNTRL0 register.
+ */
+ value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6);
+
+ if (!hdmi->config->has_hda)
+ value |= source;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+ /*
+ * Advertise support for High Bit-Rate on Tegra114 and later.
+ */
+ if (hdmi->config->has_hbr) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
}
- config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+ config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
+ hdmi->pixel_clock);
if (!config) {
- dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
- hdmi->audio_freq, pclk);
+ dev_err(hdmi->dev,
+ "cannot set audio to %u Hz at %u Hz pixel clock\n",
+ hdmi->audio_sample_rate, hdmi->pixel_clock);
return -EINVAL;
}
tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
- value = ACR_SUBPACK_CTS(config->cts);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
value &= ~AUDIO_N_RESETF;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- switch (hdmi->audio_freq) {
- case 32000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
- break;
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_aval(hdmi, config->aval);
- case 44100:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
- break;
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
- case 48000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
- break;
+ return 0;
+}
- case 88200:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
- break;
+static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- case 96000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
- break;
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
- case 176400:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
- break;
+static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- case 192000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
- break;
- }
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
- tegra_hdmi_writel(hdmi, config->aval, offset);
- }
+static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
+{
+ size_t length = drm_eld_size(hdmi->output.connector.eld), i;
+ u32 value;
- tegra_hdmi_setup_audio_fs_tables(hdmi);
+ for (i = 0; i < length; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
- return 0;
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if no 96 bytes
+ * were parsed from EDID.
+ */
+ for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | 0,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+ value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
}
static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
u8 buffer[17];
ssize_t err;
- if (hdmi->dvi) {
- tegra_hdmi_writel(hdmi, 0,
- HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
- return;
- }
-
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
- HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
u8 buffer[14];
ssize_t err;
- if (hdmi->dvi) {
- tegra_hdmi_writel(hdmi, 0,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
- return;
- }
-
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
return;
}
- frame.channels = 2;
+ frame.channels = hdmi->audio_channels;
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
* bytes can be programmed.
*/
tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
+}
- tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
struct hdmi_vendor_infoframe frame;
u8 buffer[10];
ssize_t err;
- u32 value;
-
- if (!hdmi->stereo) {
- value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- value &= ~GENERIC_CTRL_ENABLE;
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- return;
- }
hdmi_vendor_infoframe_init(&frame);
frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
return drm_detect_hdmi_monitor(edid);
}
+static enum drm_connector_status
+tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ enum drm_connector_status status;
+
+ status = tegra_output_connector_detect(connector, force);
+ if (status == connector_status_connected)
+ return status;
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ return status;
+}
+
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = tegra_output_connector_detect,
+ .detect = tegra_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
+ struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
/*
tegra_dc_commit(dc);
}
+
+ if (!hdmi->dvi) {
+ if (hdmi->stereo)
+ tegra_hdmi_disable_stereo_infoframe(hdmi);
+
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_avi_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+
+ pm_runtime_put(hdmi->dev);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct device_node *node = output->dev->of_node;
struct tegra_hdmi *hdmi = to_hdmi(output);
- unsigned int pulse_start, div82, pclk;
+ unsigned int pulse_start, div82;
int retries = 1000;
u32 value;
int err;
- hdmi->dvi = !tegra_output_is_hdmi(output);
+ pm_runtime_get_sync(hdmi->dev);
- pclk = mode->clock * 1000;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI driver.
+ */
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
+
+ hdmi->pixel_clock = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = clk_set_rate(hdmi->clk, pclk);
+ err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+ hdmi->dvi = !tegra_output_is_hdmi(output);
if (!hdmi->dvi) {
- err = tegra_hdmi_setup_audio(hdmi, pclk);
+ err = tegra_hdmi_setup_audio(hdmi);
if (err < 0)
hdmi->dvi = true;
}
- if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
- /*
- * TODO: add ELD support
- */
- }
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_eld(hdmi);
rekey = HDMI_REKEY_DEFAULT;
value = HDMI_CTRL_REKEY(rekey);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
- if (hdmi->dvi)
- tegra_hdmi_writel(hdmi, 0x0,
- HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- else
- tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
- HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ if (!hdmi->dvi) {
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_setup_avi_infoframe(hdmi, mode);
- tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_setup_stereo_infoframe(hdmi);
+ if (hdmi->stereo)
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+ }
/* TMDS CONFIG */
for (i = 0; i < hdmi->config->num_tmds; i++) {
- if (pclk <= hdmi->config->tmds[i].pclk) {
+ if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
break;
}
tegra_dc_commit(dc);
+ if (!hdmi->dvi) {
+ tegra_hdmi_enable_avi_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+
+ if (hdmi->stereo)
+ tegra_hdmi_enable_stereo_infoframe(hdmi);
+ }
+
/* TODO: add HDCP support */
}
DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_INT_MASK);
+ DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
#undef DUMP_REG
return err;
}
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- reset_control_deassert(hdmi->rst);
-
return 0;
}
tegra_output_exit(&hdmi->output);
- reset_control_assert(hdmi->rst);
- clk_disable_unprepare(hdmi->clk);
-
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
regulator_disable(hdmi->hdmi);
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
+ .has_hda = false,
+ .has_hbr = false,
};
static const struct tegra_hdmi_config tegra30_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
+ .has_hda = true,
+ .has_hbr = false,
};
static const struct tegra_hdmi_config tegra114_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
};
static const struct tegra_hdmi_config tegra124_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
};
static const struct of_device_id tegra_hdmi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
+static void hda_format_parse(unsigned int format, unsigned int *rate,
+ unsigned int *channels)
+{
+ unsigned int mul, div;
+
+ if (format & AC_FMT_BASE_44K)
+ *rate = 44100;
+ else
+ *rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ *rate = *rate * (mul + 1) / (div + 1);
+
+ *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ u32 value;
+ int err;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
+
+ if (value & INT_CODEC_SCRATCH0) {
+ unsigned int format;
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int sample_rate, channels;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ hda_format_parse(format, &sample_rate, &channels);
+
+ hdmi->audio_sample_rate = sample_rate;
+ hdmi->audio_channels = channels;
+
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0) {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ } else {
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+ }
+ } else {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static int tegra_hdmi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
hdmi->config = match->data;
hdmi->dev = &pdev->dev;
+
hdmi->audio_source = AUTO;
- hdmi->audio_freq = 44100;
+ hdmi->audio_sample_rate = 48000;
+ hdmi->audio_channels = 2;
hdmi->stereo = false;
hdmi->dvi = false;
hdmi->irq = err;
+ err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
+ dev_name(hdmi->dev), hdmi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hdmi->irq, err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+ pm_runtime_enable(&pdev->dev);
+
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
hdmi->client.dev = &pdev->dev;
return err;
}
- platform_set_drvdata(pdev, hdmi);
-
return 0;
}
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
tegra_output_remove(&hdmi->output);
- clk_disable_unprepare(hdmi->clk_parent);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_hdmi_suspend(struct device *dev)
+{
+ struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
clk_disable_unprepare(hdmi->clk);
return 0;
}
+static int tegra_hdmi_resume(struct device *dev)
+{
+ struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ clk_disable_unprepare(hdmi->clk);
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
+};
+
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
- .owner = THIS_MODULE,
.of_match_table = tegra_hdmi_of_match,
+ .pm = &tegra_hdmi_pm_ops,
},
.probe = tegra_hdmi_probe,
.remove = tegra_hdmi_remove,
#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
-#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define SOR_AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0 0xae
+#define SOR_AUDIO_SPARE0_HBR_ENABLE (1 << 27)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0 0xba
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1 0xbb
#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+#define SOR_AUDIO_HDA_PRESENSE_VALID (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PRESENT (1 << 0)
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+#define HDMI_NV_PDISP_INT_STATUS 0xcc
+#define INT_SCRATCH (1 << 3)
+#define INT_CP_REQUEST (1 << 2)
+#define INT_CODEC_SCRATCH1 (1 << 1)
+#define INT_CODEC_SCRATCH0 (1 << 0)
+#define HDMI_NV_PDISP_INT_MASK 0xcd
+#define HDMI_NV_PDISP_INT_ENABLE 0xce
+
#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
if (edid) {
err = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
}
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
const struct tegra_sor_hdmi_settings *settings;
unsigned int num_settings;
+
+ const u8 *xbar_cfg;
};
struct tegra_sor;
struct reset_control *rst;
struct clk *clk_parent;
+ struct clk *clk_brick;
struct clk *clk_safe;
+ struct clk *clk_src;
struct clk *clk_dp;
struct clk *clk;
struct regulator *hdmi_supply;
};
+struct tegra_sor_state {
+ struct drm_connector_state base;
+
+ unsigned int bpc;
+};
+
+static inline struct tegra_sor_state *
+to_sor_state(struct drm_connector_state *state)
+{
+ return container_of(state, struct tegra_sor_state, base);
+}
+
struct tegra_sor_config {
u32 bits_per_pixel;
writel(value, sor->regs + (offset << 2));
}
+static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
+{
+ int err;
+
+ clk_disable_unprepare(sor->clk);
+
+ err = clk_set_parent(sor->clk, parent);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+struct tegra_clk_sor_brick {
+ struct clk_hw hw;
+ struct tegra_sor *sor;
+};
+
+static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_sor_brick, hw);
+}
+
+static const char * const tegra_clk_sor_brick_parents[] = {
+ "pll_d2_out0", "pll_dp"
+};
+
+static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_sor_brick *brick = to_brick(hw);
+ struct tegra_sor *sor = brick->sor;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ switch (index) {
+ case 0:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ break;
+
+ case 1:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ return 0;
+}
+
+static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_sor_brick *brick = to_brick(hw);
+ struct tegra_sor *sor = brick->sor;
+ u8 parent = U8_MAX;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+
+ switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
+ parent = 0;
+ break;
+
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
+ parent = 1;
+ break;
+ }
+
+ return parent;
+}
+
+static const struct clk_ops tegra_clk_sor_brick_ops = {
+ .set_parent = tegra_clk_sor_brick_set_parent,
+ .get_parent = tegra_clk_sor_brick_get_parent,
+};
+
+static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
+ const char *name)
+{
+ struct tegra_clk_sor_brick *brick;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
+ if (!brick)
+ return ERR_PTR(-ENOMEM);
+
+ brick->sor = sor;
+
+ init.name = name;
+ init.flags = 0;
+ init.parent_names = tegra_clk_sor_brick_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
+ init.ops = &tegra_clk_sor_brick_ops;
+
+ brick->hw.init = &init;
+
+ clk = devm_clk_register(sor->dev, &brick->hw);
+ if (IS_ERR(clk))
+ kfree(brick);
+
+ return clk;
+}
+
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
{
return false;
}
-static int tegra_sor_calc_config(struct tegra_sor *sor,
- const struct drm_display_mode *mode,
- struct tegra_sor_config *config,
- struct drm_dp_link *link)
+static int tegra_sor_compute_config(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_config *config,
+ struct drm_dp_link *link)
{
const u64 f = 100000, link_rate = link->rate * 1000;
const u64 pclk = mode->clock * 1000;
return 0;
}
+static void tegra_sor_apply_config(struct tegra_sor *sor,
+ const struct tegra_sor_config *config)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+ value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
+ value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+ value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
+
+ if (config->active_polarity)
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+ else
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+ value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+ value |= config->hblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+ value |= config->vblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+}
+
+static void tegra_sor_mode_set(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+ unsigned int vbe, vse, hbe, hse, vbs, hbs;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+ value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
+ switch (state->bpc) {
+ case 16:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
+ break;
+
+ case 12:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
+ break;
+
+ case 10:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
+ break;
+
+ case 8:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 6:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+ /* sync end = sync width - 1 */
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+ /* blank end = sync end + back porch */
+ vbe = vse + (mode->vtotal - mode->vsync_end);
+ hbe = hse + (mode->htotal - mode->hsync_end);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+ /* blank start = blank end + active */
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ /* XXX interlacing support */
+ tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
+}
+
static int tegra_sor_detach(struct tegra_sor *sor)
{
unsigned long value, timeout;
if ((value & SOR_PWR_TRIGGER) != 0)
return -ETIMEDOUT;
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
sor->debugfs = NULL;
}
+static void tegra_sor_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ if (connector->state) {
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ kfree(connector->state);
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
static enum drm_connector_status
tegra_sor_connector_detect(struct drm_connector *connector, bool force)
{
return tegra_output_connector_detect(connector, force);
}
+static struct drm_connector_state *
+tegra_sor_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state = to_sor_state(connector->state);
+ struct tegra_sor_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, ©->base);
+
+ return ©->base;
+}
+
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = tegra_sor_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
tegra_sor_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ /* HDMI 2.0 modes are not yet supported */
+ if (mode->clock > 340000)
+ return MODE_NOCLOCK;
+
return MODE_OK;
}
if (output->panel)
drm_panel_unprepare(output->panel);
- reset_control_assert(sor->rst);
- clk_disable_unprepare(sor->clk);
+ pm_runtime_put(sor->dev);
}
#if 0
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
+ struct tegra_sor_state *state;
struct drm_dp_link link;
u8 rate, lanes;
+ unsigned int i;
int err = 0;
u32 value;
- err = clk_prepare_enable(sor->clk);
- if (err < 0)
- dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ state = to_sor_state(output->connector.state);
- reset_control_deassert(sor->rst);
+ pm_runtime_get_sync(sor->dev);
if (output->panel)
drm_panel_prepare(output->panel);
return;
}
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
- config.bits_per_pixel = output->connector.display_info.bpc * 3;
+ config.bits_per_pixel = state->bpc * 3;
- err = tegra_sor_calc_config(sor, mode, &config, &link);
+ err = tegra_sor_compute_config(sor, mode, &config, &link);
if (err < 0)
- dev_err(sor->dev, "failed to compute link configuration: %d\n",
- err);
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, SOR_PLL2);
- /* switch to DP clock */
- err = clk_set_parent(sor->clk, sor->clk_dp);
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /* switch to DP parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
if (err < 0)
- dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to set parent clock: %d\n", err);
/* power DP lanes */
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- /* set linkctl */
+ tegra_sor_apply_config(sor, &config);
+
+ /* enable link */
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
-
- value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
- value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
-
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
tegra_sor_writel(sor, value, SOR_DP_TPG);
- value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
- value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
- value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
-
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
-
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
-
- if (config.active_polarity)
- value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
- else
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-
- value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
- value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
- tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
-
- value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
- value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
- value |= config.hblank_symbols & 0xffff;
- tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-
- value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
- value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
- value |= config.vblank_symbols & 0xffff;
- tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-
/* enable pad calibration logic */
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- /*
- * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
- * raster, associate with display controller)
- */
- value = SOR_STATE_ASY_PROTOCOL_DP_A |
- SOR_STATE_ASY_CRC_MODE_COMPLETE |
- SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- value &= ~SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- value |= SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- value &= ~SOR_STATE_ASY_VSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- value |= SOR_STATE_ASY_VSYNCPOL;
-
- switch (config.bits_per_pixel) {
- case 24:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
- break;
-
- case 18:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
- break;
-
- default:
- BUG();
- break;
- }
-
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- /*
- * TODO: The video timing programming below doesn't seem to match the
- * register definitions.
- */
-
- value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
- vse = mode->vsync_end - mode->vsync_start - 1;
- hse = mode->hsync_end - mode->hsync_start - 1;
-
- value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
- vbe = vse + (mode->vsync_start - mode->vdisplay);
- hbe = hse + (mode->hsync_start - mode->hdisplay);
-
- value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
- vbs = vbe + mode->vdisplay;
- hbs = hbe + mode->hdisplay;
-
- value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
- tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
-
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ tegra_sor_mode_set(sor, mode, state);
+
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
if (err < 0)
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_sor_state *state = to_sor_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
+ struct drm_display_info *info;
int err;
+ info = &output->connector.display_info;
+
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
if (err < 0) {
return err;
}
+ switch (info->bpc) {
+ case 8:
+ case 6:
+ state->bpc = info->bpc;
+ break;
+
+ default:
+ DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
+ state->bpc = 8;
+ break;
+ }
+
return 0;
}
if (err < 0)
dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
- reset_control_assert(sor->rst);
- usleep_range(1000, 2000);
- clk_disable_unprepare(sor->clk);
+ pm_runtime_put(sor->dev);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
struct tegra_output *output = encoder_to_output(encoder);
unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
struct tegra_sor_hdmi_settings *settings;
struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_state *state;
struct drm_display_mode *mode;
- struct drm_display_info *info;
+ unsigned int div, i;
u32 value;
int err;
+ state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
- info = &output->connector.display_info;
- err = clk_prepare_enable(sor->clk);
- if (err < 0)
- dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ pm_runtime_get_sync(sor->dev);
- usleep_range(1000, 2000);
-
- reset_control_deassert(sor->rst);
-
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
tegra_sor_writel(sor, value, SOR_REFCLK);
- /* XXX don't hardcode */
- value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
- SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
- SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
- SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
- SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
- SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
- SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
- SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
- SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
- SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /* switch to parent clock */
+ err = clk_set_parent(sor->clk_src, sor->clk_parent);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set source clock: %d\n", err);
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_src);
if (err < 0)
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
value &= ~DITHER_CONTROL_MASK;
value &= ~BASE_COLOR_SIZE_MASK;
- switch (info->bpc) {
+ switch (state->bpc) {
case 6:
value |= BASE_COLOR_SIZE_666;
break;
break;
default:
- WARN(1, "%u bits-per-color not supported\n", info->bpc);
+ WARN(1, "%u bits-per-color not supported\n", state->bpc);
+ value |= BASE_COLOR_SIZE_888;
break;
}
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- /* configure mode */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
- value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
- value &= ~SOR_STATE_ASY_OWNER_MASK;
-
- value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
- SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- value &= ~SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- value |= SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- value &= ~SOR_STATE_ASY_VSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- value |= SOR_STATE_ASY_VSYNCPOL;
-
- switch (info->bpc) {
- case 8:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
- break;
-
- case 6:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
- break;
-
- default:
- BUG();
- break;
- }
-
- tegra_sor_writel(sor, value, SOR_STATE1);
-
+ /* configure dynamic range of output */
value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+ /* configure colorspace */
value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
- /*
- * TODO: The video timing programming below doesn't seem to match the
- * register definitions.
- */
-
- value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
- /* sync end = sync width - 1 */
- vse = mode->vsync_end - mode->vsync_start - 1;
- hse = mode->hsync_end - mode->hsync_start - 1;
-
- value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
- /* blank end = sync end + back porch */
- vbe = vse + (mode->vtotal - mode->vsync_end);
- hbe = hse + (mode->htotal - mode->hsync_end);
-
- value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
- /* blank start = blank end + active */
- vbs = vbe + mode->vdisplay;
- hbs = hbe + mode->hdisplay;
-
- value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
- tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+ tegra_sor_mode_set(sor, mode, state);
tegra_sor_update(sor);
* XXX: Remove this reset once proper hand-over from firmware to
* kernel is possible.
*/
- err = reset_control_assert(sor->rst);
- if (err < 0) {
- dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
- return err;
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+ err);
+ return err;
+ }
}
err = clk_prepare_enable(sor->clk);
usleep_range(1000, 3000);
- err = reset_control_deassert(sor->rst);
- if (err < 0) {
- dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
- return err;
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+ err);
+ return err;
+ }
}
err = clk_prepare_enable(sor->clk_safe);
.remove = tegra_sor_hdmi_remove,
};
+static const u8 tegra124_sor_xbar_cfg[5] = {
+ 0, 1, 2, 3, 4
+};
+
static const struct tegra_sor_soc tegra124_sor = {
.supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
};
static const struct tegra_sor_soc tegra210_sor = {
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
};
static const struct tegra_sor_soc tegra210_sor1 = {
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
+
+ .xbar_cfg = tegra210_sor_xbar_cfg,
};
static const struct of_device_id tegra_sor_of_match[] = {
goto remove;
}
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst)) {
- err = PTR_ERR(sor->rst);
- dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
- goto remove;
+ if (!pdev->dev.pm_domain) {
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+ dev_err(&pdev->dev, "failed to get reset control: %d\n",
+ err);
+ goto remove;
+ }
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
goto remove;
}
+ if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
+ sor->clk_src = devm_clk_get(&pdev->dev, "source");
+ if (IS_ERR(sor->clk_src)) {
+ err = PTR_ERR(sor->clk_src);
+ dev_err(sor->dev, "failed to get source clock: %d\n",
+ err);
+ goto remove;
+ }
+ }
+
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(sor->clk_parent)) {
err = PTR_ERR(sor->clk_parent);
goto remove;
}
+ platform_set_drvdata(pdev, sor);
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
+ pm_runtime_put(&pdev->dev);
+
+ if (IS_ERR(sor->clk_brick)) {
+ err = PTR_ERR(sor->clk_brick);
+ dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ goto remove;
+ }
+
INIT_LIST_HEAD(&sor->client.list);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
goto remove;
}
- platform_set_drvdata(pdev, sor);
-
return 0;
remove:
struct tegra_sor *sor = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
return 0;
}
+#ifdef CONFIG_PM
+static int tegra_sor_suspend(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sor->clk);
+
+ return 0;
+}
+
+static int tegra_sor_resume(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_sor_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+};
+
struct platform_driver tegra_sor_driver = {
.driver = {
.name = "tegra-sor",
.of_match_table = tegra_sor_of_match,
+ .pm = &tegra_sor_pm_ops,
},
.probe = tegra_sor_probe,
.remove = tegra_sor_remove,
#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_30_444 (0x6 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_36_444 (0x8 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_48_444 (0x9 << 17)
#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
- drm_handle_vblank(dev, 0);
+ drm_crtc_handle_vblank(crtc);
if (!skip_event) {
struct drm_pending_vblank_event *event;
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
-
- if (bo->ttm)
- ttm_tt_destroy(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
+ fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
ret = bdev->driver->move(bo, evict, interruptible,
no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
out_err:
new_man = &bdev->man[bo->mem.mem_type];
- if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
- ttm_tt_unbind(bo->ttm);
+ if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
if (bo->bdev->driver->move_notify)
bo->bdev->driver->move_notify(bo, NULL);
- if (bo->ttm) {
- ttm_tt_unbind(bo->ttm);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
ttm_bo_mem_put(bo, &bo->mem);
ww_mutex_unlock (&bo->resv->lock);
struct ttm_placement placement;
int ret = 0;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS) {
- pr_err("Failed to expire sync object before buffer eviction\n");
- }
- goto out;
- }
-
lockdep_assert_held(&bo->resv->lock.base);
evict_mem = bo->mem;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait_gpu);
- if (ret) {
+ if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);
+/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct fence *fence;
+ int ret;
+
+ spin_lock(&man->move_lock);
+ fence = fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ reservation_object_add_shared_fence(bo->resv, fence);
+
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (unlikely(ret))
+ return ret;
+
+ fence_put(bo->moving);
+ bo->moving = fence;
+ }
+
+ return 0;
+}
+
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
if (unlikely(ret != 0))
return ret;
} while (1);
- if (mem->mm_node == NULL)
- return -ENOMEM;
mem->mem_type = mem_type;
- return 0;
+ return ttm_bo_add_move_fence(bo, man, mem);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
bool has_erestartsys = false;
int i, ret;
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (unlikely(ret))
+ return ret;
+
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
-
- if (mem->mm_node)
+
+ if (mem->mm_node) {
+ ret = ttm_bo_add_move_fence(bo, man, mem);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ return ret;
+ }
break;
+ }
}
if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
lockdep_assert_held(&bo->resv->lock.base);
- /*
- * Don't wait for the BO on initial allocation. This is important when
- * the BO has an imported reservation object.
- */
- if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
- /*
- * FIXME: It's possible to pipeline buffer moves.
- * Have the driver move function wait for idle when necessary,
- * instead of doing it here.
- */
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
- if (ret)
- return ret;
- }
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
return ret;
}
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
return false;
}
+EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
- bo->priv_flags = 0;
+ bo->moving = NULL;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
+ struct fence *fence;
int ret;
/*
spin_lock(&glob->lru_lock);
}
spin_unlock(&glob->lru_lock);
+
+ spin_lock(&man->move_lock);
+ fence = fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ ret = fence_wait(fence, false);
+ fence_put(fence);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ pr_err("Cleanup eviction failed\n");
+ }
+ }
+ }
+
return 0;
}
mem_type);
return ret;
}
+ fence_put(man->move);
man->use_type = false;
man->has_type = false;
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
mutex_init(&man->io_reserve_mutex);
+ spin_lock_init(&man->move_lock);
INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
man->size = p_size;
INIT_LIST_HEAD(&man->lru);
+ man->move = NULL;
return 0;
}
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- struct reservation_object_list *fobj;
- struct reservation_object *resv;
- struct fence *excl;
- long timeout = 15 * HZ;
- int i;
-
- resv = bo->resv;
- fobj = reservation_object_get_list(resv);
- excl = reservation_object_get_excl(resv);
- if (excl) {
- if (!fence_is_signaled(excl)) {
- if (no_wait)
- return -EBUSY;
-
- timeout = fence_wait_timeout(excl,
- interruptible, timeout);
- }
- }
-
- for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
- struct fence *fence;
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
-
- if (!fence_is_signaled(fence)) {
- if (no_wait)
- return -EBUSY;
-
- timeout = fence_wait_timeout(fence,
- interruptible, timeout);
- }
- }
+ long timeout = no_wait ? 0 : 15 * HZ;
+ timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
if (timeout == 0)
return -EBUSY;
- reservation_object_add_excl_fence(resv, NULL);
- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ reservation_object_add_excl_fence(bo->resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
ttm_bo_list_ref_sub(bo, put_count, true);
/**
- * Wait for GPU, then move to system cached.
+ * Move to system cached
*/
- ret = ttm_bo_wait(bo, false, false);
-
- if (unlikely(ret != 0))
- goto out;
-
if ((bo->mem.placement & swap_placement) != swap_placement) {
struct ttm_mem_reg evict_mem;
goto out;
}
+ /**
+ * Make sure BO is idle.
+ */
+
+ ret = ttm_bo_wait(bo, false, false);
+ if (unlikely(ret != 0))
+ goto out;
+
ttm_bo_unmap_virtual(bo);
/**
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
unsigned long add = 0;
int dir;
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
- ttm_tt_unbind(ttm);
+ if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ fbo->moving = NULL;
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct fence *fence,
bool evict,
- bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
if (ret)
return ret;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
- (bo->ttm != NULL)) {
- ttm_tt_unbind(bo->ttm);
+ if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
* operation has completed.
*/
- set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+ struct fence *fence, bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+ struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+ int ret;
+
+ reservation_object_add_excl_fence(bo->resv, fence);
+
+ if (!evict) {
+ struct ttm_buffer_object *ghost_obj;
+
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
+
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
+
+ reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+ * needs to stay alive. Otherwhise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost_obj);
+ ttm_bo_unref(&ghost_obj);
+
+ } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+ /**
+ * BO doesn't have a TTM we need to bind/unbind. Just remember
+ * this eviction and free up the allocation
+ */
+
+ spin_lock(&from->move_lock);
+ if (!from->move || fence_is_later(fence, from->move)) {
+ fence_put(from->move);
+ from->move = fence_get(fence);
+ }
+ spin_unlock(&from->move_lock);
+
+ ttm_bo_free_old_node(bo);
+
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
+
+ } else {
+ /**
+ * Last resort, wait for the move to be completed.
+ *
+ * Should never happen in pratice.
+ */
+
+ ret = ttm_bo_wait(bo, false, false);
+ if (ret)
+ return ret;
+
+ if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_free_old_node(bo);
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
{
int ret = 0;
- if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+ if (likely(!bo->moving))
goto out_unlock;
/*
* Quick non-stalling check for idle.
*/
- ret = ttm_bo_wait(bo, false, true);
- if (likely(ret == 0))
- goto out_unlock;
+ if (fence_is_signaled(bo->moving))
+ goto out_clear;
/*
* If possible, avoid waiting for GPU with mmap_sem
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait(bo, true, false);
+ (void) fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = ttm_bo_wait(bo, true, false);
- if (unlikely(ret != 0))
+ ret = fence_wait(bo->moving, true);
+ if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
+ goto out_unlock;
+ }
+
+out_clear:
+ fence_put(bo->moving);
+ bo->moving = NULL;
out_unlock:
return ret;
void ttm_tt_destroy(struct ttm_tt *ttm)
{
- if (unlikely(ttm == NULL))
+ int ret;
+
+ if (ttm == NULL)
return;
if (ttm->state == tt_bound) {
- ttm_tt_unbind(ttm);
+ ret = ttm->func->unbind(ttm);
+ BUG_ON(ret);
+ ttm->state = tt_unbound;
}
if (ttm->state == tt_unbound)
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
- int ret;
-
- if (ttm->state == tt_bound) {
- ret = ttm->func->unbind(ttm);
- BUG_ON(ret);
- ttm->state = tt_unbound;
- }
-}
-
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
- drm_connector_unregister_all(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_unplug_dev(dev);
const struct vc4_crtc_data *data;
void __iomem *regs;
+ /* Timestamp at start of vblank irq - unaffected by lock delays. */
+ ktime_t t_vblank;
+
/* Which HVS channel we're using for our CRTC. */
int channel;
u8 lut_r[256];
u8 lut_g[256];
u8 lut_b[256];
+ /* Size in pixels of the COB memory allocated to this CRTC. */
+ u32 cob_size;
struct drm_pending_vblank_event *event;
};
}
#endif
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ u32 val;
+ int fifo_lines;
+ int vblank_lines;
+ int ret = 0;
+
+ /*
+ * XXX Doesn't work well in interlaced mode yet, partially due
+ * to problems in vc4 kms or drm core interlaced mode handling,
+ * so disable for now in interlaced mode.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return ret;
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ /*
+ * Read vertical scanline which is currently composed for our
+ * pixelvalve by the HVS, and also the scaler status.
+ */
+ val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Vertical position of hvs composed scanline. */
+ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+ /* No hpos info available. */
+ if (hpos)
+ *hpos = 0;
+
+ /* This is the offset we need for translating hvs -> pv scanout pos. */
+ fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+
+ if (fifo_lines > 0)
+ ret |= DRM_SCANOUTPOS_VALID;
+
+ /* HVS more than fifo_lines into frame for compositing? */
+ if (*vpos > fifo_lines) {
+ /*
+ * We are in active scanout and can get some meaningful results
+ * from HVS. The actual PV scanout can not trail behind more
+ * than fifo_lines as that is the fifo's capacity. Assume that
+ * in active scanout the HVS and PV work in lockstep wrt. HVS
+ * refilling the fifo and PV consuming from the fifo, ie.
+ * whenever the PV consumes and frees up a scanline in the
+ * fifo, the HVS will immediately refill it, therefore
+ * incrementing vpos. Therefore we choose HVS read position -
+ * fifo size in scanlines as a estimate of the real scanout
+ * position of the PV.
+ */
+ *vpos -= fifo_lines + 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ *vpos /= 2;
+
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ return ret;
+ }
+
+ /*
+ * Less: This happens when we are in vblank and the HVS, after getting
+ * the VSTART restart signal from the PV, just started refilling its
+ * fifo with new lines from the top-most lines of the new framebuffers.
+ * The PV does not scan out in vblank, so does not remove lines from
+ * the fifo, so the fifo will be full quickly and the HVS has to pause.
+ * We can't get meaningful readings wrt. scanline position of the PV
+ * and need to make things up in a approximative but consistent way.
+ */
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+
+ if (flags & DRM_CALLED_FROM_VBLIRQ) {
+ /*
+ * Assume the irq handler got called close to first
+ * line of vblank, so PV has about a full vblank
+ * scanlines to go, and as a base timestamp use the
+ * one taken at entry into vblank irq handler, so it
+ * is not affected by random delays due to lock
+ * contention on event_lock or vblank_time lock in
+ * the core.
+ */
+ *vpos = -vblank_lines;
+
+ if (stime)
+ *stime = vc4_crtc->t_vblank;
+ if (etime)
+ *etime = vc4_crtc->t_vblank;
+
+ /*
+ * If the HVS fifo is not yet full then we know for certain
+ * we are at the very beginning of vblank, as the hvs just
+ * started refilling, and the stime and etime timestamps
+ * truly correspond to start of vblank.
+ */
+ if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ } else {
+ /*
+ * No clue where we are inside vblank. Return a vpos of zero,
+ * which will cause calling code to just return the etime
+ * timestamp uncorrected. At least this is no worse than the
+ * standard fallback.
+ */
+ *vpos = 0;
+ }
+
+ return ret;
+}
+
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ struct drm_crtc *crtc = &vc4_crtc->base;
+ struct drm_crtc_state *state = crtc->state;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
+ vblank_time, flags,
+ &state->adjusted_mode);
+}
+
static void vc4_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
- vc4_hvs_dump_state(dev);
- }
-
if (crtc->state->event) {
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
vc4_crtc->event = crtc->state->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
}
}
{
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ u32 chan = vc4_crtc->channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (vc4_crtc->event) {
+ if (vc4_crtc->event &&
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
+ vc4_crtc->t_vblank = ktime_get();
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
drm_crtc_handle_vblank(&vc4_crtc->base);
vc4_crtc_handle_page_flip(vc4_crtc);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ drm_crtc_vblank_put(crtc);
drm_framebuffer_unreference(flip_state->fb);
kfree(flip_state);
return ret;
}
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
/* Immediately update the plane's legacy fb pointer, so that later
* modeset prep sees the state that will be present when the semaphore
* is released.
}
}
+static void
+vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
+{
+ struct drm_device *drm = vc4_crtc->base.dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
+ /* Top/base are supposed to be 4-pixel aligned, but the
+ * Raspberry Pi firmware fills the low bits (which are
+ * presumably ignored).
+ */
+ u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+ vc4_crtc->cob_size = top - base + 4;
+}
+
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
crtc->cursor = cursor_plane;
}
+ vc4_crtc_get_cob_allocation(vc4_crtc);
+
CRTC_WRITE(PV_INTEN, 0);
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
{
struct drm_connector *connector = NULL;
struct vc4_dpi_connector *dpi_connector;
- int ret = 0;
dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
GFP_KERNEL);
- if (!dpi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!dpi_connector)
+ return ERR_PTR(-ENOMEM);
+
connector = &dpi_connector->base;
dpi_connector->encoder = dpi->encoder;
drm_mode_connector_attach_encoder(connector, dpi->encoder);
return connector;
-
- fail:
- if (connector)
- vc4_dpi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "drm_fb_cma_helper.h"
#include "uapi/drm/vc4_drm.h"
return map;
}
+static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_get_param *args = data;
+ int ret;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ switch (args->param) {
+ case DRM_VC4_PARAM_V3D_IDENT0:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT0);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT1:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT1);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT2:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT2);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ args->value = true;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void vc4_lastclose(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->fbdev)
- drm_fbdev_cma_restore_mode(vc4->fbdev);
+ drm_fbdev_cma_restore_mode(vc4->fbdev);
}
static const struct file_operations vc4_drm_fops = {
};
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .get_scanout_position = vc4_crtc_get_scanoutpos,
+ .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
vc4_bo_cache_init(drm);
drm_mode_config_init(drm);
- if (ret)
- goto unref;
vc4_gem_init(drm);
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
-unref:
drm_dev_unref(drm);
vc4_bo_cache_destroy(drm);
return ret;
static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
&vc4_dpi_driver,
- &vc4_crtc_driver,
&vc4_hvs_driver,
+ &vc4_crtc_driver,
&vc4_v3d_driver,
};
uint32_t uniforms_src_size;
uint32_t num_texture_samples;
struct vc4_texture_sample_info *texture_samples;
+
+ uint32_t num_uniform_addr_offsets;
+ uint32_t *uniform_addr_offsets;
};
/**
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
if (IS_ERR(hdmi->hd_regs))
return PTR_ERR(hdmi->hd_regs);
- ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
- if (!ddc_node) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
- return -ENODEV;
- }
-
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
DRM_ERROR("Failed to get pixel clock\n");
return PTR_ERR(hdmi->hsm_clock);
}
+ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (!ddc_node) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ return -ENODEV;
+ }
+
hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
if (!hdmi->ddc) {
DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->fbdev)
- drm_fbdev_cma_hotplug_event(vc4->fbdev);
+ drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
struct vc4_commit {
return -ENOMEM;
/* Make sure that any outstanding modesets have finished. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
+ if (nonblock) {
+ ret = down_trylock(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return -EBUSY;
+ }
+ } else {
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
+ }
}
ret = drm_atomic_helper_prepare_planes(dev, state);
.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
},
+ {
+ .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
+ },
+ {
+ .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
+ },
{
.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
QPU_R_ELEM_QPU = 38,
QPU_R_NOP,
QPU_R_XY_PIXEL_COORD = 41,
- QPU_R_MS_REV_FLAGS = 41,
+ QPU_R_MS_REV_FLAGS = 42,
QPU_R_VPM = 48,
QPU_R_VPM_LD_BUSY,
QPU_R_VPM_LD_WAIT,
#define QPU_COND_MUL_SHIFT 46
#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
+#define QPU_BRANCH_COND_SHIFT 52
+#define QPU_BRANCH_COND_MASK QPU_MASK(55, 52)
+
+#define QPU_BRANCH_REL ((uint64_t)1 << 51)
+#define QPU_BRANCH_REG ((uint64_t)1 << 50)
+
+#define QPU_BRANCH_RADDR_A_SHIFT 45
+#define QPU_BRANCH_RADDR_A_MASK QPU_MASK(49, 45)
+
#define QPU_SF ((uint64_t)1 << 45)
#define QPU_WADDR_ADD_SHIFT 38
#define QPU_OP_ADD_SHIFT 24
#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
+#define QPU_LOAD_IMM_SHIFT 0
+#define QPU_LOAD_IMM_MASK QPU_MASK(31, 0)
+
+#define QPU_BRANCH_TARGET_SHIFT 0
+#define QPU_BRANCH_TARGET_MASK QPU_MASK(31, 0)
+
#endif /* VC4_QPU_DEFINES_H */
#define SCALER_DISPLACT0 0x00000030
#define SCALER_DISPLACT1 0x00000034
#define SCALER_DISPLACT2 0x00000038
+#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \
+ (x) * (SCALER_DISPLACT1 - \
+ SCALER_DISPLACT0))
+
#define SCALER_DISPCTRL0 0x00000040
# define SCALER_DISPCTRLX_ENABLE BIT(31)
# define SCALER_DISPCTRLX_RESET BIT(30)
# define SCALER_DISPBKGND_FILL BIT(24)
#define SCALER_DISPSTAT0 0x00000048
-#define SCALER_DISPBASE0 0x0000004c
# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30)
# define SCALER_DISPSTATX_MODE_SHIFT 30
# define SCALER_DISPSTATX_MODE_DISABLED 0
# define SCALER_DISPSTATX_MODE_EOF 3
# define SCALER_DISPSTATX_FULL BIT(29)
# define SCALER_DISPSTATX_EMPTY BIT(28)
+# define SCALER_DISPSTATX_FRAME_COUNT_MASK VC4_MASK(17, 12)
+# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT 12
+# define SCALER_DISPSTATX_LINE_MASK VC4_MASK(11, 0)
+# define SCALER_DISPSTATX_LINE_SHIFT 0
+
+#define SCALER_DISPBASE0 0x0000004c
+/* Last pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel. Must be 4-pixel aligned (and thus 4 pixels less than the
+ * next COB base).
+ */
+# define SCALER_DISPBASEX_TOP_MASK VC4_MASK(31, 16)
+# define SCALER_DISPBASEX_TOP_SHIFT 16
+/* First pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel. Must be 4-pixel aligned.
+ */
+# define SCALER_DISPBASEX_BASE_MASK VC4_MASK(15, 0)
+# define SCALER_DISPBASEX_BASE_SHIFT 0
+
#define SCALER_DISPCTRL1 0x00000050
#define SCALER_DISPBKGND1 0x00000054
#define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \
(x) * (SCALER_DISPSTAT1 - \
SCALER_DISPSTAT0))
#define SCALER_DISPBASE1 0x0000005c
+#define SCALER_DISPBASEX(x) (SCALER_DISPBASE0 + \
+ (x) * (SCALER_DISPBASE1 - \
+ SCALER_DISPBASE0))
#define SCALER_DISPCTRL2 0x00000060
#define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \
(x) * (SCALER_DISPCTRL1 - \
uint32_t src_offset = *(uint32_t *)(pkt_u + o);
uint32_t *texture_handles_u;
void *uniform_data_u;
- uint32_t tex;
+ uint32_t tex, uni;
*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
}
}
+ /* Fill in the uniform slots that need this shader's
+ * start-of-uniforms address (used for resetting the uniform
+ * stream in the presence of control flow).
+ */
+ for (uni = 0;
+ uni < validated_shader->num_uniform_addr_offsets;
+ uni++) {
+ uint32_t o = validated_shader->uniform_addr_offsets[uni];
+ ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
+ }
+
*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
exec->uniforms_u += validated_shader->uniforms_src_size;
#include "vc4_drv.h"
#include "vc4_qpu_defines.h"
+#define LIVE_REG_COUNT (32 + 32 + 4)
+
struct vc4_shader_validation_state {
+ /* Current IP being validated. */
+ uint32_t ip;
+
+ /* IP at the end of the BO, do not read shader[max_ip] */
+ uint32_t max_ip;
+
+ uint64_t *shader;
+
struct vc4_texture_sample_info tmu_setup[2];
int tmu_write_count[2];
*
* This is used for the validation of direct address memory reads.
*/
- uint32_t live_min_clamp_offsets[32 + 32 + 4];
- bool live_max_clamp_regs[32 + 32 + 4];
+ uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
+ bool live_max_clamp_regs[LIVE_REG_COUNT];
+ uint32_t live_immediates[LIVE_REG_COUNT];
+
+ /* Bitfield of which IPs are used as branch targets.
+ *
+ * Used for validation that the uniform stream is updated at the right
+ * points and clearing the texturing/clamping state.
+ */
+ unsigned long *branch_targets;
+
+ /* Set when entering a basic block, and cleared when the uniform
+ * address update is found. This is used to make sure that we don't
+ * read uniforms when the address is undefined.
+ */
+ bool needs_uniform_address_update;
+
+ /* Set when we find a backwards branch. If the branch is backwards,
+ * the taraget is probably doing an address reset to read uniforms,
+ * and so we need to be sure that a uniforms address is present in the
+ * stream, even if the shader didn't need to read uniforms in later
+ * basic blocks.
+ */
+ bool needs_uniform_address_for_loop;
};
static uint32_t
}
static bool
-check_tmu_write(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+check_tmu_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
return false;
}
- /* We assert that the the clamped address is the first
+ /* We assert that the clamped address is the first
* argument, and the UBO base address is the second argument.
* This is arbitrary, but simpler than supporting flipping the
* two either way.
/* Since direct uses a RADDR uniform reference, it will get counted in
* check_instruction_reads()
*/
- if (!is_direct)
+ if (!is_direct) {
+ if (validation_state->needs_uniform_address_update) {
+ DRM_ERROR("Texturing with undefined uniform address\n");
+ return false;
+ }
+
validated_shader->uniforms_size += 4;
+ }
if (submit) {
if (!record_texture_sample(validated_shader,
return true;
}
+static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
+{
+ uint32_t o = validated_shader->num_uniform_addr_offsets;
+ uint32_t num_uniforms = validated_shader->uniforms_size / 4;
+
+ validated_shader->uniform_addr_offsets =
+ krealloc(validated_shader->uniform_addr_offsets,
+ (o + 1) *
+ sizeof(*validated_shader->uniform_addr_offsets),
+ GFP_KERNEL);
+ if (!validated_shader->uniform_addr_offsets)
+ return false;
+
+ validated_shader->uniform_addr_offsets[o] = num_uniforms;
+ validated_shader->num_uniform_addr_offsets++;
+
+ return true;
+}
+
static bool
-check_reg_write(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint64_t inst = validation_state->shader[validation_state->ip];
+ u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ u32 add_lri = raddr_add_a_to_live_reg_index(inst);
+ /* We want our reset to be pointing at whatever uniform follows the
+ * uniforms base address.
+ */
+ u32 expected_offset = validated_shader->uniforms_size + 4;
+
+ /* We only support absolute uniform address changes, and we
+ * require that they be in the current basic block before any
+ * of its uniform reads.
+ *
+ * One could potentially emit more efficient QPU code, by
+ * noticing that (say) an if statement does uniform control
+ * flow for all threads and that the if reads the same number
+ * of uniforms on each side. However, this scheme is easy to
+ * validate so it's all we allow for now.
+ */
+
+ if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+ DRM_ERROR("uniforms address change must be "
+ "normal math\n");
+ return false;
+ }
+
+ if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+ DRM_ERROR("Uniform address reset must be an ADD.\n");
+ return false;
+ }
+
+ if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
+ DRM_ERROR("Uniform address reset must be unconditional.\n");
+ return false;
+ }
+
+ if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
+ !(inst & QPU_PM)) {
+ DRM_ERROR("No packing allowed on uniforms reset\n");
+ return false;
+ }
+
+ if (add_lri == -1) {
+ DRM_ERROR("First argument of uniform address write must be "
+ "an immediate value.\n");
+ return false;
+ }
+
+ if (validation_state->live_immediates[add_lri] != expected_offset) {
+ DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+ validation_state->live_immediates[add_lri],
+ expected_offset);
+ return false;
+ }
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("Second argument of uniform address write must be "
+ "a uniform.\n");
+ return false;
+ }
+
+ validation_state->needs_uniform_address_update = false;
+ validation_state->needs_uniform_address_for_loop = false;
+ return require_uniform_address_uniform(validated_shader);
+}
+
+static bool
+check_reg_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ bool ws = inst & QPU_WS;
+ bool is_b = is_mul ^ ws;
+ u32 lri = waddr_to_live_reg_index(waddr, is_b);
+
+ if (lri != -1) {
+ uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+ uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
+
+ if (sig == QPU_SIG_LOAD_IMM &&
+ QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
+ ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
+ (!is_mul && cond_add == QPU_COND_ALWAYS))) {
+ validation_state->live_immediates[lri] =
+ QPU_GET_FIELD(inst, QPU_LOAD_IMM);
+ } else {
+ validation_state->live_immediates[lri] = ~0;
+ }
+ }
switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS:
- /* XXX: We'll probably need to support this for reladdr, but
- * it's definitely a security-related one.
- */
- DRM_ERROR("uniforms address load unsupported\n");
- return false;
+ if (is_b) {
+ DRM_ERROR("relative uniforms address change "
+ "unsupported\n");
+ return false;
+ }
+
+ return validate_uniform_address_write(validated_shader,
+ validation_state,
+ is_mul);
case QPU_W_TLB_COLOR_MS:
case QPU_W_TLB_COLOR_ALL:
case QPU_W_TMU1_T:
case QPU_W_TMU1_R:
case QPU_W_TMU1_B:
- return check_tmu_write(inst, validated_shader, validation_state,
+ return check_tmu_write(validated_shader, validation_state,
is_mul);
case QPU_W_HOST_INT:
}
static void
-track_live_clamps(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+track_live_clamps(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
}
static bool
-check_instruction_writes(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
bool ok;
return false;
}
- ok = (check_reg_write(inst, validated_shader, validation_state,
- false) &&
- check_reg_write(inst, validated_shader, validation_state,
- true));
+ ok = (check_reg_write(validated_shader, validation_state, false) &&
+ check_reg_write(validated_shader, validation_state, true));
- track_live_clamps(inst, validated_shader, validation_state);
+ track_live_clamps(validated_shader, validation_state);
return ok;
}
static bool
-check_instruction_reads(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader)
+check_branch(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ int ip)
+{
+ int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+
+ if ((int)branch_imm < 0)
+ validation_state->needs_uniform_address_for_loop = true;
+
+ /* We don't want to have to worry about validation of this, and
+ * there's no need for it.
+ */
+ if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
+ DRM_ERROR("branch instruction at %d wrote a register.\n",
+ validation_state->ip);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
* already be OOM.
*/
validated_shader->uniforms_size += 4;
+
+ if (validation_state->needs_uniform_address_update) {
+ DRM_ERROR("Uniform read with undefined uniform "
+ "address\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Make sure that all branches are absolute and point within the shader, and
+ * note their targets for later.
+ */
+static bool
+vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t max_branch_target = 0;
+ bool found_shader_end = false;
+ int ip;
+ int shader_end_ip = 0;
+ int last_branch = -2;
+
+ for (ip = 0; ip < validation_state->max_ip; ip++) {
+ uint64_t inst = validation_state->shader[ip];
+ int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ uint32_t after_delay_ip = ip + 4;
+ uint32_t branch_target_ip;
+
+ if (sig == QPU_SIG_PROG_END) {
+ shader_end_ip = ip;
+ found_shader_end = true;
+ continue;
+ }
+
+ if (sig != QPU_SIG_BRANCH)
+ continue;
+
+ if (ip - last_branch < 4) {
+ DRM_ERROR("Branch at %d during delay slots\n", ip);
+ return false;
+ }
+ last_branch = ip;
+
+ if (inst & QPU_BRANCH_REG) {
+ DRM_ERROR("branching from register relative "
+ "not supported\n");
+ return false;
+ }
+
+ if (!(inst & QPU_BRANCH_REL)) {
+ DRM_ERROR("relative branching required\n");
+ return false;
+ }
+
+ /* The actual branch target is the instruction after the delay
+ * slots, plus whatever byte offset is in the low 32 bits of
+ * the instruction. Make sure we're not branching beyond the
+ * end of the shader object.
+ */
+ if (branch_imm % sizeof(inst) != 0) {
+ DRM_ERROR("branch target not aligned\n");
+ return false;
+ }
+
+ branch_target_ip = after_delay_ip + (branch_imm >> 3);
+ if (branch_target_ip >= validation_state->max_ip) {
+ DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+ ip, branch_target_ip,
+ validation_state->max_ip);
+ return false;
+ }
+ set_bit(branch_target_ip, validation_state->branch_targets);
+
+ /* Make sure that the non-branching path is also not outside
+ * the shader.
+ */
+ if (after_delay_ip >= validation_state->max_ip) {
+ DRM_ERROR("Branch at %d continues past shader end "
+ "(%d/%d)\n",
+ ip, after_delay_ip, validation_state->max_ip);
+ return false;
+ }
+ set_bit(after_delay_ip, validation_state->branch_targets);
+ max_branch_target = max(max_branch_target, after_delay_ip);
+
+ /* There are two delay slots after program end is signaled
+ * that are still executed, then we're finished.
+ */
+ if (found_shader_end && ip == shader_end_ip + 2)
+ break;
+ }
+
+ if (max_branch_target > shader_end_ip) {
+ DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+ return false;
}
return true;
}
+/* Resets any known state for the shader, used when we may be branched to from
+ * multiple locations in the program (or at shader start).
+ */
+static void
+reset_validation_state(struct vc4_shader_validation_state *validation_state)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
+
+ for (i = 0; i < LIVE_REG_COUNT; i++) {
+ validation_state->live_min_clamp_offsets[i] = ~0;
+ validation_state->live_max_clamp_regs[i] = false;
+ validation_state->live_immediates[i] = ~0;
+ }
+}
+
+static bool
+texturing_in_progress(struct vc4_shader_validation_state *validation_state)
+{
+ return (validation_state->tmu_write_count[0] != 0 ||
+ validation_state->tmu_write_count[1] != 0);
+}
+
+static bool
+vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t ip = validation_state->ip;
+
+ if (!test_bit(ip, validation_state->branch_targets))
+ return true;
+
+ if (texturing_in_progress(validation_state)) {
+ DRM_ERROR("Branch target landed during TMU setup\n");
+ return false;
+ }
+
+ /* Reset our live values tracking, since this instruction may have
+ * multiple predecessors.
+ *
+ * One could potentially do analysis to determine that, for
+ * example, all predecessors have a live max clamp in the same
+ * register, but we don't bother with that.
+ */
+ reset_validation_state(validation_state);
+
+ /* Since we've entered a basic block from potentially multiple
+ * predecessors, we need the uniforms address to be updated before any
+ * unforms are read. We require that after any branch point, the next
+ * uniform to be loaded is a uniform address offset. That uniform's
+ * offset will be marked by the uniform address register write
+ * validation, or a one-off the end-of-program check.
+ */
+ validation_state->needs_uniform_address_update = true;
+
+ return true;
+}
+
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
- uint32_t ip, max_ip;
- uint64_t *shader;
- struct vc4_validated_shader_info *validated_shader;
+ uint32_t ip;
+ struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
- int i;
memset(&validation_state, 0, sizeof(validation_state));
+ validation_state.shader = shader_obj->vaddr;
+ validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
- for (i = 0; i < 8; i++)
- validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
- for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
- validation_state.live_min_clamp_offsets[i] = ~0;
+ reset_validation_state(&validation_state);
- shader = shader_obj->vaddr;
- max_ip = shader_obj->base.size / sizeof(uint64_t);
+ validation_state.branch_targets =
+ kcalloc(BITS_TO_LONGS(validation_state.max_ip),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!validation_state.branch_targets)
+ goto fail;
validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
if (!validated_shader)
- return NULL;
+ goto fail;
+
+ if (!vc4_validate_branches(&validation_state))
+ goto fail;
- for (ip = 0; ip < max_ip; ip++) {
- uint64_t inst = shader[ip];
+ for (ip = 0; ip < validation_state.max_ip; ip++) {
+ uint64_t inst = validation_state.shader[ip];
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ validation_state.ip = ip;
+
+ if (!vc4_handle_branch_target(&validation_state))
+ goto fail;
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
- if (!check_instruction_writes(inst, validated_shader,
+ if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
goto fail;
}
- if (!check_instruction_reads(inst, validated_shader))
+ if (!check_instruction_reads(validated_shader,
+ &validation_state))
goto fail;
if (sig == QPU_SIG_PROG_END) {
break;
case QPU_SIG_LOAD_IMM:
- if (!check_instruction_writes(inst, validated_shader,
+ if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
goto fail;
}
break;
+ case QPU_SIG_BRANCH:
+ if (!check_branch(inst, validated_shader,
+ &validation_state, ip))
+ goto fail;
+ break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip);
break;
}
- if (ip == max_ip) {
+ if (ip == validation_state.max_ip) {
DRM_ERROR("shader failed to terminate before "
"shader BO end at %zd\n",
shader_obj->base.size);
goto fail;
}
+ /* If we did a backwards branch and we haven't emitted a uniforms
+ * reset since then, we still need the uniforms stream to have the
+ * uniforms address available so that the backwards branch can do its
+ * uniforms reset.
+ *
+ * We could potentially prove that the backwards branch doesn't
+ * contain any uses of uniforms until program exit, but that doesn't
+ * seem to be worth the trouble.
+ */
+ if (validation_state.needs_uniform_address_for_loop) {
+ if (!require_uniform_address_uniform(validated_shader))
+ goto fail;
+ validated_shader->uniforms_size += 4;
+ }
+
/* Again, no chance of integer overflow here because the worst case
* scenario is 8 bytes of uniforms plus handles per 8-byte
* instruction.
(validated_shader->uniforms_size +
4 * validated_shader->num_texture_samples);
+ kfree(validation_state.branch_targets);
+
return validated_shader;
fail:
+ kfree(validation_state.branch_targets);
if (validated_shader) {
kfree(validated_shader->texture_samples);
kfree(validated_shader);
ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
obj-$(CONFIG_DRM_VGEM) += vgem.o
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
- drm_gem_put_pages(&obj->base, obj->pages, false, false);
- obj->pages = NULL;
-}
-
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
- drm_gem_free_mmap_offset(obj);
-
- if (vgem_obj->use_dma_buf && obj->dma_buf) {
- dma_buf_put(obj->dma_buf);
- obj->dma_buf = NULL;
- }
-
drm_gem_object_release(obj);
-
- if (vgem_obj->pages)
- vgem_gem_put_pages(vgem_obj);
-
- vgem_obj->pages = NULL;
-
kfree(vgem_obj);
}
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
- struct page **pages;
-
- if (obj->pages || obj->use_dma_buf)
- return 0;
-
- pages = drm_gem_get_pages(&obj->base);
- if (IS_ERR(pages)) {
- return PTR_ERR(pages);
- }
-
- obj->pages = pages;
-
- return 0;
-}
-
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
- loff_t num_pages;
- pgoff_t page_offset;
- int ret;
-
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
- PAGE_SHIFT;
-
- num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
- if (page_offset > num_pages)
- return VM_FAULT_SIGBUS;
-
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- obj->pages[page_offset]);
- switch (ret) {
- case 0:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -EBUSY:
- return VM_FAULT_RETRY;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
- default:
- WARN_ON(1);
- return VM_FAULT_SIGBUS;
+ unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ struct page *page;
+
+ page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+ (vaddr - vma->vm_start) >> PAGE_SHIFT);
+ if (!IS_ERR(page)) {
+ vmf->page = page;
+ return 0;
+ } else switch (PTR_ERR(page)) {
+ case -ENOSPC:
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON_ONCE(PTR_ERR(page));
+ return VM_FAULT_SIGBUS;
}
}
.close = drm_gem_vm_close,
};
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct vgem_file *vfile;
+ int ret;
+
+ vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+ if (!vfile)
+ return -ENOMEM;
+
+ file->driver_priv = vfile;
+
+ ret = vgem_fence_open(vfile);
+ if (ret) {
+ kfree(vfile);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct vgem_file *vfile = file->driver_priv;
+
+ vgem_fence_close(vfile);
+ kfree(vfile);
+}
+
/* ioctls */
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
- struct drm_gem_object *gem_object;
- int err;
-
- size = roundup(size, PAGE_SIZE);
+ int ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
- gem_object = &obj->base;
-
- err = drm_gem_object_init(dev, gem_object, size);
- if (err)
- goto out;
-
- err = vgem_gem_get_pages(obj);
- if (err)
- goto out;
-
- err = drm_gem_handle_create(file, gem_object, handle);
- if (err)
- goto handle_out;
+ ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+ if (ret)
+ goto err_free;
- drm_gem_object_unreference_unlocked(gem_object);
+ ret = drm_gem_handle_create(file, &obj->base, handle);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ goto err;
- return gem_object;
+ return &obj->base;
-handle_out:
- drm_gem_object_release(gem_object);
-out:
+err_free:
kfree(obj);
- return ERR_PTR(err);
+err:
+ return ERR_PTR(ret);
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct drm_gem_object *gem_object;
- uint64_t size;
- uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ u64 pitch, size;
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
size = args->height * pitch;
if (size == 0)
return -EINVAL;
gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
- if (IS_ERR(gem_object)) {
- DRM_DEBUG_DRIVER("object creation failed\n");
+ if (IS_ERR(gem_object))
return PTR_ERR(gem_object);
- }
args->size = gem_object->size;
args->pitch = pitch;
return 0;
}
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
{
- int ret = 0;
struct drm_gem_object *obj;
+ int ret;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
+ if (!obj->filp) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto unref;
- BUG_ON(!obj->filp);
-
- obj->filp->private_data = obj;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
-
unref:
drm_gem_object_unreference_unlocked(obj);
}
static struct drm_ioctl_desc vgem_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long flags = vma->vm_flags;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ /* Keep the WC mmaping set by drm_gem_mmap() but our pages
+ * are ordinary and not special.
+ */
+ vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+ return 0;
+}
+
static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .mmap = drm_gem_mmap,
+ .mmap = vgem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
};
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+ long n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages;
+
+ /* Flush the object from the CPU cache so that importers can rely
+ * on coherent indirect access via the exported dma-address.
+ */
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ drm_clflush_pages(pages, n_pages);
+ drm_gem_put_pages(obj, pages, true, false);
+
+ return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct sg_table *st;
+ struct page **pages;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+
+ st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+ drm_gem_put_pages(obj, pages, false, false);
+
+ return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+ long n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages;
+ void *addr;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return NULL;
+
+ addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+ drm_gem_put_pages(obj, pages, false, false);
+
+ return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ if (obj->size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (!obj->filp)
+ return -ENODEV;
+
+ ret = obj->filp->f_op->mmap(obj->filp, vma);
+ if (ret)
+ return ret;
+
+ fput(vma->vm_file);
+ vma->vm_file = get_file(obj->filp);
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM,
+ .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .open = vgem_open,
+ .preclose = vgem_preclose,
.gem_free_object_unlocked = vgem_gem_free_object,
.gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
+ .num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
+
.dumb_create = vgem_gem_dumb_create,
.dumb_map_offset = vgem_gem_dumb_map,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .gem_prime_pin = vgem_prime_pin,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = vgem_prime_get_sg_table,
+ .gem_prime_vmap = vgem_prime_vmap,
+ .gem_prime_vunmap = vgem_prime_vunmap,
+ .gem_prime_mmap = vgem_prime_mmap,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.minor = DRIVER_MINOR,
};
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
static int __init vgem_init(void)
{
}
ret = drm_dev_register(vgem_device, 0);
-
if (ret)
goto out_unref;
module_exit(vgem_exit);
MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+ struct idr fence_idr;
+ struct mutex fence_mutex;
+};
+
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
- struct page **pages;
- bool use_dma_buf;
};
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
#endif
--- /dev/null
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software")
+ * to deal in the software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * them Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+#define VGEM_FENCE_TIMEOUT (10*HZ)
+
+struct vgem_fence {
+ struct fence base;
+ struct spinlock lock;
+ struct timer_list timer;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+ return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+ return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+ return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static void vgem_fence_release(struct fence *base)
+{
+ struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+ del_timer_sync(&fence->timer);
+ fence_free(&fence->base);
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+ int size)
+{
+ snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+static const struct fence_ops vgem_fence_ops = {
+ .get_driver_name = vgem_fence_get_driver_name,
+ .get_timeline_name = vgem_fence_get_timeline_name,
+ .enable_signaling = vgem_fence_enable_signaling,
+ .signaled = vgem_fence_signaled,
+ .wait = fence_default_wait,
+ .release = vgem_fence_release,
+
+ .fence_value_str = vgem_fence_value_str,
+ .timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static void vgem_fence_timeout(unsigned long data)
+{
+ struct vgem_fence *fence = (struct vgem_fence *)data;
+
+ fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
+{
+ struct vgem_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ spin_lock_init(&fence->lock);
+ fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ fence_context_alloc(1), 1);
+
+ setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+ /* We force the fence to expire within 10s to prevent driver hangs */
+ mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
+
+ return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+ struct drm_gem_object *obj)
+{
+ struct dma_buf *dmabuf;
+
+ if (obj->dma_buf)
+ return 0;
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ obj->dma_buf = dmabuf;
+ drm_gem_object_reference(obj);
+ return 0;
+}
+
+/*
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
+ *
+ * Create and attach a fence to the vGEM handle. This fence is then exposed
+ * via the dma-buf reservation object and visible to consumers of the exported
+ * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
+ * vGEM buffer is being written to by the client and is exposed as an exclusive
+ * fence, otherwise the fence indicates the client is current reading from the
+ * buffer and all future writes should wait for the client to signal its
+ * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
+ * an exclusive fence when adding a read, or any fence when adding a write),
+ * -EBUSY is reported. Serialisation between operations should be handled
+ * by waiting upon the dma-buf.
+ *
+ * This returns the handle for the new fence that must be signaled within 10
+ * seconds (or otherwise it will automatically expire). See
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
+ *
+ * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
+ */
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct drm_vgem_fence_attach *arg = data;
+ struct vgem_file *vfile = file->driver_priv;
+ struct reservation_object *resv;
+ struct drm_gem_object *obj;
+ struct fence *fence;
+ int ret;
+
+ if (arg->flags & ~VGEM_FENCE_WRITE)
+ return -EINVAL;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, arg->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = attach_dmabuf(dev, obj);
+ if (ret)
+ goto err;
+
+ fence = vgem_fence_create(vfile, arg->flags);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Check for a conflicting fence */
+ resv = obj->dma_buf->resv;
+ if (!reservation_object_test_signaled_rcu(resv,
+ arg->flags & VGEM_FENCE_WRITE)) {
+ ret = -EBUSY;
+ goto err_fence;
+ }
+
+ /* Expose the fence via the dma-buf */
+ ret = 0;
+ mutex_lock(&resv->lock.base);
+ if (arg->flags & VGEM_FENCE_WRITE)
+ reservation_object_add_excl_fence(resv, fence);
+ else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+ reservation_object_add_shared_fence(resv, fence);
+ mutex_unlock(&resv->lock.base);
+
+ /* Record the fence in our idr for later signaling */
+ if (ret == 0) {
+ mutex_lock(&vfile->fence_mutex);
+ ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+ mutex_unlock(&vfile->fence_mutex);
+ if (ret > 0) {
+ arg->out_fence = ret;
+ ret = 0;
+ }
+ }
+err_fence:
+ if (ret) {
+ fence_signal(fence);
+ fence_put(fence);
+ }
+err:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+/*
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
+ *
+ * Signal and consume a fence ealier attached to a vGEM handle using
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
+ *
+ * All fences must be signaled within 10s of attachment or otherwise they
+ * will automatically expire (and a vgem_fence_signal_ioctl returns -ETIMEDOUT).
+ *
+ * Signaling a fence indicates to all consumers of the dma-buf that the
+ * client has completed the operation associated with the fence, and that the
+ * buffer is then ready for consumption.
+ *
+ * If the fence does not exist (or has already been signaled by the client),
+ * vgem_fence_signal_ioctl returns -ENOENT.
+ */
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct vgem_file *vfile = file->driver_priv;
+ struct drm_vgem_fence_signal *arg = data;
+ struct fence *fence;
+ int ret = 0;
+
+ if (arg->flags)
+ return -EINVAL;
+
+ mutex_lock(&vfile->fence_mutex);
+ fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+ mutex_unlock(&vfile->fence_mutex);
+ if (!fence)
+ return -ENOENT;
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ if (fence_is_signaled(fence))
+ ret = -ETIMEDOUT;
+
+ fence_signal(fence);
+ fence_put(fence);
+ return ret;
+}
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+ mutex_init(&vfile->fence_mutex);
+ idr_init(&vfile->fence_idr);
+
+ return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+ fence_signal(p);
+ fence_put(p);
+ return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+ idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+ idr_destroy(&vfile->fence_idr);
+}
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
- if (virtio_gpu_fb->obj)
- drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+ drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(obj);
return NULL;
}
drm_atomic_helper_cleanup_planes(dev, state);
}
-struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
+static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
.atomic_commit_tail = vgdev_atomic_commit_tail,
};
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ int ret;
+
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
virtio_gpu_move_null(bo, new_mem);
return 0;
}
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+
if (!ret)
vmw_bo_pin_reserved(buf, true);
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
if (unlikely(ret != 0))
goto err;
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
+ uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
+ bo->mem.start > 0 &&
+ buf->pin_count == 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) == true ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
dev_priv->enable_fb = enable_fbdev;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ /*
+ * Workaround for low memory 2D VMs to compensate for the
+ * allocation taken by fbdev
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ mem_size *= 2;
+
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *vmaster;
- if (file_priv->minor->type != DRM_MINOR_LEGACY ||
- !(flags & DRM_AUTH))
+ if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
return NULL;
ret = mutex_lock_interruptible(&dev->master_mutex);
spinlock_t hw_lock;
spinlock_t cap_lock;
bool has_dx;
+ bool assume_16bpp;
/*
* VGA registers.
par->set_fb = &vfb->base;
- if (!par->bo_ptr) {
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us.
- */
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev framebuffer.\n");
- return ret;
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- vfb->unpin(vfb);
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- return ret;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
return 0;
}
if (ret)
goto out_unlock;
+ if (!par->bo_ptr) {
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+ /*
+ * Pin before mapping. Since we don't know in what placement
+ * to pin, call into KMS to do it for us.
+ */
+ ret = vfb->pin(vfb);
+ if (ret) {
+ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+ par->vmw_bo->base.num_pages, &par->map);
+ if (ret) {
+ vfb->unpin(vfb);
+ DRM_ERROR("Could not map the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ }
+
+
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
- u32 assumed_bpp = 2;
+ u32 assumed_bpp = 4;
- /*
- * If using screen objects, then assume 32-bpp because that's what the
- * SVGA device is assuming
- */
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- assumed_bpp = 4;
+ if (dev_priv->assume_16bpp)
+ assumed_bpp = 2;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/frame.h>
#include <asm/hypervisor.h>
#include "drmP.h"
#include "vmwgfx_msg.h"
return -EINVAL;
}
-
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
/**
break;
}
+ if (retries == RETRIES)
+ return -EINVAL;
+
*msg_len = reply_len;
*msg = reply;
return 0;
}
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
/**
WARN_ON_ONCE(!stdu->defined);
- if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
- new_fb->height == mode->vdisplay)
+ new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+ if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
+ new_vfbs->surface->base_size.height == mode->vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->dmabuf)
new_content_type = SEPARATE_DMA;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
} else {
- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
content_srf = *new_vfbs->surface;
}
return ret;
}
} else if (new_content_type == SAME_AS_DISPLAY) {
- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
new_display_srf = vmw_surface_reference(new_vfbs->surface);
}
*/
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
- u32 pos = pb->pos;
- u32 *p = (u32 *)((void *)pb->mapped + pos);
- WARN_ON(pos == pb->fence);
+ u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
+
+ WARN_ON(pb->pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
- pb->pos = (pos + 8) & (pb->size_bytes - 1);
+ pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
}
/*
enum cdma_event event)
{
for (;;) {
+ struct push_buffer *pb = &cdma->push_buffer;
unsigned int space;
- if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
space = list_empty(&cdma->sync_queue) ? 1 : 0;
- else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
- struct push_buffer *pb = &cdma->push_buffer;
+ break;
+
+ case CDMA_EVENT_PUSH_BUFFER_SPACE:
space = host1x_pushbuffer_space(pb);
- } else {
+ break;
+
+ default:
WARN_ON(1);
return -EINVAL;
}
mutex_lock(&cdma->lock);
continue;
}
+
cdma->event = event;
mutex_unlock(&cdma->lock);
down(&cdma->sem);
mutex_lock(&cdma->lock);
}
+
return 0;
}
/* Start timer on next pending syncpt */
if (job->timeout)
cdma_start_timer_locked(cdma, job);
+
break;
}
/* Pop push buffer slots */
if (job->num_slots) {
struct push_buffer *pb = &cdma->push_buffer;
+
host1x_pushbuffer_pop(pb, job->num_slots);
+
if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
signal = true;
}
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
struct device *dev)
{
- u32 restart_addr;
- u32 syncpt_incrs;
- struct host1x_job *job = NULL;
- u32 syncpt_val;
struct host1x *host1x = cdma_to_host1x(cdma);
+ u32 restart_addr, syncpt_incrs, syncpt_val;
+ struct host1x_job *job = NULL;
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
syncpt_val += syncpt_incrs;
}
- /* The following sumbits from the same client may be dependent on the
+ /*
+ * The following sumbits from the same client may be dependent on the
* failed submit and therefore they may fail. Force a small timeout
- * to make the queue cleanup faster */
+ * to make the queue cleanup faster.
+ */
list_for_each_entry_from(job, &cdma->sync_queue, list)
if (job->client == cdma->timeout.client)
err = host1x_pushbuffer_init(&cdma->push_buffer);
if (err)
return err;
+
return 0;
}
/* init state on first submit with timeout value */
if (!cdma->timeout.initialized) {
int err;
+
err = host1x_hw_cdma_timeout_init(host1x, cdma,
job->syncpt_id);
if (err) {
}
}
}
+
if (!cdma->running)
host1x_hw_cdma_start(host1x, cdma);
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
}
+
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
host1x_pushbuffer_push(pb, op1, op2);
struct host1x_channel *host1x_channel_request(struct device *dev)
{
struct host1x *host = dev_get_drvdata(dev->parent);
- int max_channels = host->info->nb_channels;
+ unsigned int max_channels = host->info->nb_channels;
struct host1x_channel *channel = NULL;
- int index, err;
+ unsigned long index;
+ int err;
mutex_lock(&host->chlist_mutex);
va_start(args, fmt);
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
+
o->fn(o->ctx, o->buf, len);
}
struct output *o = data;
mutex_lock(&ch->reflock);
+
if (ch->refcount) {
mutex_lock(&ch->cdma.lock);
+
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
+
host1x_hw_show_channel_cdma(m, ch, o);
mutex_unlock(&ch->cdma.lock);
}
+
mutex_unlock(&ch->reflock);
return 0;
static void show_syncpts(struct host1x *m, struct output *o)
{
- int i;
+ unsigned int i;
+
host1x_debug_output(o, "---- syncpts ----\n");
+
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
+
if (!min && !max)
continue;
- host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+
+ host1x_debug_output(o, "id %u (%s) min %d max %d\n",
i, m->syncpt[i].name, min, max);
}
for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
u32 base_val;
+
base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
if (base_val)
- host1x_debug_output(o, "waitbase id %d val %d\n", i,
+ host1x_debug_output(o, "waitbase id %u val %d\n", i,
base_val);
}
.fn = write_to_seqfile,
.ctx = s
};
+
show_all(s->private, &o);
+
return 0;
}
.fn = write_to_seqfile,
.ctx = s
};
+
show_all_no_fifo(s->private, &o);
+
return 0;
}
}
static const struct file_operations host1x_debug_all_fops = {
- .open = host1x_debug_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = host1x_debug_open_all,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static int host1x_debug_open(struct inode *inode, struct file *file)
}
static const struct file_operations host1x_debug_fops = {
- .open = host1x_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = host1x_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void host1x_debugfs_init(struct host1x *host1x)
struct output o = {
.fn = write_to_printk
};
+
show_all(host1x, &o);
}
struct output o = {
.fn = write_to_printk
};
+
show_syncpts(host1x, &o);
}
}
static const struct host1x_info host1x01_info = {
- .nb_channels = 8,
- .nb_pts = 32,
- .nb_mlocks = 16,
- .nb_bases = 8,
- .init = host1x01_init,
- .sync_offset = 0x3000,
- .dma_mask = DMA_BIT_MASK(32),
+ .nb_channels = 8,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 8,
+ .init = host1x01_init,
+ .sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x02_info = {
.dma_mask = DMA_BIT_MASK(34),
};
-static struct of_device_id host1x_of_match[] = {
+static const struct of_device_id host1x_of_match[] = {
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
void (*start)(struct host1x_cdma *cdma);
void (*stop)(struct host1x_cdma *cdma);
void (*flush)(struct host1x_cdma *cdma);
- int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+ int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
void (*timeout_destroy)(struct host1x_cdma *cdma);
void (*freeze)(struct host1x_cdma *cdma);
void (*resume)(struct host1x_cdma *cdma, u32 getptr);
int (*init_host_sync)(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *work));
void (*set_syncpt_threshold)(
- struct host1x *host, u32 id, u32 thresh);
- void (*enable_syncpt_intr)(struct host1x *host, u32 id);
- void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+ struct host1x *host, unsigned int id, u32 thresh);
+ void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
+ void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
void (*disable_all_syncpt_intrs)(struct host1x *host);
int (*free_syncpt_irq)(struct host1x *host);
};
struct host1x_info {
- int nb_channels; /* host1x: num channels supported */
- int nb_pts; /* host1x: num syncpoints supported */
- int nb_bases; /* host1x: num syncpoints supported */
- int nb_mlocks; /* host1x: number of mlocks */
- int (*init)(struct host1x *); /* initialize per SoC ops */
- int sync_offset;
- u64 dma_mask; /* mask of addressable memory */
+ unsigned int nb_channels; /* host1x: number of channels supported */
+ unsigned int nb_pts; /* host1x: number of syncpoints supported */
+ unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
+ unsigned int nb_mlocks; /* host1x: number of mlocks supported */
+ int (*init)(struct host1x *host1x); /* initialize per SoC ops */
+ unsigned int sync_offset; /* offset of syncpoint registers */
+ u64 dma_mask; /* mask of addressable memory */
};
struct host1x {
struct clk *clk;
struct mutex intr_mutex;
- struct workqueue_struct *intr_wq;
int intr_syncpt_irq;
const struct host1x_syncpt_ops *syncpt_op;
}
static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
- u32 id, u32 thresh)
+ unsigned int id,
+ u32 thresh)
{
host->intr_op->set_syncpt_threshold(host, id, thresh);
}
static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
- u32 id)
+ unsigned int id)
{
host->intr_op->enable_syncpt_intr(host, id);
}
static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
- u32 id)
+ unsigned int id)
{
host->intr_op->disable_syncpt_intr(host, id);
}
static inline int host1x_hw_channel_init(struct host1x *host,
struct host1x_channel *channel,
- int chid)
+ unsigned int id)
{
- return host->channel_op->init(channel, host, chid);
+ return host->channel_op->init(channel, host, id);
}
static inline int host1x_hw_channel_submit(struct host1x *host,
static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
struct host1x_cdma *cdma,
- u32 syncpt_id)
+ unsigned int syncpt)
{
- return host->cdma_op->timeout_init(cdma, syncpt_id);
+ return host->cdma_op->timeout_init(cdma, syncpt);
}
static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
- u32 i;
+ unsigned int i;
for (i = 0; i < syncpt_incrs; i++)
host1x_syncpt_incr(cdma->timeout.syncpt);
&pb->phys, getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
+
wmb();
}
struct host1x_channel *ch = cdma_to_channel(cdma);
mutex_lock(&cdma->lock);
+
if (cdma->running) {
host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
cdma->running = false;
}
+
mutex_unlock(&cdma->lock);
}
u32 cmdproc_stop;
dev_dbg(host1x->dev,
- "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+ "resuming channel (id %u, DMAGET restart = 0x%x)\n",
ch->id, getptr);
cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop &= ~(BIT(ch->id));
+ cmdproc_stop &= ~BIT(ch->id);
host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
cdma->torndown = false;
*/
static void cdma_timeout_handler(struct work_struct *work)
{
+ u32 prev_cmdproc, cmdproc_stop, syncpt_val;
struct host1x_cdma *cdma;
struct host1x *host1x;
struct host1x_channel *ch;
- u32 syncpt_val;
-
- u32 prev_cmdproc, cmdproc_stop;
-
cdma = container_of(to_delayed_work(work), struct host1x_cdma,
timeout.wq);
host1x = cdma_to_host1x(cdma);
return;
}
- dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
- __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
- syncpt_val, cdma->timeout.syncpt_val);
+ dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
+ __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+ syncpt_val, cdma->timeout.syncpt_val);
/* stop HW, resetting channel/module */
host1x_hw_cdma_freeze(host1x, cdma);
/*
* Init timeout resources
*/
-static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
{
INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
cdma->timeout.initialized = true;
{
if (cdma->timeout.initialized)
cancel_delayed_work(&cdma->timeout.wq);
+
cdma->timeout.initialized = false;
}
*/
for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+
offset += i * sizeof(u32);
trace_host1x_cdma_push_gather(dev_name(dev), bo,
struct host1x_job_gather *g = &job->gathers[i];
u32 op1 = host1x_opcode_gather(g->words);
u32 op2 = g->base + g->offset;
+
trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
host1x_cdma_push(cdma, op1, op2);
}
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
- u32 id, value;
+ unsigned int id;
+ u32 value;
value = host1x_syncpt_read_max(sp);
id = sp->base->id;
static unsigned int show_channel_command(struct output *o, u32 val)
{
- unsigned mask;
- unsigned subop;
+ unsigned int mask, subop;
switch (val >> 28) {
case HOST1X_OPCODE_SETCLASS:
val >> 6 & 0x3ff,
val >> 16 & 0xfff, mask);
return hweight8(mask);
- } else {
- host1x_debug_output(o, "SETCL(class=%03x)\n",
- val >> 6 & 0x3ff);
- return 0;
}
+ host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ return 0;
+
case HOST1X_OPCODE_INCR:
host1x_debug_output(o, "INCR(offset=%03x, [",
val >> 16 & 0xfff);
struct host1x_job *job;
list_for_each_entry(job, &cdma->sync_queue, list) {
- int i;
+ unsigned int i;
+
host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
job, job->syncpt_id, job->syncpt_end,
job->first_get, job->timeout,
cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
- host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
!ch->cdma.push_buffer.mapped) {
if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT)
+ HOST1X_UCLASS_WAIT_SYNCPT)
host1x_debug_output(o, "waiting on syncpt %d val %d\n",
cbread >> 24, cbread & 0xffffff);
else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
- HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
-
+ HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
base = (cbread >> 16) & 0xff;
baseval =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
u32 val, rd_ptr, wr_ptr, start, end;
unsigned int data_count = 0;
- host1x_debug_output(o, "%d: fifo:\n", ch->id);
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
host1x_debug_output(o, "FIFOSTAT %08x\n", val);
static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
{
- int i;
+ unsigned int i;
host1x_debug_output(o, "---- mlocks ----\n");
+
for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
u32 owner =
host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
- host1x_debug_output(o, "%d: locked by channel %d\n",
+ host1x_debug_output(o, "%u: locked by channel %u\n",
i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
- host1x_debug_output(o, "%d: locked by cpu\n", i);
+ host1x_debug_output(o, "%u: locked by cpu\n", i);
else
- host1x_debug_output(o, "%d: unlocked\n", i);
+ host1x_debug_output(o, "%u: unlocked\n", i);
}
+
host1x_debug_output(o, "\n");
}
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
- queue_work(host->intr_wq, &syncpt->intr.work);
+ schedule_work(&syncpt->intr.work);
}
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x *host = dev_id;
unsigned long reg;
- int i, id;
+ unsigned int i, id;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
- u32 i;
+ unsigned int i;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
host1x_sync_writel(host, 0xffffffffu,
}
}
-static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
- void (*syncpt_thresh_work)(struct work_struct *))
+static int
+_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+ void (*syncpt_thresh_work)(struct work_struct *))
{
- int i, err;
+ unsigned int i;
+ int err;
host1x_hw_intr_disable_all_syncpt_intrs(host);
}
static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
- u32 id, u32 thresh)
+ unsigned int id,
+ u32 thresh)
{
host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}
-static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
+ unsigned int id)
{
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
}
-static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
+ unsigned int id)
{
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
static int _host1x_free_syncpt_irq(struct host1x *host)
{
+ unsigned int i;
+
devm_free_irq(host->dev, host->intr_syncpt_irq, host);
- flush_workqueue(host->intr_wq);
+
+ for (i = 0; i < host->info->nb_pts; i++)
+ cancel_work_sync(&host->syncpt[i].intr.work);
+
return 0;
}
*/
static void syncpt_restore(struct host1x_syncpt *sp)
{
+ u32 min = host1x_syncpt_read_min(sp);
struct host1x *host = sp->host;
- int min = host1x_syncpt_read_min(sp);
+
host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
+
host1x_sync_writel(host, sp->base_val,
HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
+
sp->base_val =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
if (!host1x_syncpt_client_managed(sp) &&
host1x_syncpt_idle(sp))
return -EINVAL;
+
host1x_sync_writel(host, BIT_MASK(sp->id),
HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
wmb();
/* remove a wait pointed to by patch_addr */
static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
- u32 override = host1x_class_host_wait_syncpt(
- HOST1X_SYNCPT_RESERVED, 0);
+ u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
*((u32 *)patch_addr) = override;
+
return 0;
}
static void action_wakeup(struct host1x_waitlist *waiter)
{
wait_queue_head_t *wq = waiter->data;
+
wake_up(wq);
}
static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
wait_queue_head_t *wq = waiter->data;
+
wake_up_interruptible(wq);
}
typedef void (*action_handler)(struct host1x_waitlist *waiter);
-static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
action_submit_complete,
action_wakeup,
action_wakeup_interruptible,
host1x_syncpt_load(host->syncpt + id));
}
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref)
{
return 0;
}
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
struct host1x_waitlist *waiter = ref;
struct host1x_syncpt *syncpt;
mutex_init(&host->intr_mutex);
host->intr_syncpt_irq = irq_sync;
- host->intr_wq = create_workqueue("host_syncpt");
- if (!host->intr_wq)
- return -ENOMEM;
for (id = 0; id < nb_pts; ++id) {
struct host1x_syncpt *syncpt = host->syncpt + id;
INIT_LIST_HEAD(&syncpt->intr.wait_head);
snprintf(syncpt->intr.thresh_irq_name,
sizeof(syncpt->intr.thresh_irq_name),
- "host1x_sp_%02d", id);
+ "host1x_sp_%02u", id);
}
host1x_intr_start(host);
void host1x_intr_deinit(struct host1x *host)
{
host1x_intr_stop(host);
- destroy_workqueue(host->intr_wq);
}
void host1x_intr_start(struct host1x *host)
if (!list_empty(&syncpt[id].intr.wait_head)) {
/* output diagnostics */
mutex_unlock(&host->intr_mutex);
- pr_warn("%s cannot stop syncpt intr id=%d\n",
+ pr_warn("%s cannot stop syncpt intr id=%u\n",
__func__, id);
return;
}
*
* This is a non-blocking api.
*/
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref);
* You must call this if you passed non-NULL as ref.
* @ref the ref returned from host1x_intr_add_action()
*/
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
if (host1x_syncpt_is_expired(sp, wait->thresh)) {
dev_dbg(host->dev,
- "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+ "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
wait->syncpt_id, sp->name, wait->thresh,
host1x_syncpt_read_min(sp));
for (i = 0; i < job->num_gathers; i++) {
struct host1x_job_gather *g = &job->gathers[i];
+
size += g->words * sizeof(u32);
}
bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
for (i = 0; i < job->num_waitchk; i++) {
u32 syncpt_id = job->waitchk[i].syncpt_id;
+
if (syncpt_id < host1x_syncpt_nb_pts(host))
set_bit(syncpt_id, waitchk_mask);
}
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+
host1x_bo_unpin(unpin->bo, unpin->sgt);
host1x_bo_put(unpin->bo);
}
+
job->num_unpins = 0;
if (job->gather_copy_size)
dma_free_wc(job->channel->dev, job->gather_copy_size,
- job->gather_copy_mapped, job->gather_copy);
+ job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
return NULL;
}
- name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+ name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
return NULL;
void host1x_syncpt_restore(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
- u32 i;
+ unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
host1x_hw_syncpt_restore(host, sp_base + i);
+
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+
wmb();
}
void host1x_syncpt_save(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
- u32 i;
+ unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
if (host1x_syncpt_client_managed(sp_base + i))
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
u32 val;
+
val = host1x_hw_syncpt_load(sp->host, sp);
trace_host1x_syncpt_load_min(sp->id, val);
*/
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
- u32 val;
host1x_hw_syncpt_load_wait_base(sp->host, sp);
- val = sp->base_val;
- return val;
+
+ return sp->base_val;
}
/*
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
host1x_hw_syncpt_load(sp->host, sp);
+
return host1x_syncpt_is_expired(sp, thresh);
}
* Main entrypoint for syncpoint value waits.
*/
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
- u32 *value)
+ u32 *value)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
void *ref;
if (host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = host1x_syncpt_load(sp);
+
return 0;
}
if (host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = val;
+
goto done;
}
/* wait for the syncpoint, or timeout, or signal */
while (timeout) {
long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
- int remain = wait_event_interruptible_timeout(wq,
+ int remain;
+
+ remain = wait_event_interruptible_timeout(wq,
syncpt_load_min_is_expired(sp, thresh),
check);
if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = host1x_syncpt_load(sp);
+
err = 0;
+
break;
}
+
if (remain < 0) {
err = remain;
break;
}
+
timeout -= check;
+
if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
dev_warn(sp->host->dev,
- "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+ "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
current->comm, sp->id, sp->name,
thresh, timeout);
host1x_debug_dump_syncpts(sp->host);
+
if (check_count == MAX_STUCK_CHECK_COUNT)
host1x_debug_dump(sp->host);
+
check_count++;
}
}
+
host1x_intr_put_ref(sp->host, sp->id, ref);
done:
{
u32 current_val;
u32 future_val;
+
smp_rmb();
+
current_val = (u32)atomic_read(&sp->min_val);
future_val = (u32)atomic_read(&sp->max_val);
{
struct host1x_syncpt_base *bases;
struct host1x_syncpt *syncpt;
- int i;
+ unsigned int i;
- syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+ syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
GFP_KERNEL);
if (!syncpt)
return -ENOMEM;
- bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+ bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
GFP_KERNEL);
if (!bases)
return -ENOMEM;
unsigned long flags)
{
struct host1x *host = dev_get_drvdata(dev->parent);
+
return host1x_syncpt_alloc(host, dev, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
void host1x_syncpt_deinit(struct host1x *host)
{
- int i;
struct host1x_syncpt *sp = host->syncpt;
+ unsigned int i;
+
for (i = 0; i < host->info->nb_pts; i++, sp++)
kfree(sp->name);
}
/*
* Read max. It indicates how many operations there are in queue, either in
* channel or in a software thread.
- * */
+ */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
smp_rmb();
+
return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
smp_rmb();
+
return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
}
EXPORT_SYMBOL(host1x_syncpt_read);
-int host1x_syncpt_nb_pts(struct host1x *host)
+unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
}
-int host1x_syncpt_nb_bases(struct host1x *host)
+unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
return host->info->nb_bases;
}
-int host1x_syncpt_nb_mlocks(struct host1x *host)
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
return host->info->nb_mlocks;
}
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
if (host->info->nb_pts < id)
return NULL;
+
return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);
};
struct host1x_syncpt {
- int id;
+ unsigned int id;
atomic_t min_val;
atomic_t max_val;
u32 base_val;
void host1x_syncpt_deinit(struct host1x *host);
/* Return number of sync point supported. */
-int host1x_syncpt_nb_pts(struct host1x *host);
+unsigned int host1x_syncpt_nb_pts(struct host1x *host);
/* Return number of wait bases supported. */
-int host1x_syncpt_nb_bases(struct host1x *host);
+unsigned int host1x_syncpt_nb_bases(struct host1x *host);
/* Return number of mlocks supported. */
-int host1x_syncpt_nb_mlocks(struct host1x *host);
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
/*
* Check sync point sanity. If max is larger than min, there have too many
static int ipu_bus_format_to_map(u32 fmt)
{
switch (fmt) {
+ default:
+ WARN_ON(1);
+ /* fall-through */
case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
case MEDIA_BUS_FMT_RGB565_1X16:
return IPU_DC_MAP_LVDS666;
case MEDIA_BUS_FMT_BGR888_1X24:
return IPU_DC_MAP_BGR24;
- default:
- return -EINVAL;
}
}
dc->di = ipu_di_get_num(di);
map = ipu_bus_format_to_map(bus_format);
- if (map < 0) {
- dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
- return map;
- }
/*
* In interlaced mode we need more counters to create the asymmetric
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
di->id, sig->mode.hactive, sig->mode.vactive);
- if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
- return -EINVAL;
-
dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
#define DMFC_DP_CHAN_6B_24 16
#define DMFC_DP_CHAN_6F_29 24
-#define DMFC_FIFO_SIZE_64 (3 << 3)
-#define DMFC_FIFO_SIZE_128 (2 << 3)
-#define DMFC_FIFO_SIZE_256 (1 << 3)
-#define DMFC_FIFO_SIZE_512 (0 << 3)
-
-#define DMFC_SEGMENT(x) ((x & 0x7) << 0)
-#define DMFC_BURSTSIZE_128 (0 << 6)
-#define DMFC_BURSTSIZE_64 (1 << 6)
-#define DMFC_BURSTSIZE_32 (2 << 6)
-#define DMFC_BURSTSIZE_16 (3 << 6)
-
struct dmfc_channel_data {
int ipu_channel;
unsigned long channel_reg;
struct dmfc_channel {
unsigned slots;
- unsigned slotmask;
- unsigned segment;
- int burstsize;
struct ipu_soc *ipu;
struct ipu_dmfc_priv *priv;
const struct dmfc_channel_data *data;
struct device *dev;
struct dmfc_channel channels[DMFC_NUM_CHANNELS];
struct mutex mutex;
- unsigned long bandwidth_per_slot;
void __iomem *base;
int use_count;
};
}
EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
-static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
- int segment, int burstsize)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- u32 val, field;
-
- dev_dbg(priv->dev,
- "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
- slots, segment, dmfc->data->ipu_channel);
-
- switch (slots) {
- case 1:
- field = DMFC_FIFO_SIZE_64;
- break;
- case 2:
- field = DMFC_FIFO_SIZE_128;
- break;
- case 4:
- field = DMFC_FIFO_SIZE_256;
- break;
- case 8:
- field = DMFC_FIFO_SIZE_512;
- break;
- default:
- return -EINVAL;
- }
-
- switch (burstsize) {
- case 16:
- field |= DMFC_BURSTSIZE_16;
- break;
- case 32:
- field |= DMFC_BURSTSIZE_32;
- break;
- case 64:
- field |= DMFC_BURSTSIZE_64;
- break;
- case 128:
- field |= DMFC_BURSTSIZE_128;
- break;
- }
-
- field |= DMFC_SEGMENT(segment);
-
- val = readl(priv->base + dmfc->data->channel_reg);
-
- val &= ~(0xff << dmfc->data->shift);
- val |= field << dmfc->data->shift;
-
- writel(val, priv->base + dmfc->data->channel_reg);
-
- dmfc->slots = slots;
- dmfc->segment = segment;
- dmfc->burstsize = burstsize;
- dmfc->slotmask = ((1 << slots) - 1) << segment;
-
- return 0;
-}
-
-static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
- unsigned long bandwidth)
-{
- int slots = 1;
-
- while (slots * priv->bandwidth_per_slot < bandwidth)
- slots *= 2;
-
- return slots;
-}
-
-static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
-{
- unsigned slotmask_need, slotmask_used = 0;
- int i, segment = 0;
-
- slotmask_need = (1 << slots) - 1;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++)
- slotmask_used |= priv->channels[i].slotmask;
-
- while (slotmask_need <= 0xff) {
- if (!(slotmask_used & slotmask_need))
- return segment;
-
- slotmask_need <<= 1;
- segment++;
- }
-
- return -EBUSY;
-}
-
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- int i;
-
- dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
- dmfc->slots, dmfc->segment);
-
- mutex_lock(&priv->mutex);
-
- if (!dmfc->slots)
- goto out;
-
- dmfc->slotmask = 0;
- dmfc->slots = 0;
- dmfc->segment = 0;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++)
- priv->channels[i].slotmask = 0;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
- if (priv->channels[i].slots > 0) {
- priv->channels[i].segment =
- dmfc_find_slots(priv, priv->channels[i].slots);
- priv->channels[i].slotmask =
- ((1 << priv->channels[i].slots) - 1) <<
- priv->channels[i].segment;
- }
- }
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
- if (priv->channels[i].slots > 0)
- ipu_dmfc_setup_channel(&priv->channels[i],
- priv->channels[i].slots,
- priv->channels[i].segment,
- priv->channels[i].burstsize);
- }
-out:
- mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
-
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
- unsigned long bandwidth_pixel_per_second, int burstsize)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
- int segment = -1, ret = 0;
-
- dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
- bandwidth_pixel_per_second / 1000000,
- dmfc->data->ipu_channel);
-
- ipu_dmfc_free_bandwidth(dmfc);
-
- mutex_lock(&priv->mutex);
-
- if (slots > 8) {
- ret = -EBUSY;
- goto out;
- }
-
- /* For the MEM_BG channel, first try to allocate twice the slots */
- if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
- segment = dmfc_find_slots(priv, slots * 2);
- else if (slots < 2)
- /* Always allocate at least 128*4 bytes (2 slots) */
- slots = 2;
-
- if (segment >= 0)
- slots *= 2;
- else
- segment = dmfc_find_slots(priv, slots);
- if (segment < 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
-
-out:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-
void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
void ipu_dmfc_put(struct dmfc_channel *dmfc)
{
- ipu_dmfc_free_bandwidth(dmfc);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_put);
priv->channels[i].priv = priv;
priv->channels[i].ipu = ipu;
priv->channels[i].data = &dmfcdata[i];
- }
-
- writel(0x0, priv->base + DMFC_WR_CHAN);
- writel(0x0, priv->base + DMFC_DP_CHAN);
- /*
- * We have a total bandwidth of clkrate * 4pixel divided
- * into 8 slots.
- */
- priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
-
- dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
- priv->bandwidth_per_slot / 1000000);
+ if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
+ dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
+ dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
+ priv->channels[i].slots = 2;
+ }
+ writel(0x00000050, priv->base + DMFC_WR_CHAN);
+ writel(0x00005654, priv->base + DMFC_DP_CHAN);
writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
writel(0x00000003, priv->base + DMFC_GENERAL1);
*
* * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
* * muxless: Dual GPUs but only one of them is connected to outputs.
- * The other one is merely used to offload rendering, its results
- * are copied over PCIe into the framebuffer. On Linux this is
- * supported with DRI PRIME.
+ * The other one is merely used to offload rendering, its results
+ * are copied over PCIe into the framebuffer. On Linux this is
+ * supported with DRI PRIME.
*
* Hybrid graphics started to appear in the late Naughties and were initially
* all muxed. Newer laptops moved to a muxless architecture for cost reasons.
* * OFF: Power off the device not in use.
* * ON: Power on the device not in use.
* * IGD: Switch to the integrated graphics device.
- * Power on the integrated GPU if necessary, power off the discrete GPU.
- * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
- * have opened device files of the GPUs or the audio client. If the
- * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
- * and /dev/snd/controlC1 to identify processes blocking the switch.
+ * Power on the integrated GPU if necessary, power off the discrete GPU.
+ * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
+ * have opened device files of the GPUs or the audio client. If the
+ * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
+ * and /dev/snd/controlC1 to identify processes blocking the switch.
* * DIS: Switch to the discrete graphics device.
* * DIGD: Delayed switch to the integrated graphics device.
- * This will perform the switch once the last user space process has
- * closed the device files of the GPUs and the audio client.
+ * This will perform the switch once the last user space process has
+ * closed the device files of the GPUs and the audio client.
* * DDIS: Delayed switch to the discrete graphics device.
* * MIGD: Mux-only switch to the integrated graphics device.
- * Does not remap console or change the power state of either gpu.
- * If the integrated GPU is currently off, the screen will turn black.
- * If it is on, the screen will show whatever happens to be in VRAM.
- * Either way, the user has to blindly enter the command to switch back.
+ * Does not remap console or change the power state of either gpu.
+ * If the integrated GPU is currently off, the screen will turn black.
+ * If it is on, the screen will show whatever happens to be in VRAM.
+ * Either way, the user has to blindly enter the command to switch back.
* * MDIS: Mux-only switch to the discrete graphics device.
*
* For GPUs whose power state is controlled by the driver's runtime pm,
struct elo_priv *priv = hid_get_drvdata(hdev);
hid_hw_stop(hdev);
- flush_workqueue(wq);
+ cancel_delayed_work_sync(&priv->work);
kfree(priv);
}
#define MT_QUIRK_ALWAYS_VALID (1 << 4)
#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
+#define MT_QUIRK_CONFIDENCE (1 << 7)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
#define MT_QUIRK_NO_AREA (1 << 9)
#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
bool inrange_state; /* is the finger in proximity of the sensor? */
+ bool confidence_state; /* is the touch made by a finger? */
};
struct mt_class {
return 1;
case HID_DG_CONFIDENCE:
if (cls->name == MT_CLS_WIN_8 &&
- field->application == HID_DG_TOUCHPAD) {
- cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
- cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
- }
+ field->application == HID_DG_TOUCHPAD)
+ cls->quirks |= MT_QUIRK_CONFIDENCE;
mt_store_field(usage, td, hi);
return 1;
case HID_DG_TIPSWITCH:
return;
if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
+ int active;
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
struct input_mt *mt = input->mt;
return;
}
+ if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
+ s->confidence_state = 1;
+ active = (s->touch_state || s->inrange_state) &&
+ s->confidence_state;
+
input_mt_slot(input, slotnum);
- input_mt_report_slot_state(input, MT_TOOL_FINGER,
- s->touch_state || s->inrange_state);
- if (s->touch_state || s->inrange_state) {
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
+ if (active) {
/* this finger is in proximity of the sensor */
int wide = (s->w > s->h);
/* divided by two to match visual scale of touch */
td->curdata.touch_state = value;
break;
case HID_DG_CONFIDENCE:
+ if (quirks & MT_QUIRK_CONFIDENCE)
+ td->curdata.confidence_state = value;
if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
td->curvalid = value;
break;
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
+ /* Ntrig Panel */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_NTRIG, 0x1b05) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
goto inval;
} else if (uref->usage_index >= field->report_count)
goto inval;
-
- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
}
+ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values > field->report_count))
+ goto inval;
+
switch (cmd) {
case HIDIOCGUSAGE:
uref->value = field->value[uref->usage_index];
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/ctype.h>
#include <linux/i8k.h>
static DEFINE_MUTEX(i8k_mutex);
static char bios_version[4];
+static char bios_machineid[16];
static struct device *i8k_hwmon_dev;
static u32 i8k_hwmon_flags;
static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
+static bool disallow_fan_type_call;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
#if IS_ENABLED(CONFIG_I8K)
-static bool restricted;
+static bool restricted = true;
module_param(restricted, bool, 0);
-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
static bool power_status;
module_param(power_status, bool, 0600);
-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
#endif
static uint fan_mult;
/*
* Read the fan type.
*/
-static int i8k_get_fan_type(int fan)
+static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+ if (disallow_fan_type_call)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(®s) ? : regs.eax & 0xff;
}
+static int i8k_get_fan_type(int fan)
+{
+ /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
+ static int types[2] = { INT_MIN, INT_MIN };
+
+ if (types[fan] == INT_MIN)
+ types[fan] = _i8k_get_fan_type(fan);
+
+ return types[fan];
+}
+
/*
* Read the fan nominal rpm for specific fan speed.
*/
switch (cmd) {
case I8K_BIOS_VERSION:
+ if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
+ !isdigit(bios_version[2]))
+ return -EINVAL;
+
val = (bios_version[0] << 16) |
(bios_version[1] << 8) | bios_version[2];
break;
case I8K_MACHINE_ID:
- memset(buff, 0, 16);
- strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
- sizeof(buff));
+ if (restricted && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ memset(buff, 0, sizeof(buff));
+ strlcpy(buff, bios_machineid, sizeof(buff));
break;
case I8K_FN_STATUS:
seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
I8K_PROC_FMT,
bios_version,
- i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
cpu_temp,
left_fan, right_fan, left_speed, right_speed,
ac_power, fn_key);
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_type_call &&
+ (index == 9 || index == 12))
+ return 0;
if (index >= 0 && index <= 1 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
return 0;
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
- /* First fan attributes, if fan type is OK */
- err = i8k_get_fan_type(0);
+ /* First fan attributes, if fan status or type is OK */
+ err = i8k_get_fan_status(0);
+ if (err < 0)
+ err = i8k_get_fan_type(0);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
- /* Second fan attributes, if fan type is OK */
- err = i8k_get_fan_type(1);
+ /* Second fan attributes, if fan status or type is OK */
+ err = i8k_get_fan_status(1);
+ if (err < 0)
+ err = i8k_get_fan_type(1);
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+/*
+ * On some machines once I8K_SMM_GET_FAN_TYPE is issued then CPU fan speed
+ * randomly going up and down due to bug in Dell SMM or BIOS. Here is blacklist
+ * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+ */
+static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
{
- /*
- * CPU fan speed going up and down on Dell Studio XPS 8000
- * for unknown reasons.
- */
.ident = "Dell Studio XPS 8000",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
},
},
{
- /*
- * CPU fan speed going up and down on Dell Studio XPS 8100
- * for unknown reasons.
- */
.ident = "Dell Studio XPS 8100",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
},
},
+ {
+ .ident = "Dell Inspiron 580",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
+ },
+ },
{ }
};
/*
* Get DMI information
*/
- if (!dmi_check_system(i8k_dmi_table) ||
- dmi_check_system(i8k_blacklist_dmi_table)) {
+ if (!dmi_check_system(i8k_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
+ disallow_fan_type_call = true;
+
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
+ strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ sizeof(bios_machineid));
/*
* Get SMM Dell signature
*/
static int read_registers(struct fam15h_power_data *data)
{
- int this_cpu, ret, cpu;
int core, this_core;
cpumask_var_t mask;
+ int ret, cpu;
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
if (!ret)
memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
get_online_cpus();
- this_cpu = smp_processor_id();
/*
* Choose the first online core of each compute unit, and then
cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
}
- if (cpumask_test_cpu(this_cpu, mask))
- do_read_registers_on_cu(data);
+ on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
- smp_call_function_many(mask, do_read_registers_on_cu, data, true);
put_online_cpus();
-
free_cpumask_var(mask);
return 0;
int kind;
u32 flags;
- int update_interval; /* in milliseconds */
+ unsigned int update_interval; /* in milliseconds */
u8 config_orig; /* Original configuration register value */
u8 convrate_orig; /* Original conversion rate register value */
if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
- * buffer. As such zero-out the buffer so that we don't end
- * up with stale data.
- *
- * Since the tracer is still enabled drvdata::buf
- * can't be NULL.
+ * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+ * so we don't have to explicitly clear it. Also, since the
+ * tracer is still enabled drvdata::buf can't be NULL.
*/
- memset(drvdata->buf, 0, drvdata->size);
tmc_etr_enable_hw(drvdata);
} else {
/*
*/
vaddr = drvdata->vaddr;
paddr = drvdata->paddr;
- drvdata->buf = NULL;
+ drvdata->buf = drvdata->vaddr = NULL;
}
drvdata->reading = false;
int i;
bool found = false;
struct coresight_node *node;
- struct coresight_connection *conn;
/* An activated sink has been found. Enqueue the element */
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
- if (_coresight_build_path(conn->child_dev, path) == 0) {
+ struct coresight_device *child_dev = csdev->conns[i].child_dev;
+
+ if (child_dev && _coresight_build_path(child_dev, path) == 0) {
found = true;
break;
}
struct list_head *coresight_build_path(struct coresight_device *csdev)
{
struct list_head *path;
+ int rc;
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
INIT_LIST_HEAD(path);
- if (_coresight_build_path(csdev, path)) {
+ rc = _coresight_build_path(csdev, path);
+ if (rc) {
kfree(path);
- path = NULL;
+ return ERR_PTR(rc);
}
return path;
goto out;
path = coresight_build_path(csdev);
- if (!path) {
+ if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
+ ret = PTR_ERR(path);
goto out;
}
struct platform_device *mux_pdev;
#endif
struct platform_device *tco_pdev;
+
+ /*
+ * If set to true the host controller registers are reserved for
+ * ACPI AML use. Protected by acpi_lock.
+ */
+ bool acpi_reserved;
+ struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC (1 << 0)
int ret = 0, xact = 0;
struct i801_priv *priv = i2c_get_adapdata(adap);
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved) {
+ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
+ }
+
pm_runtime_get_sync(&priv->pci_dev->dev);
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
out:
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
return ret;
}
priv->tco_pdev = pdev;
}
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+{
+ struct i801_priv *priv = handler_context;
+ struct pci_dev *pdev = priv->pci_dev;
+ acpi_status status;
+
+ /*
+ * Once BIOS AML code touches the OpRegion we warn and inhibit any
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+
+ /*
+ * BIOS is accessing the host controller so prevent it from
+ * suspending automatically from now on.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ }
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ status = acpi_os_read_port(address, (u32 *)value, bits);
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (adev) {
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+ NULL, priv);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ }
+
+ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (!adev)
+ return;
+
+ acpi_remove_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved)
+ pm_runtime_put(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
priv->adapter.dev.parent = &dev->dev;
ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
priv->adapter.retries = 3;
+ mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
switch (dev->device) {
return -ENODEV;
}
- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
- if (err) {
+ if (i801_acpi_probe(priv))
return -ENODEV;
- }
err = pcim_iomap_regions(dev, 1 << SMBBAR,
dev_driver_string(&dev->dev));
"Failed to request SMBus region 0x%lx-0x%Lx\n",
priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
+ i801_acpi_remove(priv);
return err;
}
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+ i801_acpi_remove(priv);
return err;
}
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
+ i801_acpi_remove(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
return result;
for (i = 0; i < length; i++) {
- /* for the last byte TWSI_CTL_AAK must not be set */
- if (i + 1 == length)
+ /*
+ * For the last byte to receive TWSI_CTL_AAK must not be set.
+ *
+ * A special case is I2C_M_RECV_LEN where we don't know the
+ * additional length yet. If recv_len is set we assume we're
+ * not reading the final byte and therefore need to set
+ * TWSI_CTL_AAK.
+ */
+ if ((i + 1 == length) && !(recv_len && i == 0))
final_read = true;
/* clear iflg to allow next event */
data[i] = octeon_i2c_data_read(i2c);
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(i2c->dev,
- "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
- __func__, data[i]);
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
- }
length += data[i];
}
}
}
+ idx = 0;
+
do {
if (msgs[idx].len == 0) {
ret = -EINVAL;
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
- goto unprepare_div_clk;
+ goto disable_div_clk;
}
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
*/
-int __init
-i2c_register_board_info(int busnum,
- struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
int status;
mux->data.idle_in_use = true;
/* map address from "reg" if exists */
- if (of_address_to_resource(np, 0, &res)) {
+ if (of_address_to_resource(np, 0, &res) == 0) {
mux->data.reg_size = resource_size(&res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(mux->data.reg))
.remove = i2c_mux_reg_remove,
.driver = {
.name = "i2c-mux-reg",
+ .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
},
};
mutex_lock(&st->buf_lock);
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
- if (ret)
+ if (ret < 0)
goto error_ret;
st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
- if (ret)
+ if (ret < 0)
goto error_ret;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;
int st_accel_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
}
static const struct iio_trigger_ops st_accel_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
#else
st = iio_priv(indio_dev);
- st->reg = devm_regulator_get(&spi->dev, "vref");
- if (!IS_ERR_OR_NULL(st->reg)) {
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
return ret;
st->vref_mv = ret / 1000;
} else {
+ /* Any other error indicates that the regulator does exist */
+ if (PTR_ERR(st->reg) != -ENODEV)
+ return PTR_ERR(st->reg);
/* Use internal reference */
st->vref_mv = 2500;
}
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ s64 timestamp;
- /* If we have a status register, check if this IRQ came from us */
- if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
- u8 status;
-
- len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.addr_stat_drdy,
- &status);
- if (len < 0)
- dev_err(sdata->dev, "could not read channel status\n");
-
- /*
- * If this was not caused by any channels on this sensor,
- * return IRQ_NONE
- */
- if (!(status & (u8)indio_dev->active_scan_mask[0]))
- return IRQ_NONE;
- }
+ /* If we do timetamping here, do it before reading the values */
+ if (sdata->hw_irq_trigger)
+ timestamp = sdata->hw_timestamp;
+ else
+ timestamp = iio_get_time_ns();
len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
if (len < 0)
goto st_sensors_get_buffer_element_error;
iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
- pf->timestamp);
+ timestamp);
st_sensors_get_buffer_element_error:
iio_trigger_notify_done(indio_dev->trig);
if (err < 0)
return err;
+ /* Disable DRDY, this might be still be enabled after reboot. */
+ err = st_sensors_set_dataready_irq(indio_dev, false);
+ if (err < 0)
+ return err;
+
if (sdata->current_fullscale) {
err = st_sensors_set_fullscale(indio_dev,
sdata->current_fullscale->num);
else
drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
+ /* Flag to the poll function that the hardware trigger is in use */
+ sdata->hw_irq_trigger = enable;
+
/* Enable/Disable the interrupt generator for data ready. */
err = st_sensors_write_data_with_mask(indio_dev,
sdata->sensor_settings->drdy_irq.addr,
#include <linux/iio/common/st_sensors.h>
#include "st_sensors_core.h"
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ /* Get the time stamp as close in time as possible */
+ sdata->hw_timestamp = iio_get_time_ns();
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * If this trigger is backed by a hardware interrupt and we have a
+ * status register, check if this IRQ came from us
+ */
+ if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+ u8 status;
+
+ ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ &status);
+ if (ret < 0) {
+ dev_err(sdata->dev, "could not read channel status\n");
+ goto out_poll;
+ }
+ /*
+ * the lower bits of .active_scan_mask[0] is directly mapped
+ * to the channels on the sensor: either bit 0 for
+ * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+ * gyroscopes or magnetometers. No sensor use more than 3
+ * channels, so cut the other status bits here.
+ */
+ status &= 0x07;
+
+ /*
+ * If this was not caused by any channels on this sensor,
+ * return IRQ_NONE
+ */
+ if (!indio_dev->active_scan_mask)
+ return IRQ_NONE;
+ if (!(status & (u8)indio_dev->active_scan_mask[0]))
+ return IRQ_NONE;
+ }
+
+out_poll:
+ /* It's our IRQ: proceed to handle the register polling */
+ iio_trigger_poll_chained(p);
+ return IRQ_HANDLED;
+}
+
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
return -ENOMEM;
}
+ iio_trigger_set_drvdata(sdata->trig, indio_dev);
+ sdata->trig->ops = trigger_ops;
+ sdata->trig->dev.parent = sdata->dev;
+
irq = sdata->get_irq_data_ready(indio_dev);
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
/*
sdata->sensor_settings->drdy_irq.addr_stat_drdy)
irq_trig |= IRQF_SHARED;
- err = request_threaded_irq(irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
+ /* Let's create an interrupt thread masking the hard IRQ here */
+ irq_trig |= IRQF_ONESHOT;
+
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ st_sensors_irq_handler,
+ st_sensors_irq_thread,
irq_trig,
sdata->trig->name,
sdata->trig);
goto iio_trigger_free;
}
- iio_trigger_set_drvdata(sdata->trig, indio_dev);
- sdata->trig->ops = trigger_ops;
- sdata->trig->dev.parent = sdata->dev;
-
err = iio_trigger_register(sdata->trig);
if (err < 0) {
dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
}
EXPORT_SYMBOL(st_sensors_deallocate_trigger);
+int st_sensors_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+ if (indio != indio_dev)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(st_sensors_validate_device);
+
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
MODULE_LICENSE("GPL v2");
config STX104
tristate "Apex Embedded Systems STX104 DAC driver"
- depends on X86 && ISA
+ depends on X86 && ISA_BUS_API
help
Say yes here to build support for the 2-channel DAC on the Apex
Embedded Systems STX104 integrated analog PC/104 card. The base port
device_for_each_child_node(st->dev, child) {
ret = fwnode_property_read_u32(child, "reg", ®);
- if (ret || reg > ARRAY_SIZE(st->channel_modes))
+ if (ret || reg >= ARRAY_SIZE(st->channel_modes))
continue;
ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
int st_gyro_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
}
static const struct iio_trigger_ops st_gyro_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
#else
struct am2315_sensor_data sensor_data;
ret = am2315_read_data(data, &sensor_data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
+ if (ret < 0)
goto err;
- }
mutex_lock(&data->lock);
if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
},
{ /* IIO_HUMIDITYRELATIVE channel */
.shift = 8,
- .mask = 2,
+ .mask = 3,
},
};
dev_err(&client->dev, "cannot read high byte measurement");
return ret;
}
- val = ret << 6;
+ val = ret << 8;
ret = i2c_smbus_read_byte(client);
if (ret < 0) {
dev_err(&client->dev, "cannot read low byte measurement");
return ret;
}
- val |= ret >> 2;
+ val |= ret;
return val;
}
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_TEMP) {
- *val = 165;
- *val2 = 65536 >> 2;
+ *val = 165000;
+ *val2 = 65536;
return IIO_VAL_FRACTIONAL;
} else {
- *val = 0;
- *val2 = 10000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 100;
+ *val2 = 65536;
+ return IIO_VAL_FRACTIONAL;
}
break;
case IIO_CHAN_INFO_OFFSET:
- *val = -3971;
- *val2 = 879096;
+ *val = -15887;
+ *val2 = 515151;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
};
static const struct bmi160_odr bmi160_accel_odr[] = {
- {0x01, 0, 78125},
- {0x02, 1, 5625},
- {0x03, 3, 125},
- {0x04, 6, 25},
- {0x05, 12, 5},
+ {0x01, 0, 781250},
+ {0x02, 1, 562500},
+ {0x03, 3, 125000},
+ {0x04, 6, 250000},
+ {0x05, 12, 500000},
{0x06, 25, 0},
{0x07, 50, 0},
{0x08, 100, 0},
{0x08, 100, 0},
{0x09, 200, 0},
{0x0A, 400, 0},
- {0x0B, 8000, 0},
+ {0x0B, 800, 0},
{0x0C, 1600, 0},
{0x0D, 3200, 0},
};
return regmap_update_bits(data->regmap,
bmi160_regs[t].config,
- bmi160_odr_table[t].tbl[i].bits,
- bmi160_regs[t].config_odr_mask);
+ bmi160_regs[t].config_odr_mask,
+ bmi160_odr_table[t].tbl[i].bits);
}
static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
int i;
acpi_status status;
union acpi_object *cpm;
+ int ret;
status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
if (ACPI_FAILURE(status))
}
}
}
-
+ ret = cpm->package.count;
kfree(buffer.pointer);
- return cpm->package.count;
+ return ret;
}
static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
/* Prevent the module from being removed whilst attached to a trigger */
__module_get(pf->indio_dev->info->driver_module);
+
+ /* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
+ if (pf->irq < 0)
+ goto out_put_module;
+
+ /* Request irq */
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
- if (ret < 0) {
- module_put(pf->indio_dev->info->driver_module);
- return ret;
- }
+ if (ret < 0)
+ goto out_put_irq;
+ /* Enable trigger in driver */
if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
- module_put(pf->indio_dev->info->driver_module);
+ goto out_free_irq;
}
return ret;
+
+out_free_irq:
+ free_irq(pf->irq, pf);
+out_put_irq:
+ iio_trigger_put_irq(trig, pf->irq);
+out_put_module:
+ module_put(pf->indio_dev->info->driver_module);
+ return ret;
}
static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->dev.parent = &client->dev;
indio_dev->info = &apds9960_info;
indio_dev->name = APDS9960_DRV_NAME;
indio_dev->channels = apds9960_channels;
int ret;
if (!readval)
- bh1780_write(bh1780, (u8)reg, (u8)writeval);
+ return bh1780_write(bh1780, (u8)reg, (u8)writeval);
ret = bh1780_read(bh1780, (u8)reg);
if (ret < 0)
indio_dev->dev.parent = &client->dev;
indio_dev->info = &bh1780_info;
- indio_dev->name = id->name;
+ indio_dev->name = "bh1780";
indio_dev->channels = bh1780_channels;
indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
static int bh1780_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
int ret;
ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
static int bh1780_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
int ret;
ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
{
.type = IIO_PROXIMITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.scan_index = MAX44000_SCAN_INDEX_PRX,
.scan_type = {
.sign = 'u',
int st_magn_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
}
static const struct iio_trigger_ops st_magn_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
#else
if (ret < 0)
return ret;
if (chip_id != id->driver_data) {
- dev_err(&client->dev, "bad chip id. expected %x got %x\n",
- BMP280_CHIP_ID, chip_id);
+ dev_err(&client->dev, "bad chip id. expected %lx got %x\n",
+ id->driver_data, chip_id);
return -EINVAL;
}
int st_press_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_press_buffer_setup_ops);
}
#include <linux/iio/common/st_sensors.h>
#include "st_pressure.h"
+#define MCELSIUS_PER_CELSIUS 1000
+
+/* Default pressure sensitivity */
#define ST_PRESS_LSB_PER_MBAR 4096UL
#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
ST_PRESS_LSB_PER_MBAR)
+
+/* Default temperature sensitivity */
#define ST_PRESS_LSB_PER_CELSIUS 480UL
-#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
- ST_PRESS_LSB_PER_CELSIUS)
+#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
+
#define ST_PRESS_NUMBER_DATA_CHANNELS 1
/* FULLSCALE */
+#define ST_PRESS_FS_AVL_1100MB 1100
#define ST_PRESS_FS_AVL_1260MB 1260
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_PRESS_LPS331AP_PW_MASK 0x80
#define ST_PRESS_LPS331AP_FS_ADDR 0x23
#define ST_PRESS_LPS331AP_FS_MASK 0x30
-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
#define ST_PRESS_LPS331AP_BDU_MASK 0x04
#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
/* CUSTOM VALUES FOR LPS001WP SENSOR */
+
+/* LPS001WP pressure resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
+/* LPS001WP temperature resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
+
#define ST_PRESS_LPS001WP_WAI_EXP 0xba
#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
#define ST_PRESS_LPS001WP_ODR_MASK 0x30
#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
#define ST_PRESS_LPS001WP_PW_ADDR 0x20
#define ST_PRESS_LPS001WP_PW_MASK 0x40
+#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
+ (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
#define ST_PRESS_LPS001WP_BDU_MASK 0x04
#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
#define ST_PRESS_LPS25H_PW_ADDR 0x20
#define ST_PRESS_LPS25H_PW_MASK 0x80
-#define ST_PRESS_LPS25H_FS_ADDR 0x00
-#define ST_PRESS_LPS25H_FS_MASK 0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
#define ST_PRESS_LPS25H_BDU_ADDR 0x20
#define ST_PRESS_LPS25H_BDU_MASK 0x04
#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS25H_MULTIREAD_BIT true
-#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
.storagebits = 16,
.endianness = IIO_LE,
},
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
.modified = 0,
},
{
},
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SCALE),
.modified = 0,
},
IIO_CHAN_SOFT_TIMESTAMP(1)
.addr = ST_PRESS_LPS331AP_FS_ADDR,
.mask = ST_PRESS_LPS331AP_FS_MASK,
.fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of LPS331AP datasheet.
+ */
[0] = {
.num = ST_PRESS_FS_AVL_1260MB,
- .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
- .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
- .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
},
},
},
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = 0,
+ .fs_avl = {
+ /*
+ * Pressure and temperature resolution values
+ * as defined in table 3 of LPS001WP datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1100MB,
+ .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
+ .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
+ },
+ },
},
.bdu = {
.addr = ST_PRESS_LPS001WP_BDU_ADDR,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
.fs = {
- .addr = ST_PRESS_LPS25H_FS_ADDR,
- .mask = ST_PRESS_LPS25H_FS_MASK,
.fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of LPS25H datasheet.
+ */
[0] = {
.num = ST_PRESS_FS_AVL_1260MB,
- .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
- .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
- .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
},
},
},
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
-
switch (ch->type) {
case IIO_PRESSURE:
+ *val = 0;
*val2 = press_data->current_fullscale->gain;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_TEMP:
+ *val = MCELSIUS_PER_CELSIUS;
*val2 = press_data->current_fullscale->gain2;
- break;
+ return IIO_VAL_FRACTIONAL;
default:
err = -EINVAL;
goto read_error;
}
- return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_OFFSET:
switch (ch->type) {
case IIO_TEMP:
- *val = 425;
- *val2 = 10;
+ *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
+ press_data->current_fullscale->gain2;
+ *val2 = MCELSIUS_PER_CELSIUS;
break;
default:
err = -EINVAL;
static const struct iio_trigger_ops st_press_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
#else
struct delayed_work work;
u32 tune_cap;
+ u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
u8 buf[2] ____cacheline_aligned;
};
.type = IIO_PROXIMITY,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_PROCESSED),
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
.scan_type = {
.sign = 'u',
/* storm out of range */
if (*val == AS3935_DATA_MASK)
return -EINVAL;
- *val *= 1000;
+
+ if (m == IIO_CHAN_INFO_PROCESSED)
+ *val *= 1000;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000;
break;
default:
return -EINVAL;
ret = as3935_read(st, AS3935_DATA, &val);
if (ret)
goto err_read;
- val &= AS3935_DATA_MASK;
- val *= 1000;
- iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
+ st->buffer[0] = val & AS3935_DATA_MASK;
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+ pf->timestamp);
err_read:
iio_trigger_notify_done(indio_dev->trig);
{
int ret = 0;
struct net_device *old_net_dev;
+ enum ib_gid_type old_gid_type;
/* in rdma_cap_roce_gid_table, this funciton should be protected by a
* sleep-able lock.
}
old_net_dev = table->data_vec[ix].attr.ndev;
+ old_gid_type = table->data_vec[ix].attr.gid_type;
if (old_net_dev && old_net_dev != attr->ndev)
dev_put(old_net_dev);
/* if modify_gid failed, just delete the old gid */
attr = &zattr;
table->data_vec[ix].context = NULL;
}
- if (default_gid)
- table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+ if (default_gid) {
+ table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+ if (action == GID_TABLE_WRITE_ACTION_DEL)
+ table->data_vec[ix].attr.gid_type = old_gid_type;
+ }
if (table->data_vec[ix].attr.ndev &&
table->data_vec[ix].attr.ndev != old_net_dev)
dev_hold(table->data_vec[ix].attr.ndev);
for (ix = 0; ix < table->sz; ix++)
if (table->data_vec[ix].attr.ndev == ndev)
- if (!del_gid(ib_dev, port, table, ix, false))
+ if (!del_gid(ib_dev, port, table, ix,
+ !!(table->data_vec[ix].props &
+ GID_TABLE_ENTRY_DEFAULT)))
deleted = true;
write_unlock_irq(&table->rwlock);
work->cm_event.event = IB_CM_USER_ESTABLISHED;
/* Check if the device started its remove_one */
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down) {
queue_delayed_work(cm.wq, &work->work, 0);
} else {
kfree(work);
ret = -ENODEV;
}
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
out:
return ret;
complete(&id_priv->comp);
}
-static int cma_disable_callback(struct rdma_id_private *id_priv,
- enum rdma_cm_state state)
-{
- mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != state) {
- mutex_unlock(&id_priv->handler_mutex);
- return -EINVAL;
- }
- return 0;
-}
-
struct rdma_cm_id *rdma_create_id(struct net *net,
rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps,
struct rdma_cm_event event;
int ret = 0;
+ mutex_lock(&id_priv->handler_mutex);
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+ id_priv->state != RDMA_CM_CONNECT) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
- return 0;
+ id_priv->state != RDMA_CM_DISCONNECT))
+ goto out;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
- struct rdma_id_private *listen_id, *conn_id;
+ struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event;
struct net_device *net_dev;
int offset, ret;
goto net_dev_put;
}
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN) {
ret = -ECONNABORTED;
- goto net_dev_put;
+ goto err1;
}
memset(&event, 0, sizeof event);
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+ goto out;
memset(&event, 0, sizeof event);
switch (iw_event->event) {
return ret;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return ret;
}
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
- int ret;
+ int ret = -ECONNABORTED;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
listen_id = cm_id->context;
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
- return -ECONNABORTED;
+
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN)
+ goto out;
/* Create a new RDMA id for the new IW CM ID */
new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
int ret = 0;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+ goto out;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
struct rdma_id_private *id_priv;
struct cma_multicast *mc = multicast->context;
struct rdma_cm_event event;
- int ret;
+ int ret = 0;
id_priv = mc->id_priv;
- if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
- cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+ id_priv->state != RDMA_CM_ADDR_RESOLVED)
+ goto out;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
return 0;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return 0;
}
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
if (addr->sa_family == AF_INET) {
- if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
true);
- if (!err) {
- mc->igmp_joined = true;
- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ if (!err)
+ mc->igmp_joined = true;
}
} else {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
if (err || port_attr->subnet_prefix)
return err;
+ if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+ return 0;
+
err = ib_query_gid(device, port_num, 0, &gid, NULL);
if (err)
return err;
goto err_mad;
}
- if (ib_add_ibnl_clients()) {
+ ret = ib_add_ibnl_clients();
+ if (ret) {
pr_warn("Couldn't register ibnl clients\n");
goto err_sa;
}
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
__func__, msg_seq);
- return -EINVAL;
+ return -EINVAL;
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
/* Now, check to see if there are any methods still in use */
if (!check_method_table(method)) {
/* If not, release management method table */
- kfree(method);
- class->method_table[mgmt_class] = NULL;
- /* Any management classes left ? */
+ kfree(method);
+ class->method_table[mgmt_class] = NULL;
+ /* Any management classes left ? */
if (!check_class_table(class)) {
/* If not, release management class table */
kfree(class);
static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320);
/*
* Counters added by extended set
&port_pma_attr_port_rcv_data.attr.attr,
&port_pma_attr_port_xmit_packets.attr.attr,
&port_pma_attr_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
&port_pma_attr_ext_port_xmit_data.attr.attr,
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_xmit_packets.attr.attr,
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
u8 port_num)
{
- struct attribute_group *hsag = NULL;
+ struct attribute_group *hsag;
struct rdma_hw_stats *stats;
- int i = 0, ret;
+ int i, ret;
stats = device->alloc_hw_stats(device, port_num);
return;
if (!stats->names || stats->num_counters <= 0)
- goto err;
+ goto err_free_stats;
+ /*
+ * Two extra attribue elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
hsag = kzalloc(sizeof(*hsag) +
- // 1 extra for the lifespan config entry
- sizeof(void *) * (stats->num_counters + 1),
+ sizeof(void *) * (stats->num_counters + 2),
GFP_KERNEL);
if (!hsag)
- return;
+ goto err_free_stats;
ret = device->get_hw_stats(device, stats, port_num,
stats->num_counters);
if (ret != stats->num_counters)
- goto err;
+ goto err_free_hsag;
stats->timestamp = jiffies;
hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
if (!hsag->attrs[i])
goto err;
+ sysfs_attr_init(hsag->attrs[i]);
}
/* treat an error here as non-fatal */
hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+ if (hsag->attrs[i])
+ sysfs_attr_init(hsag->attrs[i]);
if (port) {
struct kobject *kobj = &port->kobj;
return;
err:
- kfree(stats);
for (; i >= 0; i--)
kfree(hsag->attrs[i]);
+err_free_hsag:
kfree(hsag);
+err_free_stats:
+ kfree(stats);
return;
}
struct ib_srq *srq = NULL;
struct ib_qp *qp;
char *buf;
- struct ib_qp_init_attr attr;
+ struct ib_qp_init_attr attr = {};
struct ib_uverbs_ex_create_qp_resp resp;
int ret;
ah_attr->grh.dgid = sgid;
if (!rdma_cap_eth_ah(device, port_num)) {
- ret = ib_find_cached_gid_by_port(device, &dgid,
- IB_GID_TYPE_IB,
- port_num, NULL,
- &gid_index);
- if (ret)
- return ret;
+ if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+ ret = ib_find_cached_gid_by_port(device, &dgid,
+ IB_GID_TYPE_IB,
+ port_num, NULL,
+ &gid_index);
+ if (ret)
+ return ret;
+ } else {
+ gid_index = 0;
+ }
}
ah_attr->grh.sgid_index = (u8) gid_index;
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct cpu_mask_set *set = &dd->affinity->proc;
- char buf[1024];
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
goto done;
}
cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
&dd->affinity->rcv_intr.mask :
&dd->affinity->rcv_intr.used));
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
- hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+ hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+ cpumask_pr_args(intrs));
/*
* If we don't have a NUMA node requested, preference is towards
if (node == -1)
node = dd->node;
node_mask = cpumask_of_node(node);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+ hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+ cpumask_pr_args(node_mask));
/* diff will hold all unused cpus */
cpumask_andnot(diff, &set->mask, &set->used);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
- hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+ hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
/* get cpumask of available CPUs on preferred NUMA */
cpumask_and(mask, diff, node_mask);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+ hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
/*
* At first, we don't want to place processes on the same
cpumask_andnot(diff, &set->mask, &set->used);
cpumask_andnot(mask, diff, node_mask);
}
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+ hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+ cpumask_pr_args(mask));
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids) /* empty */
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np);
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
* Error interrupt table entry. This is used as input to the interrupt
}
reset_neighbor_info(ppd);
- if (ppd->mgmt_allowed)
- remove_full_mgmt_pkey(ppd);
/* disable the port */
clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
__func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
ppd->pkeys[2] = FULL_MGMT_P_KEY;
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
}
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
- ppd->pkeys[2] = 0;
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ if (ppd->pkeys[2] != 0) {
+ ppd->pkeys[2] = 0;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+ }
}
/*
* save first 2 flits in the packet that caused
* the error
*/
- dd->err_info_rcvport.packet_flit1 = hdr0;
- dd->err_info_rcvport.packet_flit2 = hdr1;
+ dd->err_info_rcvport.packet_flit1 = hdr0;
+ dd->err_info_rcvport.packet_flit2 = hdr1;
}
switch (info) {
case 1:
return 0;
}
+ /*
+ * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+ * pkey table can be configured properly if the HFI unit is connected
+ * to switch port with MgmtAllowed=NO
+ */
+ clear_full_mgmt_pkey(ppd);
+
return set_link_state(ppd, HLS_DN_POLL);
}
u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
& SEND_LEN_CHECK1_LEN_VL15_MASK) <<
SEND_LEN_CHECK1_LEN_VL15_SHIFT;
- int i;
+ int i, j;
u32 thres;
for (i = 0; i < ppd->vls_supported; i++) {
sc_mtu_to_threshold(dd->vld[i].sc,
dd->vld[i].mtu,
dd->rcd[0]->rcvhdrqentsize));
- sc_set_cr_threshold(dd->vld[i].sc, thres);
+ for (j = 0; j < INIT_SC_PER_VL; j++)
+ sc_set_cr_threshold(
+ pio_select_send_context_vl(dd, j, i),
+ thres);
}
thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
sc_mtu_to_threshold(dd->vld[15].sc,
hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
}
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
#define C_MAX_NAME 13 /* 12 chars + one for /0 */
{
unsigned long flags;
struct hfi1_devdata *tmp, *peer = NULL;
+ struct hfi1_asic_data *asic_data;
int ret = 0;
+ /* pre-allocate the asic structure in case we are the first device */
+ asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+ if (!asic_data)
+ return -ENOMEM;
+
spin_lock_irqsave(&hfi1_devs_lock, flags);
/* Find our peer device */
list_for_each_entry(tmp, &hfi1_dev_list, list) {
}
if (peer) {
+ /* use already allocated structure */
dd->asic_data = peer->asic_data;
+ kfree(asic_data);
} else {
- dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
- if (!dd->asic_data) {
- ret = -ENOMEM;
- goto done;
- }
+ dd->asic_data = asic_data;
mutex_init(&dd->asic_data->asic_resource_mutex);
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
return ret;
}
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
+ if (uctxt)
+ return -EINVAL;
+
if (copy_from_user(&uinfo,
(struct hfi1_user_info __user *)arg,
sizeof(uinfo)))
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_physaddr);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
}
for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = NULL;
+ struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
memset(data, 0, size);
}
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+ struct ib_event event;
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.rdi.ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
+}
+
static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
struct ib_mad_send_buf *send_buf;
}
if (changed) {
- struct ib_event event;
-
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
+ hfi1_event_pkey_change(dd, port);
}
+
return 0;
}
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
#endif /* _HFI1_MAD_H */
/* counter is reset if occupancy count changes */
if (reg != reg_prev)
loop = 0;
- if (loop > 500) {
+ if (loop > 50000) {
/* timed out - bounce the link */
dd_dev_err(dd,
"%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
pio_map_free(m);
}
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+ u32 thres;
+
+ thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+ 50),
+ sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+ dd->vld[i].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
/*
* pio_map_init - called when #vls change
* @dd: hfi1_devdata
if (!newmap->map[i])
goto bail;
newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
- /* assign send contexts */
+ /*
+ * assign send contexts and
+ * adjust credit return threshold
+ */
for (j = 0; j < sz; j++) {
- if (dd->kernel_send_context[scontext])
+ if (dd->kernel_send_context[scontext]) {
newmap->map[i]->ksc[j] =
dd->kernel_send_context[scontext];
+ set_threshold(dd, scontext, i);
+ }
if (++scontext >= first_scontext +
vl_scontexts[i])
/* wrap back to first send context */
if (ppd->qsfp_info.cache_valid) {
if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
- sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+ snprintf(lenstr, sizeof(lenstr), "%dM ",
+ cache[QSFP_MOD_LEN_OFFS]);
power_byte = cache[QSFP_MOD_PWR_OFFS];
sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
return ret;
}
-const char *print_u64_array(
- struct trace_seq *p,
- u64 *arr, int len)
-{
- int i;
- const char *ret = trace_seq_buffer_ptr(p);
-
- for (i = 0; i < len; i++)
- trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
- trace_seq_putc(p, 0);
- return ret;
-}
-
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
- bool sc4_bit = has_sc4_bit(packet);
- u8 sc;
+ u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
u32 bth1;
int is_mcast;
struct ib_grh *grh = NULL;
*/
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl, sc5;
+ u8 sl;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
}
if (qp->ibqp.qp_num > 1) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u16 slid;
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
slid = be16_to_cpu(hdr->lrh[3]);
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp *smp = (struct opa_smp *)data;
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
goto drop;
}
wc.slid = be16_to_cpu(hdr->lrh[3]);
- sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc |= sc4_bit;
- wc.sl = ibp->sc_to_sl[sc];
+ wc.sl = ibp->sc_to_sl[sc5];
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
struct sdma_mmu_node *node;
};
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
struct sdma_mmu_node {
struct mmu_rb_node rb;
*/
SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
req->tidoffset, req->tidoffset / req->omfactor,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
req->tidoffset / req->omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
}
done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
{
struct verbs_txreq *tx = ERR_PTR(-EBUSY);
- unsigned long flags;
- spin_lock_irqsave(&qp->s_lock, flags);
write_seqlock(&dev->iowait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct hfi1_qp_priv *priv;
}
out:
write_sequnlock(&dev->iowait_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
return tx;
}
static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
+ __must_hold(&qp->slock)
{
struct verbs_txreq *tx;
struct hfi1_qp_priv *priv = qp->priv;
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460
cqp_init_info.scratch_array = cqp->scratch_array;
status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
if (status) {
- i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
- status, maj_err, min_err);
+ i40iw_pr_err("cqp init status %d\n", status);
goto exit;
}
status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
props->max_map_per_fmr = 1;
+ props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
return 0;
}
info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->length;
+ info->remote_access = true;
cqp_info->cqp_cmd = OP_ALLOC_STAG;
cqp_info->post_sq = 1;
cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
mutex_lock(&iwdev->pbl_mutex);
status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
mutex_unlock(&iwdev->pbl_mutex);
- if (!status)
+ if (status)
goto err1;
if (palloc->level != I40IW_LEVEL_1)
struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
struct i40iw_fast_reg_stag_info info;
+ memset(&info, 0, sizeof(info));
info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
info.access_rights |= i40iw_get_user_access(flags);
info.stag_key = reg_wr(ib_wr)->key & 0xff;
info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
info.total_len = iwmr->ibmr.length;
+ info.reg_addr_pa = *(u64 *)palloc->level1.addr;
info.first_pm_pbl_index = palloc->level1.idx;
info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
+ if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+ info.chunk_size = 1;
+
if (page_shift == 21)
info.page_size = 1; /* 2M page */
{
struct i40iw_cq *iwcq;
struct i40iw_cq_uk *ukcq;
- enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+ unsigned long flags;
+ enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
iwcq = (struct i40iw_cq *)ibcq;
ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_NEXT_COMP)
- cq_notify = IW_CQ_COMPL_EVENT;
+ if (notify_flags == IB_CQ_SOLICITED)
+ cq_notify = IW_CQ_COMPL_SOLICITED;
+ spin_lock_irqsave(&iwcq->lock, flags);
ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
return 0;
}
ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
ah->av.ib.g_slid = ah_attr->src_path_bits;
+ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
if (ah_attr->ah_flags & IB_AH_GRH) {
ah->av.ib.g_slid |= 0x80;
ah->av.ib.gid_index = ah_attr->grh.sgid_index;
!(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
--ah->av.ib.stat_rate;
}
- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
return &ah->ibah;
}
tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
spin_unlock(&tun_qp->tx_lock);
if (ret)
- goto out;
+ goto end;
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
wr.wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
- if (ret)
- ib_destroy_ah(ah);
+ if (!ret)
+ return 0;
+ out:
+ spin_lock(&tun_qp->tx_lock);
+ tun_qp->tx_ix_tail++;
+ spin_unlock(&tun_qp->tx_lock);
+ tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+ ib_destroy_ah(ah);
return ret;
}
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+
+ spin_lock(&sqp->tx_lock);
+ sqp->tx_ix_tail++;
+ spin_unlock(&sqp->tx_lock);
+ sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- if (ret)
- ib_destroy_ah(ah);
+ ib_destroy_ah(ah);
return ret;
}
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
+ if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+ return ERR_PTR(-EINVAL);
+
if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
(flow_attr->type != IB_FLOW_ATTR_NORMAL))
return ERR_PTR(-EOPNOTSUPP);
u32 max_pages;
struct mlx4_mr mmr;
struct ib_umem *umem;
- void *pages_alloc;
+ size_t page_map_size;
};
struct mlx4_ib_mw {
struct mlx4_ib_mr *mr,
int max_pages)
{
- int size = max_pages * sizeof(u64);
- int add_size;
int ret;
- add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+ /* Ensure that size is aligned to DMA cacheline
+ * requirements.
+ * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+ * so page_map_size will never cross PAGE_SIZE.
+ */
+ mr->page_map_size = roundup(max_pages * sizeof(u64),
+ MLX4_MR_PAGES_ALIGN);
- mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
- if (!mr->pages_alloc)
+ /* Prevent cross page boundary allocation. */
+ mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+ if (!mr->pages)
return -ENOMEM;
- mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
mr->page_map = dma_map_single(device->dma_device, mr->pages,
- size, DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
if (dma_mapping_error(device->dma_device, mr->page_map)) {
ret = -ENOMEM;
}
return 0;
-err:
- kfree(mr->pages_alloc);
+err:
+ free_page((unsigned long)mr->pages);
return ret;
}
{
if (mr->pages) {
struct ib_device *device = mr->ibmr.device;
- int size = mr->max_pages * sizeof(u64);
dma_unmap_single(device->dma_device, mr->page_map,
- size, DMA_TO_DEVICE);
- kfree(mr->pages_alloc);
+ mr->page_map_size, DMA_TO_DEVICE);
+ free_page((unsigned long)mr->pages);
mr->pages = NULL;
}
}
mr->npages = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
- sizeof(u64) * mr->max_pages,
- DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
- sizeof(u64) * mr->max_pages,
- DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
return rc;
}
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
udata, 0, &qp, gfp);
- if (err)
+ if (err) {
+ kfree(qp);
return ERR_PTR(err);
+ }
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
int eqn;
int err;
- if (entries < 0)
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
if (check_cq_create_flags(attr->flags))
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
pma_cnt_ext->port_xmit_data =
cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
transmitted_ib_multicast.octets) >> 2);
- pma_cnt_ext->port_xmit_data =
+ pma_cnt_ext->port_rcv_data =
cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
received_ib_multicast.octets) >> 2);
pma_cnt_ext->port_xmit_packets =
MLX5_CAP_ETH(dev->mdev, scatter_fcs))
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+ if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = L1_CACHE_BYTES;
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
- if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ /*
+ * We don't want to expose information from the PCI bar that is located
+ * after 4096 bytes, so if the arch only supports larger pages, let's
+ * pretend we don't support reading the HCA's core clock. This is also
+ * forced by mmap function.
+ */
+ if (PAGE_SIZE <= 4096 &&
+ field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset =
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+ return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
break;
case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
-
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr)
+ u32 path_flags, const struct ib_qp_attr *attr,
+ bool alt)
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = attr->pkey_index;
+ path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+ attr->pkey_index);
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >=
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
} else {
- path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
- 0;
+ path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->fl_free_ar |=
+ (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
path->rlid = cpu_to_be16(ah->dlid);
path->grh_mlid = ah->src_path_bits & 0x7f;
if (ah->ah_flags & IB_AH_GRH)
path->port = port;
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = attr->timeout << 3;
+ path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = attr->pkey_index;
+ context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
/* todo implement counter_index functionality */
if (attr_mask & IB_QP_AV) {
err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
- attr_mask, 0, attr);
+ attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_ALT_PATH) {
err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
&context->alt_path,
- attr->alt_port_num, attr_mask, 0, attr);
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ 0, attr, true);
if (err)
goto out;
}
return MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
return fence;
-
- } else {
- return 0;
+ } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+ return MLX5_FENCE_MODE_FENCE;
}
+
+ return 0;
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+ qp_attr->alt_pkey_index =
+ be16_to_cpu(context->alt_path.pkey_index);
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
- qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+ qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
qp_attr->port_num = context->pri_path.port;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
switch (cmd.type) {
case QIB_CMD_ASSIGN_CTXT:
+ if (rcd) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
if (ret)
goto bail;
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
int i;
int flags;
dma_addr_t pa;
- DEFINE_DMA_ATTRS(attrs);
-
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
if (!can_do_mlock())
return -EPERM;
/* wrap to first map page, invert bit 0 */
offset = qpt->incr | ((offset & 1) ^ 1);
}
- /* there can be no bits at shift and below */
- WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+ /* there can be no set bits in low-order QoS bits */
+ WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
qpn = mk_qpn(qpt, map, offset);
}
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
+ __releases(&qp->s_lock)
+ __releases(&qp->s_hlock)
+ __releases(&qp->r_lock)
+ __acquires(&qp->r_lock)
+ __acquires(&qp->s_hlock)
+ __acquires(&qp->s_lock)
{
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_mig_state = IB_MIG_MIGRATED;
- if (qp->s_ack_queue)
- memset(
- qp->s_ack_queue,
- 0,
- rvt_max_atomic(rdi) *
- sizeof(*qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
qp->s_num_rd_atomic = 0;
* initialization that is needed.
*/
priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
- if (!priv)
+ if (IS_ERR(priv)) {
+ ret = priv;
goto bail_qp;
+ }
qp->priv = priv;
qp->timeout_jiffies =
usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
!rdi->driver_f.quiesce_qp ||
!rdi->driver_f.notify_error_qp ||
!rdi->driver_f.mtu_from_qp ||
- !rdi->driver_f.mtu_to_path_mtu ||
- !rdi->driver_f.shut_down_port ||
- !rdi->driver_f.cap_mask_chg)
+ !rdi->driver_f.mtu_to_path_mtu)
return -EINVAL;
break;
IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_FLAG_DEV_ADDR_SET = 13,
IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+ IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
{
struct net_device *dev = to_net_dev(d);
int ret;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+ return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
return false;
- netif_addr_lock(priv->dev);
+ netif_addr_lock_bh(priv->dev);
/* The subnet prefix may have changed, update it now so we won't have
* to do it later
search_gid.global.interface_id = priv->local_gid.global.interface_id;
- netif_addr_unlock(priv->dev);
+ netif_addr_unlock_bh(priv->dev);
err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
priv->dev, &port, &index);
- netif_addr_lock(priv->dev);
+ netif_addr_lock_bh(priv->dev);
if (search_gid.global.interface_id !=
priv->local_gid.global.interface_id)
}
out:
- netif_addr_unlock(priv->dev);
+ netif_addr_unlock_bh(priv->dev);
return ret;
}
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
struct ipoib_dev_priv *child_priv;
struct net_device *netdev = priv->dev;
- netif_addr_lock(netdev);
+ netif_addr_lock_bh(netdev);
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
- netif_addr_unlock(netdev);
+ netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
down_read(&priv->vlan_rwsem);
union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
int ret = 0;
- netif_addr_lock(dev);
+ netif_addr_lock_bh(dev);
/* Make sure the QPN, reserved and subnet prefix match the current
* lladdr, it also makes sure the lladdr is unicast.
gid->global.interface_id == 0)
ret = -EINVAL;
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
return ret;
}
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
+ /* mark interface in the middle of destruction */
+ set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();
return;
}
priv->local_lid = port_attr.lid;
- netif_addr_lock(dev);
+ netif_addr_lock_bh(dev);
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
return;
}
- netif_addr_unlock(dev);
+ netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
if (!rtnl_trylock())
return restart_syscall();
{
unsigned int sg_offset = 0;
- state->desc = req->indirect_desc;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
struct scatterlist *sg;
int i;
- state->desc = req->indirect_desc;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
+ state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
else if (dev->use_fmr)
int mr_page_shift, p;
u64 max_pages_per_mr;
- srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+ srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return;
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(srp_dev->global_mr))
goto err_pd;
- } else {
- srp_dev->global_mr = NULL;
}
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
*/
qp_init->cap.max_send_wr = srp_sq_size / 2;
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
- qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
- sdev->device->attrs.max_sge);
+ qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
qp_init->port_num = ch->sport->port;
ch->qp = ib_create_qp(sdev->pd, qp_init);
SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
SRPT_DEF_SG_TABLESIZE = 128,
+ SRPT_DEF_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
case XTYPE_XBOXONE:
packet->data[0] = 0x09; /* activate rumble */
- packet->data[1] = 0x08;
+ packet->data[1] = 0x00;
packet->data[2] = xpad->odata_serial++;
- packet->data[3] = 0x08; /* continuous effect */
- packet->data[4] = 0x00; /* simple rumble mode */
- packet->data[5] = 0x03; /* L and R actuator only */
- packet->data[6] = 0x00; /* TODO: LT actuator */
- packet->data[7] = 0x00; /* TODO: RT actuator */
+ packet->data[3] = 0x09;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x0F;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
- packet->data[10] = 0x80; /* length of pulse */
- packet->data[11] = 0x00; /* stop period of pulse */
+ packet->data[10] = 0xFF;
+ packet->data[11] = 0x00;
packet->data[12] = 0x00;
packet->len = 13;
packet->pending = true;
int ep_irq_in_idx;
int i, error;
+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
break;
}
- if (xpad_device[i].xtype == XTYPE_XBOXONE &&
- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
- /*
- * The Xbox One controller lists three interfaces all with the
- * same interface class, subclass and protocol. Differentiate by
- * interface number.
- */
- return -ENODEV;
- }
-
xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
if (!xpad)
return -ENOMEM;
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
xpad->xtype = XTYPE_XBOX360W;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+ xpad->xtype = XTYPE_XBOXONE;
else
xpad->xtype = XTYPE_XBOX360;
} else {
xpad->mapping |= MAP_STICKS_TO_NULL;
}
+ if (xpad->xtype == XTYPE_XBOXONE &&
+ intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+ * interface number.
+ */
+ error = -ENODEV;
+ goto err_free_in_urb;
+ }
+
error = xpad_init_output(intf, xpad);
if (error)
goto err_free_in_urb;
case 5:
etd->hw_version = 3;
break;
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 13:
- case 14:
+ case 6 ... 14:
etd->hw_version = 4;
break;
default:
return -ENXIO;
}
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
/* Check if the device is present */
response = ~VMMOUSE_PROTO_MAGIC;
VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
- release_region(VMMOUSE_PROTO_PORT, 4);
+ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
return -ENXIO;
- }
if (set_properties) {
psmouse->vendor = VMMOUSE_VENDOR;
psmouse->model = version;
}
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return 0;
}
psmouse_reset(psmouse);
input_unregister_device(priv->abs_dev);
kfree(priv);
- release_region(VMMOUSE_PROTO_PORT, 4);
}
/**
struct input_dev *rel_dev = psmouse->dev, *abs_dev;
int error;
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
psmouse_reset(psmouse);
error = vmmouse_enable(psmouse);
if (error)
- goto release_region;
+ return error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
abs_dev = input_allocate_device();
kfree(priv);
psmouse->private = NULL;
-release_region:
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return error;
}
static void rmi_function_of_probe(struct rmi_function *fn)
{
char of_name[9];
+ struct device_node *node = fn->rmi_dev->xport->dev->of_node;
snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
fn->fd.function_number);
- fn->dev.of_node = of_find_node_by_name(
- fn->rmi_dev->xport->dev->of_node, of_name);
+ fn->dev.of_node = of_get_child_by_name(node, of_name);
}
#else
static inline void rmi_function_of_probe(struct rmi_function *fn)
struct rmi_device *rmi_dev = fn->rmi_dev;
int ret;
int offset;
- u8 buf[14];
+ u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
int clip_x_low = 0;
offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
- if (item->reg_size > 14) {
- dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
- item->reg_size);
+ if (item->reg_size > sizeof(buf)) {
+ dev_err(&fn->dev,
+ "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+ sizeof(buf), item->reg_size);
return -ENODEV;
}
return -ENODEV;
}
+ ts->regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
+ if (IS_ERR(ts->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(ts->regmap);
+ }
+
error = of_property_read_u32_index(np, "syscon", 1, ®);
if (error < 0) {
dev_err(dev, "no offset in syscon\n");
ts->bit = BIT(bit);
- ts->regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(ts->regmap)) {
- dev_err(dev, "cannot get parent's regmap\n");
- return PTR_ERR(ts->regmap);
- }
-
return 0;
}
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-#define W8001_MAX_LENGTH 11
+#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
bool touch = data[0] & (1 << i);
input_mt_slot(dev, i);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
if (touch) {
x = (data[6 * i + 1] << 7) | data[6 * i + 2];
y = (data[6 * i + 3] << 7) | data[6 * i + 4];
w8001->idx = 0;
parse_multi_touch(w8001);
break;
+
+ default:
+ /*
+ * ThinkPad X60 Tablet PC (pen only device) sometimes
+ * sends invalid data packets that are larger than
+ * W8001_PKTLEN_TPCPEN. Let's start over again.
+ */
+ if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
+ w8001->idx = 0;
}
return IRQ_HANDLED;
0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y,
0, touch.y, 0, 0);
+ input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
strlcat(basename, " 2FG", basename_sz);
if (w8001->max_pen_x && w8001->max_pen_y)
break;
}
+ devid = e->devid;
DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
hid, uid,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
- devid = e->devid;
flags = e->flags;
ret = add_acpi_hid_device(hid, uid, &devid, false);
break;
}
+ /*
+ * Order is important here to make sure any unity map requirements are
+ * fulfilled. The unity mappings are created and written to the device
+ * table during the amd_iommu_init_api() call.
+ *
+ * After that we call init_device_table_dma() to make sure any
+ * uninitialized DTE will block DMA, and in the end we flush the caches
+ * of all IOMMUs to make sure the changes to the device table are
+ * active.
+ */
+ ret = amd_iommu_init_api();
+
init_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
- ret = amd_iommu_init_api();
-
if (!ret)
print_iommu_info();
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
}
}
- iommu_flush_write_buffer(iommu);
- iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
#ifdef CONFIG_INTEL_IOMMU_SVM
#endif
}
+ /*
+ * Now that qi is enabled on all iommus, set the root entry and flush
+ * caches. This is required on some Intel X58 chipsets, otherwise the
+ * flush_context function will loop forever and the boot hangs.
+ */
+ for_each_active_iommu(iommu, drhd) {
+ iommu_flush_write_buffer(iommu);
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ }
+
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
struct dmar_domain *domain;
- u16 did;
+ int did;
if (!iommu)
continue;
- for (did = 0; did < 0xffff; did++) {
- domain = get_iommu_domain(iommu, did);
+ for (did = 0; did < cap_ndoms(iommu->cap); did++) {
+ domain = get_iommu_domain(iommu, (u16)did);
if (!domain)
continue;
/* Try replenishing IOVAs by flushing rcache. */
flushed_rcache = true;
+ preempt_disable();
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
+ preempt_enable();
goto retry;
}
bool can_insert = false;
unsigned long flags;
- cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_full(cpu_rcache->loaded)) {
iova_magazine_push(cpu_rcache->loaded, iova_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ put_cpu_ptr(rcache->cpu_rcaches);
if (mag_to_free) {
iova_magazine_free_pfns(mag_to_free, iovad);
bool has_pfn = false;
unsigned long flags;
- cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_empty(cpu_rcache->loaded)) {
iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ put_cpu_ptr(rcache->cpu_rcaches);
return iova_pfn;
}
dte_addr = virt_to_phys(rk_domain->dt);
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
- rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
- gic_map_to_vpe(intr, vpe);
+ gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
/* verify that it doesn't conflict with an IPI irq */
if (test_bit(spec->hwirq, ipi_resrv))
return -EBUSY;
+
+ hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+ return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ &gic_level_irq_controller,
+ NULL);
} else {
base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
if (base_hwirq == gic_shared_intrs) {
&gic_level_irq_controller,
NULL);
if (ret)
- return ret;
+ goto error;
}
return 0;
+
+error:
+ irq_domain_free_irqs_parent(d, virq, nr_irqs);
+ return ret;
}
void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
- return to_of_node(d->fwnode) == node && is_ipi;
+ return (!node || to_of_node(d->fwnode) == node) && is_ipi;
break;
default:
return 0;
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
+ led_cdev->flags &= ~LED_BLINK_SW;
return;
}
if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
return;
}
return;
}
+ led_cdev->flags |= LED_BLINK_SW;
mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
+ led_cdev->flags &= ~LED_BLINK_SW;
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
enum led_brightness brightness)
{
/*
- * In case blinking is on delay brightness setting
+ * If software blink is active, delay brightness setting
* until the next timer tick.
*/
- if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+ if (led_cdev->flags & LED_BLINK_SW) {
/*
* If we need to disable soft blinking delegate this to the
* work queue task to avoid problems in case we are called
#include <linux/sched.h>
#include <linux/leds.h>
#include <linux/reboot.h>
+#include <linux/suspend.h>
#include "../leds.h"
static int panic_heartbeats;
.deactivate = heartbeat_trig_deactivate,
};
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+ unsigned long pm_event, void *unused)
+{
+ int rc;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ led_trigger_unregister(&heartbeat_led_trigger);
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ rc = led_trigger_register(&heartbeat_led_trigger);
+ if (rc)
+ pr_err("could not re-register heartbeat trigger\n");
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static int heartbeat_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
return NOTIFY_DONE;
}
+static struct notifier_block heartbeat_pm_nb = {
+ .notifier_call = heartbeat_pm_notifier,
+};
+
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
atomic_notifier_chain_register(&panic_notifier_list,
&heartbeat_panic_nb);
register_reboot_notifier(&heartbeat_reboot_nb);
+ register_pm_notifier(&heartbeat_pm_nb);
}
return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_pm_notifier(&heartbeat_pm_nb);
unregister_reboot_notifier(&heartbeat_reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list,
&heartbeat_panic_nb);
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
const struct mcb_device_id *found_id;
+ struct module *carrier_mod;
+ int ret;
found_id = mcb_match_id(mdrv->id_table, mdev);
if (!found_id)
return -ENODEV;
- return mdrv->probe(mdev, found_id);
+ carrier_mod = mdev->dev.parent->driver->owner;
+ if (!try_module_get(carrier_mod))
+ return -EINVAL;
+
+ get_device(dev);
+ ret = mdrv->probe(mdev, found_id);
+ if (ret)
+ module_put(carrier_mod);
+
+ return ret;
}
static int mcb_remove(struct device *dev)
{
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
+ struct module *carrier_mod;
mdrv->remove(mdev);
+ carrier_mod = mdev->dev.parent->driver->owner;
+ module_put(carrier_mod);
+
put_device(&mdev->dev);
return 0;
V4L2_DV_BT_CAP_CUSTOM)
};
-static inline const struct v4l2_dv_timings_cap *
-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+/*
+ * Return the DV timings capabilities for the requested sink pad. As a special
+ * case, pad value -1 returns the capabilities for the currently selected input.
+ */
+static const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
{
- return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
- &adv7604_timings_cap_analog;
+ if (pad == -1) {
+ struct adv76xx_state *state = to_state(sd);
+
+ pad = state->selected_input;
+ }
+
+ switch (pad) {
+ case ADV76XX_PAD_HDMI_PORT_A:
+ case ADV7604_PAD_HDMI_PORT_B:
+ case ADV7604_PAD_HDMI_PORT_C:
+ case ADV7604_PAD_HDMI_PORT_D:
+ return &adv76xx_timings_cap_digital;
+
+ case ADV7604_PAD_VGA_RGB:
+ case ADV7604_PAD_VGA_COMP:
+ default:
+ return &adv7604_timings_cap_analog;
+ }
}
const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
- adv76xx_get_dv_timings_cap(sd),
+ adv76xx_get_dv_timings_cap(sd, -1),
adv76xx_check_dv_timings, NULL))
continue;
if (vtotal(bt) != stdi->lcf + 1)
return -EINVAL;
return v4l2_enum_dv_timings_cap(timings,
- adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
+ adv76xx_get_dv_timings_cap(sd, timings->pad),
+ adv76xx_check_dv_timings, NULL);
}
static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
struct adv76xx_state *state = to_state(sd);
+ unsigned int pad = cap->pad;
if (cap->pad >= state->source_pad)
return -EINVAL;
- *cap = *adv76xx_get_dv_timings_cap(sd);
+ *cap = *adv76xx_get_dv_timings_cap(sd, pad);
+ cap->pad = pad;
+
return 0;
}
static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
- is_digital_input(sd) ? 250000 : 1000000,
- adv76xx_check_dv_timings, NULL);
+ v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
+ is_digital_input(sd) ? 250000 : 1000000,
+ adv76xx_check_dv_timings, NULL);
}
static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
bt = &timings->bt;
- if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+ if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
adv76xx_check_dv_timings, NULL))
return -ERANGE;
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
- goto err_unregister_v4l2_dev;
+ goto err_free_controls;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
if (__get_user(p, &up->menu_info))
return -EFAULT;
- umenus = compat_ptr(p);
- if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
- kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
- if (kmenus == NULL)
- return -EFAULT;
- kp->menu_info = kmenus;
-
- if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
+ kp->menu_info = compat_ptr(p);
return 0;
}
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus = kp->menu_info;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
__put_user(kp->menu_count, &up->menu_count))
if (__clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
- if (kp->menu_count == 0)
- return 0;
-
- if (get_user(p, &up->menu_info))
- return -EFAULT;
- umenus = compat_ptr(p);
-
- if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
return 0;
}
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
if (__get_user(p, &up->data))
return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- kdata = compat_alloc_user_space(kp->size);
- if (kdata == NULL)
- return -EFAULT;
- kp->data = kdata;
-
- if (copy_in_user(kdata, udata, kp->size))
- return -EFAULT;
+ kp->data = compat_ptr(p);
return 0;
}
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata = kp->data;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
- if (kp->size == 0)
- return 0;
-
- if (get_user(p, &up->data))
- return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- if (copy_in_user(udata, kdata, kp->size))
- return -EFAULT;
-
return 0;
}
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct uvc_fh *handle = file->private_data;
union {
struct uvc_xu_control_mapping xmap;
struct uvc_xu_control_query xqry;
} karg;
void __user *up = compat_ptr(arg);
- mm_segment_t old_fs;
long ret;
switch (cmd) {
case UVCIOC_CTRL_MAP32:
- cmd = UVCIOC_CTRL_MAP;
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+ if (ret)
+ return ret;
+ ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+ if (ret)
+ return ret;
+ ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+ if (ret)
+ return ret;
+
break;
case UVCIOC_CTRL_QUERY32:
- cmd = UVCIOC_CTRL_QUERY;
ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+ if (ret)
+ return ret;
+ ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+ if (ret)
+ return ret;
+ ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+ if (ret)
+ return ret;
break;
default:
return -ENOIOCTLCMD;
}
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = video_ioctl2(file, cmd, (unsigned long)&karg);
- set_fs(old_fs);
-
- if (ret < 0)
- return ret;
-
- switch (cmd) {
- case UVCIOC_CTRL_MAP:
- ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
- break;
-
- case UVCIOC_CTRL_QUERY:
- ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
- break;
- }
-
return ret;
}
#endif
* The determine_valid_ioctls() call already should ensure
* that this can never happen, but just in case...
*/
- if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_cropcap))
+ if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
return -ENOTTY;
if (ops->vidioc_cropcap)
/*
* Media Controller ancillary functions
*
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
* Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (C) 2006-2010 Nokia Corporation
* Copyright (c) 2016 Intel Corporation.
gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
- GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
p->cycle2cyclesamecsen);
break;
case MAX77620:
fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
+ break;
default:
return -EINVAL;
}
break;
case MAX77620:
fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
+ break;
default:
return -EINVAL;
}
/* synchronized under device mutex */
if (waitqueue_active(&cl->wait)) {
cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
- wake_up_interruptible(&cl->wait);
+ wake_up(&cl->wait);
}
}
goto idata_err;
}
- if (!idata->buf_bytes)
+ if (!idata->buf_bytes) {
+ idata->buf = NULL;
return idata;
+ }
idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro))
+ if (gpio_is_valid(gpio_ro)) {
ret = mmc_gpio_request_ro(mmc, gpio_ro);
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto out;
- } else {
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+ gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
}
if (gpio_is_valid(gpio_cd))
/* detect availability of ELM module. Won't be present pre-OMAP4 */
info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
- if (!info->elm_of_node)
- dev_dbg(dev, "ti,elm-id not in DT\n");
+ if (!info->elm_of_node) {
+ info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+ if (!info->elm_of_node)
+ dev_dbg(dev, "ti,elm-id not in DT\n");
+ }
/* select ecc-scheme for NAND */
if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
*/
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
- struct kstat stat;
int err, minor;
+ struct path path;
+ struct kstat stat;
/* Probably this is an MTD character device node path */
- err = vfs_stat(mtd_dev, &stat);
+ err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vfs_getattr(&path, &stat);
+ path_put(&path);
if (err)
return ERR_PTR(err);
return ERR_PTR(-EINVAL);
minor = MINOR(stat.rdev);
+
if (minor & 1)
/*
* Just do not think the "/dev/mtdrX" devices support is need,
int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
struct ubi_volume *vol = ubi->volumes[idx];
struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
- if (err) {
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+ ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
- data_size = offset + len;
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf + offset, 0xFF, len);
memcpy(ubi->peb_buf + offset, buf, len);
+ data_size = offset + len;
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+ if (err) {
+ mutex_unlock(&ubi->buf_mutex);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) {
mutex_unlock(&ubi->buf_mutex);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
+ struct path path;
struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = vfs_stat(pathname, &stat);
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return ERR_PTR(error);
+
+ error = vfs_getattr(&path, &stat);
+ path_put(&path);
if (error)
return ERR_PTR(error);
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ 0, 0, 0, 0, 0, 0
+};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+ MULTICAST_LACPDU_ADDR;
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
}
}
+static int __agg_active_ports(struct aggregator *agg)
+{
+ struct port *port;
+ int active = 0;
+
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (port->is_enabled)
+ active++;
+ }
+
+ return active;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
+ int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
- if (aggregator->num_of_ports) {
+ if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
- bandwidth = aggregator->num_of_ports;
+ bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
- bandwidth = aggregator->num_of_ports * 10;
+ bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
- bandwidth = aggregator->num_of_ports * 100;
+ bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
- bandwidth = aggregator->num_of_ports * 1000;
+ bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
- bandwidth = aggregator->num_of_ports * 2500;
+ bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_10000MBPS:
- bandwidth = aggregator->num_of_ports * 10000;
+ bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_20000MBPS:
- bandwidth = aggregator->num_of_ports * 20000;
+ bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_40000MBPS:
- bandwidth = aggregator->num_of_ports * 40000;
+ bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_56000MBPS:
- bandwidth = aggregator->num_of_ports * 56000;
+ bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
- bandwidth = aggregator->num_of_ports * 100000;
+ bandwidth = nports * 100000;
break;
default:
bandwidth = 0; /* to silence the compiler */
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
- if (curr->num_of_ports > best->num_of_ports)
+ if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
- if (curr->num_of_ports < best->num_of_ports)
+ if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
/*FALLTHROUGH*/
if (!port)
return 0;
- return netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev);
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev))
+ return 1;
+ }
+
+ return 0;
}
/**
agg->is_active = 0;
- if (agg->num_of_ports && agg_device_up(agg))
+ if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
* answering partner.
*/
if (active && active->lag_ports &&
- active->lag_ports->is_enabled &&
+ __agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
- aggregator->partner_system = null_mac_addr;
+ eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
if (aggregator) {
ad_clear_agg(aggregator);
- aggregator->aggregator_mac_address = null_mac_addr;
+ eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
- if (temp_aggregator->num_of_ports == 0) {
+ if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
+ struct aggregator *agg;
struct port *port;
+ bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
+ agg = __get_first_agg(port);
+ ad_agg_selection_logic(agg, &dummy);
+
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
- if (active->num_of_ports < bond->params.min_links) {
+ if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
}
/* check for initial state */
+ new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
---help---
- This driver supports the Geschwister Schneider USB/CAN devices.
+ This driver supports the Geschwister Schneider and bytewerk.org
+ candleLight USB CAN interfaces USB/CAN devices
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
+ - Kvaser Mini PCI Express 2xHS
+ - Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
- Kvaser USBcan Rugged ("USBcan Rev B")
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
*
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
*
* Many thanks to all socketcan devs!
*
#define USB_GSUSB_1_VENDOR_ID 0x1d50
#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
#define GSUSB_ENDPOINT_IN 1
#define GSUSB_ENDPOINT_OUT 2
static const struct usb_device_id gs_usb_table[] = {
{ USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+ USB_CANDLELIGHT_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
MODULE_DESCRIPTION(
"Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
MODULE_LICENSE("GPL v2");
#define USB_CAN_R_PRODUCT_ID 39
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
}
/* Kvaser USBCan-II devices */
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
/* USBCANII family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
- phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
unsigned int rx_ringsz;
unsigned int rxbuf_size;
- struct page *rx_page;
- unsigned int rx_page_offset;
- unsigned int rx_frag_size;
-
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
}
}
-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
- struct sk_buff *skb;
- struct page *page;
-
- if (alx->rx_frag_size > PAGE_SIZE)
- return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
- page = alx->rx_page;
- if (!page) {
- alx->rx_page = page = alloc_page(gfp);
- if (unlikely(!page))
- return NULL;
- alx->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + alx->rx_page_offset,
- alx->rx_frag_size);
- if (likely(skb)) {
- alx->rx_page_offset += alx->rx_frag_size;
- if (alx->rx_page_offset >= PAGE_SIZE)
- alx->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
-
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = alx_alloc_skb(alx, gfp);
+ /*
+ * When DMA RX address is set to something like
+ * 0x....fc0, it will be very likely to cause DMA
+ * RFD overflow issue.
+ *
+ * To work around it, we apply rx skb with 64 bytes
+ * longer space, and offset the address whenever
+ * 0x....fc0 is detected.
+ */
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
-
return count;
}
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
- if (alx->rx_page) {
- put_page(alx->rx_page);
- alx->rx_page = NULL;
- }
-
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
alx->dev->name, alx);
if (!err)
goto out;
-
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
- unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
-
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
-
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
return;
}
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ data[i] = *(unsigned long *)p;
}
}
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The hw forgot all entries after reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
-
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN accelaration on the RX side have to be
+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+
return features;
}
{
struct bnxt *bp = netdev_priv(dev);
u16 start = eeprom->offset, length = eeprom->len;
- int rc;
+ int rc = 0;
memset(data, 0, eeprom->len);
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
+ int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ if (!sq->sqs_mode) {
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+ tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+ tl4 += (svf * NIC_TL4_PER_LMAC);
+ tl4 += (bgx * NIC_TL4_PER_BGX);
+ }
tl4 += sq_idx;
- if (sq->sqs_mode)
- tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
unsigned int entry;
void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) {
+ dev->stats.tx_errors++;
+ goto out_no_free;
+ }
+
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
+out_no_free:
return NETDEV_TX_OK;
}
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
if (netdev->mem_end) {
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
} else {
/* Allocate buffer memory */
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
- goto error;
+ goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
- goto error;
+ goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
- goto error;
+ goto free;
}
/* Allow the platform setup code to pass in a MAC address. */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
+ ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
+#include <linux/workqueue.h>
#include "ibmvnic.h"
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
+
return 0;
bounce_map_failed:
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
goto reg_failed;
}
- scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
- dev_err(dev, "Error mapping irq\n");
- goto map_irq_failed;
- }
-
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
return scrq;
-map_irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
+ irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
+ irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
adapter->requested_caps = 0;
}
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
return IRQ_HANDLED;
}
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int i = 0, j = 0;
+ int rc = 0;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ scrq = adapter->tx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_tx_irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ scrq = adapter->rx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_rx_irq_failed;
+ }
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", scrq);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+ return rc;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++)
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++)
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ release_sub_crqs_no_irqs(adapter);
+ return rc;
+}
+
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int total_queues;
int more = 0;
- int i, j;
- int rc;
+ int i;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
- rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", adapter->tx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
- adapter->tx_scrq[i]->irq, rc);
- goto req_tx_irq_failed;
- }
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
- rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", adapter->rx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
- adapter->rx_scrq[i]->irq, rc);
- goto req_rx_irq_failed;
- }
}
memset(&crq, 0, sizeof(crq));
return;
-req_rx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
- i = adapter->req_tx_queues;
-req_tx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry(error_buff, &adapter->errors, list)
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
+ release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
out:
if (atomic_read(&adapter->running_cap_queries) == 0)
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry(error_buff, &adapter->errors,
- list) {
+ list_for_each_entry_safe(error_buff, tmp2,
+ &adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
- if (rc == 0)
- send_version_xchg(adapter);
+ if (!rc)
+ schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
.release = single_release,
};
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ vnic_crq_init);
+ struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ send_version_xchg(adapter);
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+ }
+ } while (adapter->renegotiate);
+ rc = init_sub_crq_irqs(adapter);
+
+ if (rc)
+ goto task_failed;
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(dev,
+ "failed to register netdev rc=%d\n", rc);
+ goto register_failed;
+ }
+ dev_info(dev, "ibmvnic registered\n");
+
+ return;
+
+register_failed:
+ release_sub_crqs(adapter);
+task_failed:
+ dev_err(dev, "Passive initialization was not successful\n");
+}
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
+ unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
+ INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
spin_lock_init(&adapter->stats_lock);
rc = ibmvnic_init_crq_queue(adapter);
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+ return 0;
do {
- adapter->renegotiate = false;
-
- init_sub_crqs(adapter, 0);
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
-
if (adapter->renegotiate) {
- release_sub_crqs(adapter);
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout))
+ return 0;
}
} while (adapter->renegotiate);
- /* if init_sub_crqs is partially successful, retry */
- while (!adapter->tx_scrq || !adapter->rx_scrq) {
- init_sub_crqs(adapter, 1);
-
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+ goto free_debugfs;
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_debugfs;
+ goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
+free_sub_crqs:
+ release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+
+ struct work_struct vnic_crq_init;
};
}
/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
return features;
}
if (!vsi || !macaddr)
return NULL;
+ /* Do not allow broadcast filter to be added since broadcast filter
+ * is added as part of add VSI for any newly created VSI except
+ * FDIR VSI
+ */
+ if (is_broadcast_ether_addr(macaddr))
+ return NULL;
+
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
aq_ret, pf->hw.aq.asq_last_status);
}
}
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- }
}
out:
/* if something went wrong then set the changed flag so we try again */
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int v_idx, num_q_vectors;
- int err;
+ int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
else
return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask);
+
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
+ current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+ if (unlikely(current_cpu >= nr_cpu_ids))
+ current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
+ i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
+ /* Except FDIR VSI, for all othet VSI set the broadcast filter */
+ if (vsi->type != I40E_VSI_FDIR) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (aq_ret) {
+ ret = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
- if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
- I40E_RX_PTYPE_INNER_PROT_UDP |
- I40E_RX_PTYPE_INNER_PROT_SCTP))
- tunnel = true;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall though */
+ default:
+ break;
+ }
return;
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
- if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
- I40E_RX_PTYPE_INNER_PROT_UDP |
- I40E_RX_PTYPE_INNER_PROT_SCTP))
- tunnel = true;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall though */
+ default:
+ break;
+ }
return;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0;
+ return min(work_done, budget - 1);
}
/**
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
return 0;
err_free_irq:
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
return 0;
err_free_bus:
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- dma_addr_t phy_ring_head, phy_ring_tail;
+ dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
+ ð->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head +
+ phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
return nfrags;
}
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
mtk_stop_queue(eth);
- if (unlikely(atomic_read(&ring->free_count) >
- ring->thresh))
- mtk_wake_queue(eth);
- }
+
spin_unlock_irqrestore(ð->page_lock, flags);
return NETDEV_TX_OK;
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
}
mtk_tx_unmap(eth->dev, tx_buf);
- ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
if (!total)
return 0;
- if (atomic_read(&ring->free_count) > ring->thresh)
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
return total;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
- ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
- MAX_SKB_FRAGS);
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
+ if (eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS,
+ MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
- eth->netdev[id]->watchdog_timeo = HZ;
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
* @rx_ring: Pointer to the memore holding info about the RX ring
* @rx_napi: The NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
struct mtk_rx_ring rx_ring;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
- spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
tx_size == priv->tx_ring[0]->size)
return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
+ mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
!channel->tx_count || !channel->rx_count)
return -EINVAL;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
+ mlx4_en_safe_replace_resources(priv, tmp);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
+ if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
+ goto out;
+ }
}
- if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
- en_dbg(HW, priv, "failed adding vlan %d\n", vid);
- mutex_unlock(&mdev->state_lock);
+ err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+ if (err)
+ en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0;
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
}
mutex_unlock(&mdev->state_lock);
- return 0;
+ return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
return -ENOMEM;
}
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+ rtnl_lock();
+ netif_device_detach(dev);
+ mlx4_en_close(dev);
+ rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src,
+ struct mlx4_en_port_profile *prof)
+{
+ memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->tx_ring_num = prof->tx_ring_num;
+ dst->rx_ring_num = prof->rx_ring_num;
+ dst->flags = prof->flags;
+ dst->mdev = src->mdev;
+ dst->port = src->port;
+ dst->dev = src->dev;
+ dst->prof = prof;
+ dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+ dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_ring)
+ return -ENOMEM;
+
+ dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_cq) {
+ kfree(dst->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src)
+{
+ memcpy(dst->rx_ring, src->rx_ring,
+ sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+ memcpy(dst->rx_cq, src->rx_cq,
+ sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+ memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->tx_ring_num = src->tx_ring_num;
+ dst->rx_ring_num = src->rx_ring_num;
+ dst->tx_ring = src->tx_ring;
+ dst->tx_cq = src->tx_cq;
+ memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof)
+{
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+ if (mlx4_en_alloc_resources(tmp)) {
+ en_warn(priv,
+ "%s: Resource allocation failed, using previous configuration\n",
+ __func__);
+ kfree(tmp->tx_ring);
+ kfree(tmp->tx_cq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp)
+{
+ mlx4_en_free_resources(priv);
+ mlx4_en_update_priv(priv, tmp);
+}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ bool shutdown = mdev->dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- unregister_netdev(dev);
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+ unregister_netdev(dev);
}
if (priv->allocated)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv);
+#endif
+
mlx4_en_free_resources(priv);
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- free_netdev(dev);
+ if (!shutdown)
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
* strip that feature if this is an IPv6 encapsulated frame.
*/
if (skb->encapsulation &&
- (skb->ip_summed == CHECKSUM_PARTIAL) &&
- (ip_hdr(skb)->version != 4))
- features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->vxlan_port ||
+ (ip_hdr(skb)->version != 4) ||
+ (udp_hdr(skb)->dest != priv->vxlan_port))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
return features;
}
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
return -EINVAL;
}
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+ ts_config.rx_filter,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type;
- priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+ mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
out:
mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
+ kfree(tmp);
+ if (!err)
+ netdev_features_change(dev);
return err;
}
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv);
-#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+ /* Notify mlx4 clients that the kernel is being shut down */
+ persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
+ }
mutex_unlock(&persist->interface_state_mutex);
}
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
+ u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
+ struct hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
- case MLX5_CMD_OP_2ERR_QP:
- case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
- case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
+
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}
pr_debug("\n");
}
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_cmd_work_ent *ent = container_of(dwork,
+ struct mlx5_cmd_work_ent,
+ cb_timeout_work);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+ cmd);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
}
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
-}
-
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
- err = ent->ret;
- } else {
- if (!wait_for_completion_timeout(&ent->done, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
+ } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+ ent->ret = -ETIMEDOUT;
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
+
+ err = ent->ret;
+
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
if (!callback)
init_completion(&ent->done);
+ INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
goto out_free;
}
- if (!callback) {
- err = wait_func(dev, ent);
- if (err == -ETIMEDOUT)
- goto out;
-
- ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
- stats = &cmd->stats[op];
- spin_lock_irq(&stats->lock);
- stats->sum += ds;
- ++stats->n;
- spin_unlock_irq(&stats->lock);
- }
- mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
- "fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
- free_cmd(ent);
- }
+ if (callback)
+ goto out;
- return err;
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), ds);
+ *status = ent->status;
out_free:
free_cmd(ent);
return err;
}
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
-
down(&cmd->pages_sem);
- flush_workqueue(cmd->wq);
-
- cmd->mode = CMD_MODE_EVENTS;
+ cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
- int i;
-
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
-
- down(&cmd->pages_sem);
-
- flush_workqueue(cmd->wq);
- cmd->mode = CMD_MODE_POLLING;
+ mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+ if (ent->callback)
+ cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
struct mlx5e_params {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};
struct mlx5e_cq {
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
struct mlx5e_cq cq;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
+ mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
+ MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
};
enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
+ struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
struct mlx5_core_dev *mdev;
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ if (!ets->tc_tx_bw[i])
+ return -EINVAL;
+
bw_sum += ets->tc_tx_bw[i];
+ }
}
if (bw_sum != 0 && bw_sum != 100)
#define MLX5E_NUM_SQ_STATS(priv) \
(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+ (hweight8(mlx5e_query_pfc_combined(priv)) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
/* SW counters */
for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
/* Q counters */
for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
/* VPORT counters */
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].name);
+ vport_stats_desc[i].format);
/* PPORT counters */
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].name);
+ pport_802_3_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].name);
+ pport_2863_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].name);
+ pport_2819_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
- prio,
- pport_per_prio_traffic_stats_desc[i].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
}
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
- prio, pport_per_prio_pfc_stats_desc[i].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, prio);
}
}
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
- rq_stats_desc[j].name);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "tx%d_%s",
- priv->channeltc_to_txq_map[i][tc],
- sq_stats_desc[j].name);
+ sq_stats_desc[j].format,
+ priv->channeltc_to_txq_map[i][tc]);
}
static void mlx5e_get_strings(struct net_device *dev,
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
+ MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- else
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
+ }
}
static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ int err;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ if (err)
+ netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+}
+
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- s->lro_packets += rq_stats->lro_packets;
- s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_sw += rq_stats->csum_sw;
- s->rx_csum_inner += rq_stats->csum_inner;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
- s->tso_packets += sq_stats->tso_packets;
- s->tso_bytes += sq_stats->tso_bytes;
- s->tso_inner_packets += sq_stats->tso_inner_packets;
- s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
- s->tx_csum_inner += sq_stats->csum_offload_inner;
- tx_offload_none += sq_stats->csum_offload_none;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ tx_offload_none += sq_stats->csum_none;
}
}
/* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none -
- s->rx_csum_sw;
+ s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+ s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
- s->link_down_events = MLX5_GET(ppcnt_reg,
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
}
{
struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
rq->wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
+ int tout = 0;
+ int err;
+
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq))
- msleep(20);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+ tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+ if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
+ mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
+ int tout = 0;
+ int err;
+
if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
/* prevent netif_tx_wake_queue */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_ERR);
+ if (err)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
- while (sq->cc != sq->pc) /* wait till sq is empty */
- msleep(20);
+ /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+ while (sq->cc != sq->pc &&
+ !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+ if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
+ mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
goto err_close_channels;
}
+ /* FIXME: This is a W/A for tx timeout watch dog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_start_all_queues(priv->netdev);
+
kfree(cparam);
return 0;
{
int i;
+ /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
netdev_set_num_tc(netdev, ntc);
+ /* Map netdev TCs to offset 0
+ * We have our own UP to TXQ mapping for QoS
+ */
for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+ netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
return features;
}
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool sched_work = false;
+ int i;
+
+ netdev_err(dev, "TX timeout detected\n");
+
+ for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+ if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ continue;
+ sched_work = true;
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ }
+
+ if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ schedule_work(&priv->tx_timeout_work);
+}
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
return -ENOMEM;
}
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct sk_buff *skb = rq->skb[ix];
+
+ if (skb) {
+ rq->skb[ix] = NULL;
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+}
+
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
return 0;
}
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+ wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- rq->stats.csum_sw++;
+ rq->stats.csum_complete++;
return;
}
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
skb->encapsulation = 1;
- rq->stats.csum_inner++;
+ rq->stats.csum_unnecessary_inner++;
}
return;
}
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ return 0;
+
if (cq->decmprs_left)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
struct counter_desc {
- char name[ETH_GSTRING_LEN];
+ char format[ETH_GSTRING_LEN];
int offset; /* Byte offset */
};
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 lro_packets;
- u64 lro_bytes;
- u64 rx_csum_good;
+ u64 tx_tso_packets;
+ u64 tx_tso_bytes;
+ u64 tx_tso_inner_packets;
+ u64 tx_tso_inner_bytes;
+ u64 rx_lro_packets;
+ u64 rx_lro_bytes;
+ u64 rx_csum_unnecessary;
u64 rx_csum_none;
- u64 rx_csum_sw;
- u64 rx_csum_inner;
- u64 tx_csum_offload;
- u64 tx_csum_inner;
+ u64 rx_csum_complete;
+ u64 rx_csum_unnecessary_inner;
+ u64 tx_csum_partial;
+ u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_cqe_compress_pkts;
/* Special handling counters */
- u64 link_down_events;
+ u64 link_down_events_phy;
};
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
struct mlx5e_qcounter_stats {
};
static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_error_packets",
- VPORT_COUNTER_OFF(received_errors.packets) },
- { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
- { "tx_vport_error_packets",
- VPORT_COUNTER_OFF(transmit_errors.packets) },
- { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
{ "rx_vport_unicast_packets",
VPORT_COUNTER_OFF(received_eth_unicast.packets) },
{ "rx_vport_unicast_bytes",
};
static const struct counter_desc pport_802_3_stats_desc[] = {
- { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
- { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
- { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
- { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "unsupported_op_rx",
- PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "pause_ctrl_tx",
- PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};
static const struct counter_desc pport_2863_stats_desc[] = {
- { "in_octets", PPORT_2863_OFF(if_in_octets) },
- { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
- { "in_discards", PPORT_2863_OFF(if_in_discards) },
- { "in_errors", PPORT_2863_OFF(if_in_errors) },
- { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
- { "out_octets", PPORT_2863_OFF(if_out_octets) },
- { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
- { "out_discards", PPORT_2863_OFF(if_out_discards) },
- { "out_errors", PPORT_2863_OFF(if_out_errors) },
- { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
- { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
- { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
- { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};
static const struct counter_desc pport_2819_stats_desc[] = {
- { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
- { "octets", PPORT_2819_OFF(ether_stats_octets) },
- { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
- { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
- { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
- { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
- { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
- { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
- { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
- { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
- { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "p1024to1518octets",
- PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "p1519to2047octets",
- PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "p2048to4095octets",
- PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "p4096to8191octets",
- PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "p8192to10239octets",
- PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+ { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
- u64 csum_sw;
- u64 csum_inner;
+ u64 csum_complete;
+ u64 csum_unnecessary_inner;
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
};
static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
};
struct mlx5e_sq_stats {
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
- u64 csum_offload_inner;
+ u64 csum_partial_inner;
u64 nop;
/* less likely accessed in data path */
- u64 csum_offload_none;
+ u64 csum_none;
u64 stopped;
u64 wake;
u64 dropped;
};
static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ /* channel_ix can be larger than num_channels since
+ * dev->num_real_tx_queues = num_channels * num_tc
+ */
+ if (channel_ix >= priv->params.num_channels)
+ channel_ix = reciprocal_scale(channel_ix,
+ priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
if (bf) {
u16 ihs = skb_headlen(skb);
return skb_headlen(skb);
}
- return MLX5E_MIN_INLINE;
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_offload_inner++;
+ sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
- sq->stats.csum_offload_none++;
+ sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
return mlx5e_sq_xmit(sq, skb);
}
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
sq = container_of(cq, struct mlx5e_sq, cq);
+ if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ return false;
+
npkts = 0;
nbytes = 0;
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
- if (IS_ERR_OR_NULL(fdb)) {
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
eth_zero_addr(dmac);
dmac[0] = 0x01;
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
goto out;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
goto out;
}
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
return err;
}
esw_fdb_set_vport_rule(esw,
mac,
vport_idx);
+ iter_vaddr->mc_promisc = true;
break;
case MLX5_ACTION_DEL:
if (!iter_vaddr)
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(vlan_grp)) {
+ if (IS_ERR(vlan_grp)) {
err = PTR_ERR(vlan_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
drop_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(drop_grp)) {
+ if (IS_ERR(drop_grp)) {
err = PTR_ERR(drop_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
vport->vport, err);
vport->egress.drop_grp = drop_grp;
vport->egress.allowed_vlans_grp = vlan_grp;
out:
- kfree(flow_group_in);
+ kvfree(flow_group_in);
if (err && !IS_ERR_OR_NULL(vlan_grp))
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR_OR_NULL(acl)) {
+ if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
vport->vport, err);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
g = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
vport->vport, err);
mlx5_destroy_flow_table(vport->ingress.acl);
}
- kfree(flow_group_in);
+ kvfree(flow_group_in);
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+ if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+ if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
- if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+ if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handle_locked(vport);
-
vport->enabled = true;
/* only PF is trusted by default */
vport->trusted = (vport_num) ? false : true;
-
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
- int err = 0;
struct mlx5_vport *evport;
+ u64 node_guid;
+ int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
mutex_lock(&esw->state_lock);
if (evport->enabled)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
-
return err;
}
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
{
int err = 0;
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
err = mlx5_init_fc_stats(dev);
if (err)
return err;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev);
if (err)
goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(dev);
- if (err)
- goto err;
- }
- if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(dev);
- if (err)
- goto err;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
}
return 0;
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
+ mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return;
+ goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ trigger_cmd_completions(dev);
+ }
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
+
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}
mlx5_pci_err_detected(dev->pdev, 0);
}
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
*/
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
+ u32 last_count = 0;
u32 count;
- u16 did;
int i;
- /* Wait for firmware to be ready after reset */
- msleep(1000);
- for (i = 0; i < niter; i++) {
- if (pci_read_config_word(pdev, 2, &did)) {
- dev_warn(&pdev->dev, "failed reading config word\n");
- break;
- }
- if (did == pdev->device) {
- dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
- break;
- }
- msleep(50);
- }
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
- break;
+ if (last_count && last_count != count) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ return 0;
+ }
+ last_count = count;
}
msleep(50);
}
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+ return -ETIMEDOUT;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
- wait_vital(pdev);
+ err = wait_vital(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ return;
+ }
err = mlx5_load_one(dev, priv);
if (err)
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};
func_id, npages, err);
goto out_4k;
}
- dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
return err;
}
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_manage_pages_inbox *in, int in_size,
+ struct mlx5_manage_pages_outbox *out, int out_size)
+{
+ struct fw_page *fwp;
+ struct rb_node *p;
+ u32 npages;
+ u32 i = 0;
+
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+ (u32 *)out, out_size);
+
+ npages = be32_to_cpu(in->num_entries);
+
+ p = rb_first(&dev->priv.page_root);
+ while (p && i < npages) {
+ fwp = rb_entry(p, struct fw_page, rb_node);
+ out->pas[i] = cpu_to_be64(fwp->addr);
+ p = rb_next(p);
+ i++;
+ }
+
+ out->num_entries = cpu_to_be32(i);
+ return 0;
+}
+
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed reclaiming pages\n");
- goto out_free;
- }
- dev->priv.fw_pages -= npages;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
err = -EINVAL;
goto out_free;
}
- if (nclaimed)
- *nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
+
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- free_4k(dev, fwp->addr);
- nclaimed = 1;
- } else {
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
- }
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
}
} while (p);
+ WARN(dev->priv.fw_pages,
+ "FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.fw_pages);
+ WARN(dev->priv.vfs_pages,
+ "VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.vfs_pages);
+
return 0;
}
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
struct mlx5e_vxlan *vxlan;
int err;
+ if (mlx5e_vxlan_lookup_port(priv, port))
+ goto free_work;
+
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
* Configures the switch priority to buffer table.
*/
#define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_pptb = {
.id = MLXSW_REG_PPTB_ID,
*/
MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
mlxsw_reg_pptb_local_port_set(payload, local_port);
mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}
/* PBMC - Port Buffer Management Control Register
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_is_up)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
- return 0;
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *p_module,
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
return 0;
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width)
-{
- u8 lane;
-
- return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
- p_width, &lane);
-}
-
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 module, u8 width, u8 lane)
{
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* TX header is consumed by HW on the way so we shouldn't count its
+ * bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- u8 module, width, lane;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
int err;
- err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- &module, &width, &lane);
- if (err) {
- netdev_err(dev, "Failed to retrieve module information\n");
- return err;
- }
-
if (!mlxsw_sp_port->split)
err = snprintf(name, len, "p%d", module + 1);
else
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+ SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
- bool is_up;
int err;
speed = ethtool_cmd_speed(cmd);
return err;
}
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
+ if (!netif_running(dev))
return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
return 0;
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
return local_port - offset;
}
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
+{
+ u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+ /* Split by four means we need to re-create two ports, otherwise
+ * only one.
+ */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
u8 module, cur_width, base_port;
int i;
int err;
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
} dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}
return err;
memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
return 0;
}
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
- if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+ if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+ pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
}
memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
return 0;
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* TX header is consumed by HW on the way so we shouldn't count its
+ * bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
- if ((intflags & EIR_TXIF) != 0) {
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
- locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ BUG_ON(!priv->tx_skb);
+
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);
netif_tx_wake_all_queues(nn->netdev);
- enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
}
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
goto err_free_exn;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
{
unsigned int r;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
netif_carrier_off(nn->netdev);
nn->link_up = false;
#define MEDIA_DA_TWINAX 0x3
#define MEDIA_BASE_T 0x4
#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
#define MEDIA_KR 0xf0
#define MEDIA_NOT_PRESENT 0xff
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
start.vport_id, start.mtu);
}
- qed_reset_vport_stats(cdev);
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
return 0;
}
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- if (IS_PF(hwfn->cdev)) {
- memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
- } else {
- qed_vf_get_link_params(hwfn, ¶ms);
- qed_vf_get_link_state(hwfn, &link);
- qed_vf_get_link_caps(hwfn, &link_caps);
+ if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
}
/* Set the link parameters to pass to protocol driver */
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- rmb();
-
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */
- barrier();
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rang */
- mmiowb();
+ wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
*p_en2 = *p_ent;
- kfree(p_ent);
+ /* EBLOCK responsible to free the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
p_ent = p_en2;
}
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to
+ * return to pool.
+ */
+ kfree(p_ent);
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
+#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;
if (!opcode)
return;
- ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
const struct efx_farch_register_test *regs,
size_t n_regs)
{
- unsigned address = 0, i, j;
+ unsigned address = 0;
+ int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
- int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
- memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, dev) {
- dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
}
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
priv->rx_tail_addr,
STMMAC_CHAN0);
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec ts;
+ struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
#endif
static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
+ struct geneve_dev *geneve = netdev_priv(dev);
/* The max_mtu calculation does not take account of GENEVE
* options, to avoid excluding potentially valid
* configurations.
*/
- int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
- - dev->hard_header_len;
+ int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
+ if (geneve->remote.sa.sa_family == AF_INET6)
+ max_mtu -= sizeof(struct ipv6hdr);
+ else
+ max_mtu -= sizeof(struct iphdr);
if (new_mtu < 68)
return -EINVAL;
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
- if (err)
- goto err;
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
if (err)
goto err;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
return dev;
err:
- free_netdev(dev);
+ geneve_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
dev_put(dev);
}
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+ unsigned char **iv,
+ struct scatterlist **sg)
+{
+ size_t size, iv_offset, sg_offset;
+ struct aead_request *req;
+ void *tmp;
+
+ size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+ iv_offset = size;
+ size += GCM_AES_IV_LEN;
+
+ size = ALIGN(size, __alignof__(struct scatterlist));
+ sg_offset = size;
+ size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+ if (!tmp)
+ return NULL;
+
+ *iv = (unsigned char *)(tmp + iv_offset);
+ *sg = (struct scatterlist *)(tmp + sg_offset);
+ req = tmp;
+
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
macsec_fill_sectag(hh, secy, pn);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
- macsec_fill_iv(iv, secy->sci, pn);
-
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
return ERR_PTR(-EINVAL);
}
- req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
+ macsec_fill_iv(iv, secy->sci, pn);
+
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
out:
macsec_rxsa_put(rx_sa);
dev_put(dev);
- return;
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct macsec_secy *secy)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
u16 icv_len = secy->icv_len;
if (!skb)
return ERR_PTR(-ENOMEM);
- req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
if (!req) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (!tfm || IS_ERR(tfm))
return NULL;
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
+ skb->dev = macsec->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
genl_unregister_family(&macsec_fam);
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
+ rcu_barrier();
}
module_init(macsec_init);
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret;
- u16 val, delay;
+ int ret, val;
+ u16 delay;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
}
if (phy_interface_is_rgmii(phydev)) {
- ret = phy_write(phydev, MII_DP83867_PHYCTRL,
- (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
+#include <linux/idr.h>
#define MII_REGS_NUM 29
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
+static DEFINE_IDA(phy_fixed_ida);
+
static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
if (gpio_is_valid(fp->link_gpio))
gpio_free(fp->link_gpio);
kfree(fp);
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
}
}
}
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
- spin_lock(&phy_fixed_addr_lock);
- if (phy_fixed_addr == PHY_MAX_ADDR) {
- spin_unlock(&phy_fixed_addr_lock);
- return ERR_PTR(-ENOSPC);
- }
- phy_addr = phy_fixed_addr++;
- spin_unlock(&phy_fixed_addr_lock);
+ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ if (phy_addr < 0)
+ return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
- if (ret < 0)
+ if (ret < 0) {
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
+ }
phy = get_phy_device(fmb->mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
list_del(&fp->node);
kfree(fp);
}
+ ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
return 0;
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* The Marvell PHY has an errata which requires
+ * that certain registers get written in order
+ * to restart autonegotiation
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ err = marvell_set_polarity(phydev, phydev->mdix);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+ MII_M1111_PHY_LED_DIRECT);
+ if (err < 0)
+ return err;
+
+ err = genphy_config_aneg(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ int bmcr;
+
+ /* A write to speed/duplex bits (that is performed by
+ * genphy_config_aneg() call above) must be followed by
+ * a software reset. Otherwise, the write has no effect.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
- err = genphy_config_aneg(phydev);
-
- return err;
+ return genphy_config_aneg(phydev);
}
static int m88e1318_config_aneg(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+ int err, oldpage;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
+ if (err < 0)
+ return err;
+
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+ /* Set marvell,reg-init configuration from device tree */
+ return marvell_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
return err;
}
- return marvell_config_init(phydev);
+ return m88e1121_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1111_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
* in all capable mode before using it.
*/
if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
- int timeout = 50000;
-
- /* set "all capable" mode and reset the phy */
+ /* set "all capable" mode */
rc |= MII_LAN83C185_MODE_ALL;
phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
- phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- /* wait end of reset (max 500 ms) */
- do {
- udelay(10);
- if (timeout-- == 0)
- return -1;
- rc = phy_read(phydev, MII_BMCR);
- } while (rc & BMCR_RESET);
}
- return 0;
+
+ /* reset the phy */
+ return genphy_soft_reset(phydev);
}
static int lan911x_config_init(struct phy_device *phydev)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
goto err_dev_open;
}
+ netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
if (cdc_ncm_init(dev))
goto error2;
+ /* Some firmwares need a pause here or they will silently fail
+ * to set up the interface properly. This value was decided
+ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+ * firmware.
+ */
+ usleep_range(10000, 20000);
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
/* Information for net-next */
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "3"
+#define NET_VERSION "5"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
+#define USB_BMU_RESET 0xd4b0
#define USB_UPS_CTRL 0xd800
#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN 0x01
+#define BMU_RESET_EP_OUT 0x02
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */
+#define AD_MASK 0xfee0
+#define EFUSE 0xcfdb
+#define PASS_THRU_MASK 0x1
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
int intr_interval;
return ret;
}
+/* Devices containing RTL8153-AD can support a persistent
+ * host system provided MAC address.
+ * Examples of this are Dell TB15 and Dell WD15 docks
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ int ret = -EINVAL;
+ u32 ocp_data;
+ unsigned char buf[6];
+
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) != 0x1000)
+ return -ENODEV;
+
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1)
+ return -ENODEV;
+
+ /* returns _AUXMAC_#AABBCCDDEEFF# */
+ status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ obj = (union acpi_object *)buffer.pointer;
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+ if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid buffer when reading pass-thru MAC addr: "
+ "(%d, %d)\n",
+ obj->type, obj->string.length);
+ goto amacout;
+ }
+ if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+ strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid header when reading pass-thru MAC addr\n");
+ goto amacout;
+ }
+ ret = hex2bin(buf, obj->string.pointer + 9, 6);
+ if (!(ret == 0 && is_valid_ether_addr(buf))) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid MAC when reading pass-thru MAC addr: "
+ "%d, %pM\n", ret, buf);
+ ret = -EINVAL;
+ goto amacout;
+ }
+ memcpy(sa->sa_data, buf, 6);
+ ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+ netif_info(tp, probe, tp->netdev,
+ "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+ kfree(obj);
+ return ret;
+}
+
static int set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
if (tp->version == RTL_VER_01)
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
- else
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ else {
+ /* if this is not an RTL8153-AD, no eFuse mac pass thru set,
+ * or system doesn't provide valid _SB.AMAC this will be
+ * be expected to non-zero
+ */
+ ret = vendor_mac_passthru_addr_read(tp, &sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ }
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
static void r8153_set_rx_early_size(struct r8152 *tp)
{
u32 mtu = tp->netdev->mtu;
- u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+ u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
u32 ocp_data;
u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- if (!(ocp_data & LAN_WAKE_EN))
- return 0;
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
if (ocp_data & LINK_ON_WAKE_EN)
wolopts |= WAKE_PHY;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
if (wolopts & WAKE_UCAST)
ocp_data |= UWF_EN;
if (wolopts & WAKE_BCAST)
ocp_data |= BWF_EN;
if (wolopts & WAKE_MCAST)
ocp_data |= MWF_EN;
- if (wolopts & WAKE_ANY)
- ocp_data |= LAN_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
if (enable) {
u32 ocp_data;
- r8153_u1u2en(tp, false);
- r8153_u2p3en(tp, false);
-
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
+ u32 ocp_data;
+
__rtl_set_wol(tp, tp->saved_wolopts);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+ rtl_runtime_suspend_enable(tp, enable);
+
+ if (enable) {
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ } else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
}
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+ ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
r8153_hw_phy_cfg(tp);
rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
{
r8153_aldps_en(tp, false);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
usb_enable_lpm(tp->udev);
}
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
- U1U2_SPDWN_EN | L1_SPDWN_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
- TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
- EEE_SPDWN_EN);
+ /* MAC clock speed down */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
- rtl_runtime_suspend_enable(tp, true);
+ tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
case RTL_VER_03:
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->autosuspend_en = rtl8153_runtime_enable;
break;
default:
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size)
+ if (dev->rx_urb_size > old_rx_urb_size) {
+ usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
+ usbnet_resume_rx(dev);
+ }
}
/* max qlen depend on hard_mtu and rx_urb_size */
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
- !timer_pending (&dev->delay) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
- BUG_ON(segCnt <= 1);
+ WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
dst_hold(&rt6->dst);
rt6->rt6i_table = rt6i_table;
- rt6->dst.output = vrf_output6;
+ rt6->dst.output = vrf_output6;
rcu_assign_pointer(vrf->rt6, rt6);
rc = 0;
if (!rth)
return -ENOMEM;
- rth->dst.output = vrf_output;
+ rth->dst.output = vrf_output;
rth->rt_table_id = vrf->tb_id;
rcu_assign_pointer(vrf->rth, rth);
return 0;
}
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
- u8 name_assign_type, struct vxlan_config *conf)
-{
- struct nlattr *tb[IFLA_MAX+1];
- struct net_device *dev;
- int err;
-
- memset(&tb, 0, sizeof(tb));
-
- dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
- if (IS_ERR(dev))
- return dev;
-
- err = vxlan_dev_configure(net, dev, conf);
- if (err < 0) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
.get_link_net = vxlan_get_link_net,
};
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0) {
+ LIST_HEAD(list_kill);
+
+ vxlan_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
- ar->running_fw->fw_file.fw_features,
+ fw_file->fw_features,
sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
return;
}
}
- ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
peer = ath10k_peer_find(ar, vdev_id, addr);
if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
addr, vdev_id);
ath10k_wmi_peer_delete(ar, vdev_id, addr);
- spin_unlock_bh(&ar->data_lock);
return -ENOENT;
}
#define AR9300_NUM_GPIO 16
#define AR9330_NUM_GPIO 16
#define AR9340_NUM_GPIO 23
-#define AR9462_NUM_GPIO 10
+#define AR9462_NUM_GPIO 14
#define AR9485_NUM_GPIO 12
#define AR9531_NUM_GPIO 18
#define AR9550_NUM_GPIO 24
#define AR9561_NUM_GPIO 23
-#define AR9565_NUM_GPIO 12
+#define AR9565_NUM_GPIO 14
#define AR9580_NUM_GPIO 16
#define AR7010_NUM_GPIO 16
#define AR9300_GPIO_MASK 0x0000F4FF
#define AR9330_GPIO_MASK 0x0000F4FF
#define AR9340_GPIO_MASK 0x0000000F
-#define AR9462_GPIO_MASK 0x000003FF
+#define AR9462_GPIO_MASK 0x00003FFF
#define AR9485_GPIO_MASK 0x00000FFF
#define AR9531_GPIO_MASK 0x0000000F
#define AR9550_GPIO_MASK 0x0000000F
#define AR9561_GPIO_MASK 0x0000000F
-#define AR9565_GPIO_MASK 0x00000FFF
+#define AR9565_GPIO_MASK 0x00003FFF
#define AR9580_GPIO_MASK 0x0000F4FF
#define AR7010_GPIO_MASK 0x0000FFFF
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
+ int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
+ } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
+ if (err) {
+ brcmf_err("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
}
}
done:
brcmu_pkt_buf_free_skb(skb);
return;
}
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
brcmf_netif_rx(ifp, skb);
}
if (idx != 0)
return -ENOENT;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
struct sk_buff *tail;
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
/* not a data packet */
if (!ieee80211_is_data_qos(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return -EIO;
}
-#define SCAN_TIMEOUT (16 * HZ)
+#define SCAN_TIMEOUT (20 * HZ)
void iwl_mvm_scan_timeout(unsigned long data)
{
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
* be the AP ID, and no station was passed by mac80211.
*/
- return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
}
return NULL;
struct ieee80211_key_seq seq;
const u8 *pn;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL] ||
!info->attrs[HWSIM_ATTR_TX_INFO])
goto out;
for (i = 0; i < retry; i++) {
path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
- if (path_a_ok == 0x03) {
+ if (path_b_ok == 0x03) {
val32 = rtl8xxxu_read32(priv,
REG_RX_POWER_BEFORE_IQK_B_2);
result[t][6] = (val32 >> 16) & 0x3ff;
void rtl_addr_delay(u32 addr)
{
if (addr == 0xfe)
- msleep(50);
+ mdelay(50);
else if (addr == 0xfd)
msleep(5);
else if (addr == 0xfc)
rtl_addr_delay(addr);
} else {
rtl_set_rfreg(hw, rfpath, addr, mask, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_rfreg_delay);
rtl_addr_delay(addr);
} else {
rtl_set_bbreg(hw, addr, MASKDWORD, data);
- usleep_range(1, 2);
+ udelay(1);
}
}
EXPORT_SYMBOL(rtl_bb_delay);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
+ unsigned long align;
+ enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
return -ENXIO;
}
+ align = le32_to_cpu(pfn_sb->align);
+ offset = le64_to_cpu(pfn_sb->dataoff);
+ if (align == 0)
+ align = 1UL << ilog2(offset);
+ mode = le32_to_cpu(pfn_sb->mode);
+
if (!nd_pfn->uuid) {
- /* from probe we allocate */
+ /*
+ * When probing a namepace via nd_pfn_probe() the uuid
+ * is NULL (see: nd_pfn_devinit()) we init settings from
+ * pfn_sb
+ */
nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
if (!nd_pfn->uuid)
return -ENOMEM;
+ nd_pfn->align = align;
+ nd_pfn->mode = mode;
} else {
- /* from init we validate */
+ /*
+ * When probing a pfn / dax instance we validate the
+ * live settings against the pfn_sb
+ */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
return -ENODEV;
+
+ /*
+ * If the uuid validates, but other settings mismatch
+ * return EINVAL because userspace has managed to change
+ * the configuration without specifying new
+ * identification.
+ */
+ if (nd_pfn->align != align || nd_pfn->mode != mode) {
+ dev_err(&nd_pfn->dev,
+ "init failed, settings mismatch\n");
+ dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+ nd_pfn->align, align, nd_pfn->mode,
+ mode);
+ return -EINVAL;
+ }
}
- if (nd_pfn->align == 0)
- nd_pfn->align = le32_to_cpu(pfn_sb->align);
- if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+ if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
- nd_pfn->align, nvdimm_namespace_capacity(ndns));
+ align, nvdimm_namespace_capacity(ndns));
return -EINVAL;
}
* namespace has changed since the pfn superblock was
* established.
*/
- offset = le64_to_cpu(pfn_sb->dataoff);
nsio = to_nd_namespace_io(&ndns->dev);
if (offset >= resource_size(&nsio->res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
return -EBUSY;
}
- if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+ if ((align && !IS_ALIGNED(offset, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
- dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
- offset);
+ dev_err(&nd_pfn->dev,
+ "bad offset: %#llx dax disabled align: %#lx\n",
+ offset, align);
return -ENXIO;
}
res->start += start_pad;
res->end -= end_trunc;
- nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < SZ_8K)
return ERR_PTR(-EINVAL);
return nsa->ns_id - nsb->ns_id;
}
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns *ns;
-
- lockdep_assert_held(&ctrl->namespaces_mutex);
+ struct nvme_ns *ns, *ret = NULL;
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid)
- return ns;
+ if (ns->ns_id == nsid) {
+ kref_get(&ns->kref);
+ ret = ns;
+ break;
+ }
if (ns->ns_id > nsid)
break;
}
- return NULL;
+ mutex_unlock(&ctrl->namespaces_mutex);
+ return ret;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
int node = dev_to_node(ctrl->dev);
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
- list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_add_tail(&ns->list, &ctrl->namespaces);
+ mutex_unlock(&ctrl->namespaces_mutex);
+
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
static void nvme_ns_remove(struct nvme_ns *ns)
{
- lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+
+ mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
- synchronize_rcu();
+ mutex_unlock(&ns->ctrl->namespaces_mutex);
+
nvme_put_ns(ns);
}
{
struct nvme_ns *ns;
- ns = nvme_find_ns(ctrl, nsid);
+ ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
} else
nvme_alloc_ns(ctrl, nsid);
}
nvme_validate_ns(ctrl, nsid);
while (++prev < nsid) {
- ns = nvme_find_ns(ctrl, prev);
- if (ns)
+ ns = nvme_find_get_ns(ctrl, prev);
+ if (ns) {
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
}
}
nn -= j;
struct nvme_ns *ns, *next;
unsigned i;
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
if (nvme_identify_ctrl(ctrl, &id))
return;
- mutex_lock(&ctrl->namespaces_mutex);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
}
nvme_scan_ns_sequential(ctrl, nn);
done:
+ mutex_lock(&ctrl->namespaces_mutex);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
- mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
- mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
- if (!kref_get_unless_zero(&ns->kref))
- continue;
-
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
-
- nvme_put_ns(ns);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
spin_lock_irq(ns->queue->queue_lock);
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
spin_unlock_irq(ns->queue->queue_lock);
blk_mq_cancel_requeue_work(ns->queue);
blk_mq_stop_hw_queues(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int bars;
+
if (dev->bar)
iounmap(dev->bar);
- pci_release_regions(to_pci_dev(dev->dev));
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ pci_release_selected_regions(pdev, bars);
}
static void nvme_pci_disable(struct nvme_dev *dev)
return 0;
release:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, bars);
return -ENODEV;
}
struct device_node **nodepp)
{
struct device_node *root;
- int offset = 0, depth = 0;
+ int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
unsigned int fpsizes[FDT_MAX_DEPTH];
struct device_node *nps[FDT_MAX_DEPTH];
if (nodepp)
*nodepp = NULL;
+ /*
+ * We're unflattening device sub-tree if @dad is valid. There are
+ * possibly multiple nodes in the first level of depth. We need
+ * set @depth to 1 to make fdt_next_node() happy as it bails
+ * immediately when negative @depth is found. Otherwise, the device
+ * nodes except the first one won't be unflattened successfully.
+ */
+ if (dad)
+ depth = initial_depth = 1;
+
root = dad;
fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
nps[depth] = dad;
+
for (offset = 0;
- offset >= 0 && depth >= 0;
+ offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
continue;
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
- * @index: zero-based index of the irq
- *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created.
+ * @index: zero-based index of the IRQ
*
+ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
+ * of any other failure.
*/
int of_irq_get(struct device_node *dev, int index)
{
EXPORT_SYMBOL_GPL(of_irq_get);
/**
- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
- * @name: irq name
+ * @name: IRQ name
*
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created, or error code in case of any other failure.
+ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
+ * of any other failure.
*/
int of_irq_get_byname(struct device_node *dev, const char *name)
{
}
/* Need adjust the alignment to satisfy the CMA requirement */
- if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
- align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+ if (IS_ENABLED(CONFIG_CMA)
+ && of_flat_dt_is_compatible(node, "shared-dma-pool")
+ && of_get_flat_dt_prop(node, "reusable", NULL)
+ && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+ unsigned long order =
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+ align = max(align, (phys_addr_t)PAGE_SIZE << order);
+ }
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
else
pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
*(u16 *)buf);
- buf += 2;
+ buf += 4;
}
- len += 2;
+ len += 4;
/*
* If we have any Low Priority VCs and a VC Arbitration Table Offset
if (!ret)
ret = init_fn(pmu);
} else {
- ret = probe_current_pmu(pmu, probe_table);
cpumask_setall(&pmu->supported_cpus);
+ ret = probe_current_pmu(pmu, probe_table);
}
if (ret) {
}
usb2->phy = devm_phy_create(dev, NULL, &ops);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ if (IS_ERR(usb2->phy))
+ return PTR_ERR(usb2->phy);
phy_set_drvdata(usb2->phy, usb2);
platform_set_drvdata(pdev, usb2);
struct exynos_mipi_video_phy *state)
{
u32 val;
+ int ret;
+
+ ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+ if (ret)
+ return 0;
- regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
return val & data->resetn_val;
}
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
- miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+ miphy_phy->miphy_rst =
+ of_reset_control_get_shared(node, "miphy-sw-rst");
if (IS_ERR(miphy_phy->miphy_rst)) {
dev_err(miphy_dev->dev,
extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
}
-static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
-{
- return !!(readl(ch->base + USB2_ADPCTRL) &
- USB2_ADPCTRL_OTGSESSVLD);
-}
-
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
{
- bool is_host = true;
-
- /* B-device? */
- if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
- is_host = false;
-
- if (is_host)
+ if (!rcar_gen3_check_id(ch))
rcar_gen3_init_for_host(ch);
else
rcar_gen3_init_for_peri(ch);
return -ENODEV;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
- if (IS_ERR(dp))
+ if (!dp)
return -ENOMEM;
dp->dev = dev;
phy_dev->dev = dev;
dev_set_drvdata(dev, phy_dev);
- phy_dev->rstc = devm_reset_control_get(dev, "global");
+ phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
if (IS_ERR(phy_dev->rstc)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstc);
}
- phy_dev->rstport = devm_reset_control_get(dev, "port");
+ phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
if (IS_ERR(phy_dev->rstport)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstport);
{
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
- void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+ void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
int i;
mutex_lock(&phy_data->mutex);
if (data->vbus_power_nb_registered)
power_supply_unreg_notifier(&data->vbus_power_nb);
- if (data->id_det_irq >= 0)
+ if (data->id_det_irq > 0)
devm_free_irq(dev, data->id_det_irq, data);
- if (data->vbus_det_irq >= 0)
+ if (data->vbus_det_irq > 0)
devm_free_irq(dev, data->vbus_det_irq, data);
cancel_delayed_work_sync(&data->detect);
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
- if ((data->id_det_gpio && data->id_det_irq < 0) ||
- (data->vbus_det_gpio && data->vbus_det_irq < 0))
+ if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+ (data->vbus_det_gpio && data->vbus_det_irq <= 0))
data->phy0_poll = true;
- if (data->id_det_irq >= 0) {
+ if (data->id_det_irq > 0) {
ret = devm_request_irq(dev, data->id_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
}
}
- if (data->vbus_det_irq >= 0) {
+ if (data->vbus_det_irq > 0) {
ret = devm_request_irq(dev, data->vbus_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
ret = ti_pipe3_dpll_wait_lock(phy);
}
- /* Program the DPLL only if not locked */
+ /* SATA has issues if re-programmed when locked */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (!(val & PLL_LOCK))
- if (ti_pipe3_dpll_program(phy))
- return -EINVAL;
+ if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+ "ti,phy-pipe3-sata"))
+ return ret;
+
+ /* Program the DPLL */
+ ret = ti_pipe3_dpll_program(phy);
+ if (ret) {
+ ti_pipe3_disable_clocks(phy);
+ return -EINVAL;
+ }
return ret;
}
twl4030_usb_set_mode(twl, twl->usb_mode);
if (twl->usb_mode == T2_USB_MODE_ULPI)
twl4030_i2c_access(twl, 0);
- schedule_delayed_work(&twl->id_workaround_work, 0);
+ twl->linkstat = MUSB_UNKNOWN;
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
return 0;
}
struct twl4030_usb *twl = _twl;
enum musb_vbus_id_status status;
bool status_changed = false;
+ int err;
status = twl4030_usb_linkstat(twl);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
}
- musb_mailbox(status);
+ err = musb_mailbox(status);
+ if (err)
+ twl->linkstat = MUSB_UNKNOWN;
}
/* don't schedule during sleep - irq works right then */
struct twl4030_usb *twl = phy_get_drvdata(phy);
pm_runtime_get_sync(twl->dev);
- schedule_delayed_work(&twl->id_workaround_work, 0);
+ twl->linkstat = MUSB_UNKNOWN;
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
if (cable_present(twl->linkstat))
pm_runtime_put_noidle(twl->dev);
pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put_sync_suspend(twl->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(twl->dev);
pm_runtime_disable(twl->dev);
/* autogate 60MHz ULPI clock,
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
-obj-$(CONFIG_PINCTRL_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
pin_reg = &info->pin_regs[pin_id];
if (pin_reg->mux_reg == -1) {
- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
info->pins[pin_id].name);
- return -EINVAL;
+ continue;
}
if (info->flags & SHARE_MUX_CONF_REG) {
static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
-static const unsigned int byt_score_plt_clk4_pins[] = { 99 };
-static const unsigned int byt_score_plt_clk5_pins[] = { 100 };
-static const unsigned int byt_score_plt_clk3_pins[] = { 101 };
+static const unsigned int byt_score_plt_clk3_pins[] = { 99 };
+static const unsigned int byt_score_plt_clk4_pins[] = { 100 };
+static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
SIMPLE_FUNC("plt_clk", 1),
};
}
EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned num_maps)
+{
+ pinctrl_utils_free_map(pctldev, map, num_maps);
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_free_map);
+
#endif
else
mask &= ~soc_mask;
pcs->write(mask, pcswi->reg);
+
+ /* flush posted write */
+ mask = pcs->read(pcswi->reg);
raw_spin_unlock(&pcs->lock);
}
-obj-y += pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
goto exit;
}
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
config DELL_LAPTOP
tristate "Dell Laptop Extras"
- depends on X86
depends on DELL_SMBIOS
depends on DMI
depends on BACKLIGHT_CLASS_DEVICE
config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
- depends on INPUT && X86
+ depends on INPUT
select INPUT_POLLDEV
default n
help
config ACPI_CMPC
tristate "CMPC Laptop Extras"
- depends on X86 && ACPI
+ depends on ACPI
depends on RFKILL || RFKILL=n
select INPUT
select BACKLIGHT_CLASS_DEVICE
config INTEL_PMC_CORE
bool "Intel PMC Core driver"
- depends on X86 && PCI
+ depends on PCI
---help---
The Intel Platform Controller Hub for Intel Core SoCs provides access
to Power Management Controller registers via a PCI interface. This
config IBM_RTL
tristate "Device driver to enable PRTL support"
- depends on X86 && PCI
+ depends on PCI
---help---
Enable support for IBM Premium Real Time Mode (PRTM).
This module will allow you the enter and exit PRTM in the BIOS via
config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
- depends on X86
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 7, { KEY_CAMERA } },
+ { KE_KEY, 8, { KEY_MICMUTE } },
{ KE_KEY, 11, { KEY_F16 } },
{ KE_KEY, 13, { KEY_WLAN } },
{ KE_KEY, 16, { KEY_PROG1 } },
break;
case 13:
case 11:
+ case 8:
case 7:
case 6:
ideapad_input_report(priv, vpc_bit);
static u32 hotkey_orig_mask; /* events the BIOS had enabled */
static u32 hotkey_all_mask; /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */
static u32 hotkey_reserved_mask; /* events better left disabled */
static u32 hotkey_driver_mask; /* events needed by the driver */
static u32 hotkey_user_mask; /* events visible to userspace */
static DEVICE_ATTR_RO(hotkey_all_mask);
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
/* sysfs hotkey recommended_mask --------------------------------------- */
static ssize_t hotkey_recommended_mask_show(struct device *dev,
struct device_attribute *attr,
&dev_attr_wakeup_hotunplug_complete.attr,
&dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_adaptive_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
&dev_attr_hotkey_source_mask.attr,
if (!tp_features.hotkey)
return 1;
- /*
- * Check if we have an adaptive keyboard, like on the
- * Lenovo Carbon X1 2014 (2nd Gen).
- */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- tp_features.has_adaptive_kbd = true;
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &adaptive_kbd_attr_group);
- if (res)
- goto err_exit;
- }
- }
-
quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
ARRAY_SIZE(tpacpi_hotkey_qtable));
A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) != 1) {
- pr_err("unknown version of the HKEY interface: 0x%x\n",
- hkeyv);
- pr_err("please report this to %s\n", TPACPI_MAIL);
- } else {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "firmware HKEY interface version: 0x%x\n",
+ hkeyv);
+
+ switch (hkeyv >> 8) {
+ case 1:
/*
* MHKV 0x100 in A31, R40, R40e,
* T4x, X31, and later
*/
- vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "firmware HKEY interface version: 0x%x\n",
- hkeyv);
/* Paranoia check AND init hotkey_all_mask */
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- pr_err("missing MHKA handler, "
- "please report this to %s\n",
+ pr_err("missing MHKA handler, please report this to %s\n",
TPACPI_MAIL);
/* Fallback: pre-init for FN+F3,F4,F12 */
hotkey_all_mask = 0x080cU;
} else {
tp_features.hotkey_mask = 1;
}
+ break;
+
+ case 2:
+ /*
+ * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+ */
+
+ /* Paranoia check AND init hotkey_all_mask */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "dd", 1)) {
+ pr_err("missing MHKA handler, please report this to %s\n",
+ TPACPI_MAIL);
+ /* Fallback: pre-init for FN+F3,F4,F12 */
+ hotkey_all_mask = 0x080cU;
+ } else {
+ tp_features.hotkey_mask = 1;
+ }
+
+ /*
+ * Check if we have an adaptive keyboard, like on the
+ * Lenovo Carbon X1 2014 (2nd Gen).
+ */
+ if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+ "MHKA", "dd", 2)) {
+ if (hotkey_adaptive_all_mask != 0) {
+ tp_features.has_adaptive_kbd = true;
+ res = sysfs_create_group(
+ &tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+ if (res)
+ goto err_exit;
+ }
+ } else {
+ tp_features.has_adaptive_kbd = false;
+ hotkey_adaptive_all_mask = 0x0U;
+ }
+ break;
+
+ default:
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
+ break;
}
}
WARN_ON(tzd == NULL);
psy = tzd->devdata;
- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ if (ret)
+ return ret;
/* Convert tenths of degree Celsius to milli degree Celsius. */
- if (!ret)
- *temp = val.intval * 100;
+ *temp = val.intval * 100;
return ret;
}
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_charger *charger;
+ struct power_supply_config cfg = {};
int ret;
dev_dbg(&pdev->dev, "%s\n", __func__);
charger->tps = tps;
charger->dev = &pdev->dev;
+ cfg.of_node = pdev->dev.of_node;
+ cfg.drv_data = charger;
+
charger->ac = devm_power_supply_register(&pdev->dev,
&tps65217_charger_desc,
- NULL);
+ &cfg);
if (IS_ERR(charger->ac)) {
dev_err(&pdev->dev, "failed: power supply register\n");
return PTR_ERR(charger->ac);
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
- if (strcmp(pardev->name, KBUILD_MODNAME))
+ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
{
int err;
- if (!pwm)
+ if (!pwm || !state || !state->period ||
+ state->duty_cycle > state->period)
return -EINVAL;
if (!memcmp(state, &pwm->state, sizeof(*state)))
chip->chip.of_pwm_n_cells = 3;
chip->chip.can_sleep = 1;
- ret = pwmchip_add(&chip->chip);
+ ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
goto unlock;
}
- pwm_apply_state(pwm, &state);
+ ret = pwm_apply_state(pwm, &state);
unlock:
mutex_unlock(&export->lock);
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
- if (!sreg->sel) {
+ if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
}
unsigned int val;
int ret;
+ if (!rinfo)
+ return 0;
+
switch (fps_src) {
case MAX77620_FPS_SRC_0:
case MAX77620_FPS_SRC_1:
int pd = rpdata->active_fps_pd_slot;
int ret = 0;
+ if (!rinfo)
+ return 0;
+
if (is_suspend) {
pu = rpdata->suspend_fps_pu_slot;
pd = rpdata->suspend_fps_pd_slot;
RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
- RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
+ .list_voltage = regulator_list_voltage_linear_range,
+
+ .get_voltage = rpm_reg_get_voltage,
+ .set_voltage = rpm_reg_set_voltage,
+
+ .set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
static const struct regulator_desc pm8941_lnldo = {
.fixed_uV = 1740000,
.n_voltages = 1,
- .ops = &rpm_smps_ldo_ops,
+ .ops = &rpm_smps_ldo_ops_fixed,
};
static const struct regulator_desc pm8941_switch = {
int ramp_delay)
{
struct tps51632_chip *tps = rdev_get_drvdata(rdev);
- int bit = ramp_delay/6000;
+ int bit;
int ret;
- if (bit)
- bit--;
+ if (ramp_delay == 0)
+ bit = 0;
+ else
+ bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
+
ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
if (ret < 0)
dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
qeth_l2_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
qeth_l3_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
} else {
struct scsi_cmnd *SCp;
- SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
+ SCp = SDp->current_cmnd;
if(unlikely(SCp == NULL)) {
sdev_printk(KERN_ERR, SDp,
"no saved request for untagged cmd\n");
slot->tag, slot);
} else {
slot->tag = SCSI_NO_TAG;
- /* must populate current_cmnd for scsi_host_find_tag to work */
+ /* save current command for reselection */
SCp->device->current_cmnd = SCp;
}
/* sanity check: some of the commands generated by the mid-layer
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
if (!vha->flags.online)
return;
- if (rsp->msix->cpuid != smp_processor_id()) {
+ if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
/* if kernel does not notify qla of IRQ's CPU change,
* then set it here.
*/
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
- vmax = 8; /* max length of vendor */
+ vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;
- mmax = 16; /* max length of model */
+ mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
- devinfo->vendor[vmax])
+ (vmax < sizeof(devinfo->vendor) &&
+ devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
- devinfo->model[mmax])
+ (mmax < sizeof(devinfo->model) &&
+ devinfo->model[mmax]))
continue;
return devinfo;
} else {
*/
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
- scmd->device->host->host_failed--;
scmd->eh_eflags = 0;
list_move_tail(&scmd->eh_entry, done_q);
}
else
scsi_unjam_host(shost);
+ /* All scmds have been handled */
+ shost->host_failed = 0;
+
/*
* Note - if the above fails completely, the action is to take
* individual devices offline and flush the queue of any
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
- rw_max = q->limits.io_opt =
- sdkp->opt_xfer_blocks * sdp->sector_size;
- else
+ logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ } else
rw_max = BLK_DEF_MAX_SECTORS;
/* Combine with controller limits */
return blocks << (ilog2(sdev->sector_size) - 9);
}
+static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks * sdev->sector_size;
+}
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
struct spi_device *spi,
struct spi_transfer *xfer)
{
- int ret = 1;
+ int ret = 0;
struct rockchip_spi *rs = spi_master_get_devdata(master);
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
spi_enable_chip(rs, 1);
ret = rockchip_spi_prepare_dma(rs);
}
+ /* successful DMA prepare means the transfer is in progress */
+ ret = ret ? ret : 1;
} else {
spi_enable_chip(rs, 1);
ret = rockchip_spi_pio_transfer(rs);
{
struct sun4i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
/* We don't support transfer larger than the FIFO */
if (tfr->len > SUN4I_FIFO_DEPTH)
- return -EINVAL;
+ return -EMSGSIZE;
+
+ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ return -EMSGSIZE;
reinit_completion(&sspi->done);
sspi->tx_buf = tfr->tx_buf;
sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
- /* Fill the TX FIFO */
- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /*
+ * Fill the TX FIFO
+ * Filling the FIFO fully causes timeout for some reason
+ * at least on spi2 on A10s
+ */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
static int ti_qspi_remove(struct platform_device *pdev)
{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ int rc;
+
+ rc = spi_master_suspend(qspi->master);
+ if (rc)
+ return rc;
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
goto error_ret_mut;
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
mutex_unlock(&st->lock);
- if (ret)
+ if (ret < 0)
goto error_ret;
val = ret;
if (base_freq > 0)
{
struct spi_device *spi = to_spi_device(dev);
int i, ret;
- unsigned short *data;
+ unsigned short *data = buf;
__be16 *bdata = buf;
ret = spi_read(spi, buf, count * 2);
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
- if (val > 511)
- val = (val >> 1) | (1 << 9);
- else if (val > 1022)
+ if (val > 1022)
val = (val >> 2) | (3 << 9);
+ else if (val > 511)
+ val = (val >> 1) | (1 << 9);
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
return 0;
failed:
- if (ni)
+ if (ni) {
lnet_ni_decref(ni);
+ rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+ rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+ }
rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
- rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
kiblnd_reject(cmid, &rej);
return -ECONNREFUSED;
if (!efuseTbl)
return;
- eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
if (!eFuseWord) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
goto eFuseWord_failed;
{
struct hal_ops *halfunc = &adapt->HalFunc;
- adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
+
+ adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
if (!adapt->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
goto free_power_table;
}
- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
- cpufreq_dev->id);
-
- cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
- if (IS_ERR(cool_dev))
- goto remove_idr;
-
/* Fill freq-table in descending order of frequencies */
for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
freq = find_next_max(table, freq);
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
+ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+ cpufreq_dev->id);
+
+ cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+ &cpufreq_cooling_ops);
+ if (IS_ERR(cool_dev))
+ goto remove_idr;
+
cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
fsi = tty->driver_data;
else
fsi = tty->link->driver_data;
- devpts_kill_index(fsi, tty->index);
- devpts_release(fsi);
+
+ if (fsi) {
+ devpts_kill_index(fsi, tty->index);
+ devpts_release(fsi);
+ }
}
static const struct tty_operations ptm_unix98_ops = {
static void do_compute_shiftstate(void)
{
- unsigned int i, j, k, sym, val;
+ unsigned int k, sym, val;
shift_state = 0;
memset(shift_down, 0, sizeof(shift_down));
- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
- if (!key_down[i])
+ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+ sym = U(key_maps[0][k]);
+ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
continue;
- k = i * BITS_PER_LONG;
-
- for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
- if (!test_bit(k, key_down))
- continue;
+ val = KVAL(sym);
+ if (val == KVAL(K_CAPSSHIFT))
+ val = KVAL(K_SHIFT);
- sym = U(key_maps[0][k]);
- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
- continue;
-
- val = KVAL(sym);
- if (val == KVAL(K_CAPSSHIFT))
- val = KVAL(K_SHIFT);
-
- shift_down[val]++;
- shift_state |= (1 << val);
- }
+ shift_down[val]++;
+ shift_state |= BIT(val);
}
}
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
vc->vc_panic_force_write = false;
+ vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
vc->vc_sw->con_init(vc, init);
if (!vc->vc_complement_mask)
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
return fsm->state_changed;
}
EXPORT_SYMBOL_GPL(otg_statemachine);
+MODULE_LICENSE("GPL");
* Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
* deallocated.
*
- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
- * freed. When hcd_release() is called for either hcd in a peer set
- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
- * block new peering attempts
+ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
+ * freed. When hcd_release() is called for either hcd in a peer set,
+ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
*/
static void hcd_release(struct kref *kref)
{
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
mutex_lock(&usb_port_peer_mutex);
- if (usb_hcd_is_primary_hcd(hcd)) {
- kfree(hcd->address0_mutex);
- kfree(hcd->bandwidth_mutex);
- }
if (hcd->shared_hcd) {
struct usb_hcd *peer = hcd->shared_hcd;
peer->shared_hcd = NULL;
- if (peer->primary_hcd == hcd)
- peer->primary_hcd = NULL;
+ peer->primary_hcd = NULL;
+ } else {
+ kfree(hcd->address0_mutex);
+ kfree(hcd->bandwidth_mutex);
}
mutex_unlock(&usb_port_peer_mutex);
kfree(hcd);
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* USB3503 */
+ { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Microsoft Wireless Laser Mouse 6000 Receiver */
{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
/* MAYA44USB sound device */
{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* ASUS Base Station(T100) */
+ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
{ USB_DEVICE(0x1908, 0x1315), .driver_info =
USB_QUIRK_HONOR_BNUMINTERFACES },
- /* INTEL VALUE SSD */
- { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
-
- /* USB3503 */
- { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
-
- /* ASUS Base Station(T100) */
- { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
/* Protocol and OTG Electrical Test Device */
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Acer C120 LED Projector */
+ { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+
/* Blackmagic Design Intensity Shuttle */
{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* INTEL VALUE SSD */
+ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
{ } /* terminating entry must be last */
};
DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
dev_name(hsotg->dev), ##__VA_ARGS__)
+#ifdef CONFIG_MIPS
+/*
+ * There are some MIPS machines that can run in either big-endian
+ * or little-endian mode and that use the dwc2 register without
+ * a byteswap in both ways.
+ * Unlike other architectures, MIPS apparently does not require a
+ * barrier before the __raw_writel() to synchronize with DMA but does
+ * require the barrier after the __raw_writel() to serialize a set of
+ * writes. This set of operations was added specifically for MIPS and
+ * should only be used there.
+ */
static inline u32 dwc2_readl(const void __iomem *addr)
{
u32 value = __raw_readl(addr);
pr_info("INFO:: wrote %08x to %p\n", value, addr);
#endif
}
+#else
+/* Normal architectures just use readl/write */
+static inline u32 dwc2_readl(const void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline void dwc2_writel(u32 value, void __iomem *addr)
+{
+ writel(value, addr);
+
+#ifdef DWC2_LOG_WRITES
+ pr_info("info:: wrote %08x to %p\n", value, addr);
+#endif
+}
+#endif
/* Maximum number of Endpoints/HostChannels */
#define MAX_EPS_CHANNELS 16
return 1;
}
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
/**
* get_ep_head - return the first request on the endpoint
case USB_ENDPOINT_HALT:
halted = ep->halted;
- dwc2_hsotg_ep_sethalt(&ep->ep, set);
+ dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
* dwc2_hsotg_ep_sethalt - set halt on a given endpoint
* @ep: The endpoint to set halt.
* @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ * the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if request comes from set_feature
+ * protocol command handler.
*/
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
return 0;
}
+ if (hs_ep->isochronous) {
+ dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
+ return -EINVAL;
+ }
+
+ if (!now && value && !list_empty(&hs_ep->queue)) {
+ dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+ ep->name);
+ return -EAGAIN;
+ }
+
if (hs_ep->dir_in) {
epreg = DIEPCTL(index);
epctl = dwc2_readl(hs->regs + epreg);
int ret = 0;
spin_lock_irqsave(&hs->lock, flags);
- ret = dwc2_hsotg_ep_sethalt(ep, value);
+ ret = dwc2_hsotg_ep_sethalt(ep, value, false);
spin_unlock_irqrestore(&hs->lock, flags);
return ret;
#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
+#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
#define DWC3_DEPCMD_CMDACT (1 << 10)
#define DWC3_DEPCMD_CMDIOC (1 << 8)
platform_set_drvdata(pdev, exynos);
- ret = dwc3_exynos_register_phys(exynos);
- if (ret) {
- dev_err(dev, "couldn't register PHYs\n");
- return ret;
- }
-
exynos->dev = dev;
exynos->clk = devm_clk_get(dev, "usbdrd30");
goto err3;
}
+ ret = dwc3_exynos_register_phys(exynos);
+ if (ret) {
+ dev_err(dev, "couldn't register PHYs\n");
+ goto err4;
+ }
+
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto err4;
+ goto err5;
}
} else {
dev_err(dev, "no device node, failed to add dwc3 core\n");
ret = -ENODEV;
- goto err4;
+ goto err5;
}
return 0;
+err5:
+ platform_device_unregister(exynos->usb2_phy);
+ platform_device_unregister(exynos->usb3_phy);
err4:
regulator_disable(exynos->vdd10);
err3:
switch (dwc3_data->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID
+ val &= ~(USB3_DELAY_VBUSVALID
| USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
| USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
- val |= USB3_DEVICE_NOT_HOST;
+ /*
+ * USB3_PORT2_FORCE_VBUSVALID When '1' and when
+ * USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input
+ * of the pico PHY to 1.
+ */
+
+ val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
break;
case USB_DR_MODE_HOST:
dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
- dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
+ dwc3_data->rstc_pwrdn =
+ devm_reset_control_get_exclusive(dev, "powerdown");
if (IS_ERR(dwc3_data->rstc_pwrdn)) {
dev_err(&pdev->dev, "could not get power controller\n");
ret = PTR_ERR(dwc3_data->rstc_pwrdn);
/* Manage PowerDown */
reset_control_deassert(dwc3_data->rstc_pwrdn);
- dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset");
+ dwc3_data->rstc_rst =
+ devm_reset_control_get_shared(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
dev_err(&pdev->dev, "could not get reset controller\n");
ret = PTR_ERR(dwc3_data->rstc_rst);
return ret;
}
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+ /*
+ * As of core revision 2.60a the recommended programming model
+ * is to set the ClearPendIN bit when issuing a Clear Stall EP
+ * command for IN endpoints. This is to prevent an issue where
+ * some (non-compliant) hosts may not send ACK TPs for pending
+ * IN transfers due to a mishandled error condition. Synopsys
+ * STAR 9000614252.
+ */
+ if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+ cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+ memset(¶ms, 0, sizeof(params));
+
+ return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms);
+}
+
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
struct dwc3_trb *trb)
{
else
dep->flags |= DWC3_EP_STALL;
} else {
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_CLEARSTALL, ¶ms);
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
struct dwc3_ep *dep;
- struct dwc3_gadget_ep_cmd_params params;
int ret;
dep = dwc->eps[epnum];
dep->flags &= ~DWC3_EP_STALL;
- memset(¶ms, 0, sizeof(params));
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_CLEARSTALL, ¶ms);
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
WARN_ON_ONCE(ret);
}
}
}
break;
}
- req->length = value;
- req->context = cdev;
- req->zero = value < w_length;
- value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
- if (value < 0) {
- DBG(cdev, "ep_queue --> %d\n", value);
- req->status = 0;
- composite_setup_complete(gadget->ep0, req);
+
+ if (value >= 0) {
+ req->length = value;
+ req->context = cdev;
+ req->zero = value < w_length;
+ value = composite_ep0_queue(cdev, req,
+ GFP_ATOMIC);
+ if (value < 0) {
+ DBG(cdev, "ep_queue --> %d\n", value);
+ req->status = 0;
+ composite_setup_complete(gadget->ep0,
+ req);
+ }
}
return value;
}
.owner = THIS_MODULE,
.name = "configfs-gadget",
},
+ .match_existing_only = 1,
};
static struct config_group *gadgets_make(
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ !d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
+ struct ffs_ep *eps_ptr;
/* Make it a single chunk, less management later on */
vla_group(d);
ffs->raw_descs_length);
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
- for (ret = ffs->eps_count; ret; --ret) {
- struct ffs_ep *ptr;
-
- ptr = vla_ptr(vlabuf, d, eps);
- ptr[ret].num = -1;
- }
+ eps_ptr = vla_ptr(vlabuf, d, eps);
+ for (i = 0; i < ffs->eps_count; i++)
+ eps_ptr[i].num = -1;
/* Save pointers
* d_eps == vlabuf, func->eps used to kfree vlabuf later
goto error;
func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
- if (c->cdev->use_os_string)
+ if (c->cdev->use_os_string) {
for (i = 0; i < ffs->interfaces_count; ++i) {
struct usb_os_desc *desc;
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
- ret = ffs_do_os_descs(ffs->ms_os_descs_count,
- vla_ptr(vlabuf, d, raw_descs) +
- fs_len + hs_len + ss_len,
- d_raw_descs__sz - fs_len - hs_len - ss_len,
- __ffs_func_bind_do_os_desc, func);
- if (unlikely(ret < 0))
- goto error;
+ ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) +
+ fs_len + hs_len + ss_len,
+ d_raw_descs__sz - fs_len - hs_len -
+ ss_len,
+ __ffs_func_bind_do_os_desc, func);
+ if (unlikely(ret < 0))
+ goto error;
+ }
func->function.os_desc_n =
c->cdev->use_os_string ? ffs->interfaces_count : 0;
.wMaxPacketSize = cpu_to_le16(512)
};
-static struct usb_qualifier_descriptor dev_qualifier = {
- .bLength = sizeof(dev_qualifier),
- .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
- .bcdUSB = cpu_to_le16(0x0200),
- .bDeviceClass = USB_CLASS_PRINTER,
- .bNumConfigurations = 1
-};
-
static struct usb_descriptor_header *hs_printer_function[] = {
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &hs_ep_in_desc,
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].tpg == tpg)
break;
- if (i < TPG_INSTANCES)
+ if (i < TPG_INSTANCES) {
tpg_instances[i].tpg = NULL;
- opts = container_of(tpg_instances[i].func_inst,
- struct f_tcm_opts, func_inst);
- mutex_lock(&opts->dep_lock);
- if (opts->has_dep)
- module_put(opts->dependent);
- else
- configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
- mutex_unlock(&opts->dep_lock);
+ opts = container_of(tpg_instances[i].func_inst,
+ struct f_tcm_opts, func_inst);
+ mutex_lock(&opts->dep_lock);
+ if (opts->has_dep)
+ module_put(opts->dependent);
+ else
+ configfs_undepend_item_unlocked(
+ &opts->func_inst.group.cg_item);
+ mutex_unlock(&opts->dep_lock);
+ }
mutex_unlock(&tpg_instances_lock);
kfree(tpg);
NULL,
};
-static struct usb_qualifier_descriptor devqual_desc = {
- .bLength = sizeof devqual_desc,
- .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
-
- .bcdUSB = cpu_to_le16(0x200),
- .bDeviceClass = USB_CLASS_MISC,
- .bDeviceSubClass = 0x02,
- .bDeviceProtocol = 0x01,
- .bNumConfigurations = 1,
- .bRESERVED = 0,
-};
-
static struct usb_interface_assoc_descriptor iad_desc = {
.bLength = sizeof iad_desc,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_cur_lay3 c;
+ memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
c.dCUR = p_srate;
* USB 2.0 devices need to expose both high speed and full speed
* descriptors, unless they only run at full speed.
*
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the configuration descriptor.
+ * That means alternate endpoint descriptors (bigger packets).
*/
struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
struct usb_ep *ep = dev->gadget->ep0;
struct usb_request *req = dev->req;
- if ((retval = setup_req (ep, req, 0)) == 0)
- retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+ if ((retval = setup_req (ep, req, 0)) == 0) {
+ spin_unlock_irq (&dev->lock);
+ retval = usb_ep_queue (ep, req, GFP_KERNEL);
+ spin_lock_irq (&dev->lock);
+ }
dev->state = STATE_DEV_CONNECTED;
/* assume that was SET_CONFIGURATION */
w_length);
if (value < 0)
break;
+
+ spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
- GFP_ATOMIC);
+ GFP_KERNEL);
+ spin_lock (&dev->lock);
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
if (value >= 0 && dev->state != STATE_DEV_SETUP) {
req->length = value;
req->zero = value < w_length;
- value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+
+ spin_unlock (&dev->lock);
+ value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
}
+ return value;
}
/* device stalls when value < 0 */
}
}
- list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
- driver->function);
+ if (!driver->match_existing_only) {
+ list_add_tail(&driver->pending, &gadget_driver_pending_list);
+ pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+ driver->function);
+ ret = 0;
+ }
+
mutex_unlock(&udc_lock);
- return 0;
+ return ret;
found:
ret = udc_bind_to_driver(udc, driver);
mutex_unlock(&udc_lock);
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ /**
+ * Protect the system from crashing at system shutdown in cases where
+ * usb host is not added yet from OTG controller driver.
+ * As ehci_setup() not done yet, so stop accessing registers or
+ * variables initialized in ehci_setup()
+ */
+ if (!ehci->sbrn)
+ return;
+
spin_lock_irq(&ehci->lock);
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int ports = HCS_N_PORTS (ehci->hcs_params);
- u32 __iomem *status_reg = &ehci->regs->port_status[
- (wIndex & 0xff) - 1];
- u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+ u32 __iomem *status_reg, *hostpc_reg;
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
+ /*
+ * Avoid underflow while calculating (wIndex & 0xff) - 1.
+ * The compiler might deduce that wIndex can never be 0 and then
+ * optimize away the tests for !wIndex below.
+ */
+ temp = wIndex & 0xff;
+ temp -= (temp > 0);
+ status_reg = &ehci->regs->port_status[temp];
+ hostpc_reg = &ehci->regs->hostpc[temp];
+
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
static int ehci_msm_pm_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
bool do_wakeup = device_may_wakeup(dev);
dev_dbg(dev, "ehci-msm PM suspend\n");
- return ehci_suspend(hcd, do_wakeup);
+ /* Only call ehci_suspend if ehci_setup has been done */
+ if (ehci->sbrn)
+ return ehci_suspend(hcd, do_wakeup);
+
+ return 0;
}
static int ehci_msm_pm_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
dev_dbg(dev, "ehci-msm PM resume\n");
- ehci_resume(hcd, false);
+
+ /* Only call ehci_resume if ehci_setup has been done */
+ if (ehci->sbrn)
+ ehci_resume(hcd, false);
return 0;
}
+
#else
#define ehci_msm_pm_suspend NULL
#define ehci_msm_pm_resume NULL
priv->clk48 = NULL;
}
- priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+ priv->pwr =
+ devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
if (err == -EPROBE_DEFER)
priv->pwr = NULL;
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+ priv->rst =
+ devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
if (err == -EPROBE_DEFER)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+ bool has_utmi_pad_registers = false;
phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
if (!phy_np)
return -ENOENT;
+ if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+ has_utmi_pad_registers = true;
+
if (!usb1_reset_attempted) {
struct reset_control *usb1_reset;
- usb1_reset = of_reset_control_get(phy_np, "usb");
+ if (!has_utmi_pad_registers)
+ usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+ else
+ usb1_reset = tegra->rst;
+
if (IS_ERR(usb1_reset)) {
dev_warn(&pdev->dev,
"can't get utmi-pads reset from the PHY\n");
reset_control_assert(usb1_reset);
udelay(1);
reset_control_deassert(usb1_reset);
+
+ if (!has_utmi_pad_registers)
+ reset_control_put(usb1_reset);
}
- reset_control_put(usb1_reset);
usb1_reset_attempted = true;
}
- if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+ if (!has_utmi_pad_registers) {
reset_control_assert(tegra->rst);
udelay(1);
reset_control_deassert(tegra->rst);
{
int branch;
- ed->state = ED_OPER;
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
+
+ ed->state = ED_OPER;
return 0;
}
priv->clk48 = NULL;
}
- priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+ priv->pwr =
+ devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
goto err_put_clks;
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+ priv->rst =
+ devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
goto err_put_clks;
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
#define PCI_VENDOR_ID_ETRON 0x1b6f
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
ret = clk_prepare_enable(clk);
if (ret)
goto put_hcd;
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto put_hcd;
}
xhci = hcd_to_xhci(hcd);
temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+
+ /*
+ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+ * but the completion event in never sent. Use the cmd timeout timer to
+ * handle those cases. Use twice the time to cover the bit polling retry
+ */
+ mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
xhci_err(xhci, "Stopped the command ring failed, "
"maybe the host is dead\n");
+ del_timer(&xhci->cmd_timer);
xhci->xhc_state |= XHCI_STATE_DYING;
xhci_quiesce(xhci);
xhci_halt(xhci);
int ret;
unsigned long flags;
u64 hw_ring_state;
- struct xhci_command *cur_cmd = NULL;
+ bool second_timeout = false;
xhci = (struct xhci_hcd *) data;
/* mark this command to be cancelled */
spin_lock_irqsave(&xhci->lock, flags);
if (xhci->current_cmd) {
- cur_cmd = xhci->current_cmd;
- cur_cmd->status = COMP_CMD_ABORT;
+ if (xhci->current_cmd->status == COMP_CMD_ABORT)
+ second_timeout = true;
+ xhci->current_cmd->status = COMP_CMD_ABORT;
}
-
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) {
-
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Command timeout\n");
ret = xhci_abort_cmd_ring(xhci);
}
return;
}
+
+ /* command ring failed to restart, or host removed. Bail out */
+ if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+ xhci_cleanup_command_queue(xhci);
+ return;
+ }
+
/* command timeout on stopped ring, ring can't be aborted */
xhci_dbg(xhci, "Command timeout on stopped ring\n");
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
writel(irq_pending, &xhci->ir_set->irq_pending);
}
- if (xhci->xhc_state & XHCI_STATE_DYING) {
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
"Shouldn't IRQs be disabled?\n");
/* Clear the event handler busy flag (RW1C);
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_HALTED)
- return;
-
mutex_lock(&xhci->mutex);
- spin_lock_irq(&xhci->lock);
- xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- /* Make sure the xHC is halted for a USB3 roothub
- * (xhci_stop() could be called as part of failed init).
- */
- xhci_halt(xhci);
- xhci_reset(xhci);
- spin_unlock_irq(&xhci->lock);
+ if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+ spin_lock_irq(&xhci->lock);
+
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ xhci_halt(xhci);
+ xhci_reset(xhci);
+
+ spin_unlock_irq(&xhci->lock);
+ }
+
+ if (!usb_hcd_is_primary_hcd(hcd)) {
+ mutex_unlock(&xhci->mutex);
+ return;
+ }
xhci_cleanup_msix(xhci);
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
xhci_print_registers(xhci);
- xhci->quirks = quirks;
+ xhci->quirks |= quirks;
get_quirks(dev, xhci);
musb_platform_try_idle(musb, 0);
}
-static void musb_shutdown(struct platform_device *pdev)
-{
- struct musb *musb = dev_to_musb(&pdev->dev);
- unsigned long flags;
-
- pm_runtime_get_sync(musb->controller);
-
- musb_host_cleanup(musb);
- musb_gadget_cleanup(musb);
-
- spin_lock_irqsave(&musb->lock, flags);
- musb_platform_disable(musb);
- musb_generic_disable(musb);
- spin_unlock_irqrestore(&musb->lock, flags);
-
- musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
- musb_platform_exit(musb);
-
- pm_runtime_put(musb->controller);
- /* FIXME power down */
-}
-
-
/*-------------------------------------------------------------------------*/
/*
#define use_dma 0
#endif
-static void (*musb_phy_callback)(enum musb_vbus_id_status status);
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
/*
* musb_mailbox - optional phy notifier function
* Optionally gets called from the USB PHY. Note that the USB PHY must be
* disabled at the point the phy_callback is registered or unregistered.
*/
-void musb_mailbox(enum musb_vbus_id_status status)
+int musb_mailbox(enum musb_vbus_id_status status)
{
if (musb_phy_callback)
- musb_phy_callback(status);
+ return musb_phy_callback(status);
+ return -ENODEV;
};
EXPORT_SYMBOL_GPL(musb_mailbox);
musb_readl = musb_default_readl;
musb_writel = musb_default_writel;
- /* We need musb_read/write functions initialized for PM */
- pm_runtime_use_autosuspend(musb->controller);
- pm_runtime_set_autosuspend_delay(musb->controller, 200);
- pm_runtime_enable(musb->controller);
-
/* The musb_platform_init() call:
* - adjusts musb->mregs
* - sets the musb->isr
if (musb->ops->phy_callback)
musb_phy_callback = musb->ops->phy_callback;
+ /*
+ * We need musb_read/write functions initialized for PM.
+ * Note that at least 2430 glue needs autosuspend delay
+ * somewhere above 300 ms for the hardware to idle properly
+ * after disconnecting the cable in host mode. Let's use
+ * 500 ms for some margin.
+ */
+ pm_runtime_use_autosuspend(musb->controller);
+ pm_runtime_set_autosuspend_delay(musb->controller, 500);
+ pm_runtime_enable(musb->controller);
pm_runtime_get_sync(musb->controller);
status = usb_phy_init(musb->xceiv);
if (status)
goto fail5;
- pm_runtime_put(musb->controller);
-
- /*
- * For why this is currently needed, see commit 3e43a0725637
- * ("usb: musb: core: add pm_runtime_irq_safe()")
- */
- pm_runtime_irq_safe(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
usb_phy_shutdown(musb->xceiv);
err_usb_phy_init:
+ pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
fail2:
if (musb->irq_wake)
musb_platform_exit(musb);
fail1:
- pm_runtime_disable(musb->controller);
dev_err(musb->controller,
"musb_init_controller failed with status %d\n", status);
{
struct device *dev = &pdev->dev;
struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
/* this gets called on rmmod.
* - Host mode: host may still be active
* - OTG mode: both roles are deactivated (or never-activated)
*/
musb_exit_debugfs(musb);
- musb_shutdown(pdev);
- musb_phy_callback = NULL;
-
- if (musb->dma_controller)
- musb_dma_controller_destroy(musb->dma_controller);
-
- usb_phy_shutdown(musb->xceiv);
cancel_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
+ pm_runtime_get_sync(musb->controller);
+ musb_host_cleanup(musb);
+ musb_gadget_cleanup(musb);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ pm_runtime_dont_use_autosuspend(musb->controller);
+ pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
+ musb_platform_exit(musb);
+ musb_phy_callback = NULL;
+ if (musb->dma_controller)
+ musb_dma_controller_destroy(musb->dma_controller);
+ usb_phy_shutdown(musb->xceiv);
musb_free(musb);
device_init_wakeup(dev, 0);
return 0;
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
- musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+ if (musb->context.devctl & MUSB_DEVCTL_SESSION)
+ musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
for (i = 0; i < musb->config->num_eps; ++i) {
struct musb_hw_ep *hw_ep;
},
.probe = musb_probe,
.remove = musb_remove,
- .shutdown = musb_shutdown,
};
module_platform_driver(musb_driver);
dma_addr_t *dma_addr, u32 *len);
void (*pre_root_reset_end)(struct musb *musb);
void (*post_root_reset_end)(struct musb *musb);
- void (*phy_callback)(enum musb_vbus_id_status status);
+ int (*phy_callback)(enum musb_vbus_id_status status);
};
/*
struct work_struct irq_work;
struct delayed_work deassert_reset_work;
struct delayed_work finish_resume_work;
+ struct delayed_work gadget_work;
u16 hwvers;
u16 intrrxe;
return usb_phy_set_power(musb->xceiv, mA);
}
+static void musb_gadget_work(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+
+ musb = container_of(work, struct musb, gadget_work.work);
+ pm_runtime_get_sync(musb->controller);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_pullup(musb, musb->softconnect);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+}
+
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct musb *musb = gadget_to_musb(gadget);
is_on = !!is_on;
- pm_runtime_get_sync(musb->controller);
-
/* NOTE: this assumes we are sensing vbus; we'd rather
* not pullup unless the B-session is active.
*/
spin_lock_irqsave(&musb->lock, flags);
if (is_on != musb->softconnect) {
musb->softconnect = is_on;
- musb_pullup(musb, is_on);
+ schedule_delayed_work(&musb->gadget_work, 0);
}
spin_unlock_irqrestore(&musb->lock, flags);
- pm_runtime_put(musb->controller);
-
return 0;
}
#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
musb->g.is_otg = 0;
#endif
-
+ INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
musb_g_init_endpoints(musb);
musb->is_active = 0;
{
if (musb->port_mode == MUSB_PORT_MODE_HOST)
return;
+
+ cancel_delayed_work_sync(&musb->gadget_work);
usb_del_gadget_udc(&musb->g);
}
if (musb->xceiv->last_event == USB_EVENT_ID)
musb_platform_set_vbus(musb, 1);
- if (musb->xceiv->last_event == USB_EVENT_NONE)
- pm_runtime_put(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
struct musb *musb = gadget_to_musb(g);
unsigned long flags;
- if (musb->xceiv->last_event == USB_EVENT_NONE)
- pm_runtime_get_sync(musb->controller);
+ pm_runtime_get_sync(musb->controller);
/*
* REVISIT always use otg_set_peripheral() here too;
* that currently misbehaves.
*/
- pm_runtime_put(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
}
}
}
- if (qh != NULL && qh->is_ready) {
+ /*
+ * The pipe must be broken if current urb->status is set, so don't
+ * start next urb.
+ * TODO: to minimize the risk of regression, only check urb->status
+ * for RX, until we have a test case to understand the behavior of TX.
+ */
+ if ((!status || !is_in) && qh && qh->is_ready) {
dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
musb_writew(ep->regs, MUSB_TXCSR, 0);
/* scrub all previous state, clearing toggle */
- } else {
- csr = musb_readw(ep->regs, MUSB_RXCSR);
- if (csr & MUSB_RXCSR_RXPKTRDY)
- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
- musb_readw(ep->regs, MUSB_RXCOUNT));
-
- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
}
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
ep->rx_reinit = 0;
}
-static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
+static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset,
u32 *length, u8 *mode)
}
channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
-
- return 0;
}
-static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
- struct musb_hw_ep *hw_ep,
- struct musb_qh *qh,
- struct urb *urb,
- u32 offset,
- u32 *length,
- u8 *mode)
+static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ u32 offset,
+ u32 *length,
+ u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
- if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
- return -ENODEV;
-
channel->actual_len = 0;
/*
* to identify the zero-length-final-packet case.
*/
*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-
- return 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
struct dma_channel *channel = hw_ep->tx_channel;
u16 pkt_size = qh->maxpacket;
u8 mode;
- int res;
if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
- res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
- offset, &length, &mode);
+ musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+ &length, &mode);
+ else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+ musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
+ &length, &mode);
else
- res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
- offset, &length, &mode);
- if (res)
return false;
qh->segsize = length;
if (is_in) {
dma = is_dma_capable() ? ep->rx_channel : NULL;
- /* clear nak timeout bit */
+ /*
+ * Need to stop the transaction by clearing REQPKT first
+ * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
+ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
+ */
rx_csr = musb_readw(epio, MUSB_RXCSR);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
struct urb *urb,
size_t len)
{
- struct dma_channel *channel = hw_ep->tx_channel;
+ struct dma_channel *channel = hw_ep->rx_channel;
void __iomem *epio = hw_ep->regs;
dma_addr_t *buf;
u32 length, res;
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ rx_csr &= ~MUSB_RXCSR_H_ERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
enum musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
struct device *control_otghs;
+ bool cable_connected;
+ bool enabled;
+ bool powered;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
static struct omap2430_glue *_glue;
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
-{
- struct musb *musb = (void *)_musb;
- unsigned long flags;
- u8 power;
- u8 devctl;
-
- spin_lock_irqsave(&musb->lock, flags);
-
- switch (musb->xceiv->otg->state) {
- case OTG_STATE_A_WAIT_BCON:
-
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- MUSB_DEV_MODE(musb);
- } else {
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
- MUSB_HST_MODE(musb);
- }
- break;
- case OTG_STATE_A_SUSPEND:
- /* finish RESUME signaling? */
- if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
- power = musb_readb(musb->mregs, MUSB_POWER);
- power &= ~MUSB_POWER_RESUME;
- dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
- musb_writeb(musb->mregs, MUSB_POWER, power);
- musb->is_active = 1;
- musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
- | MUSB_PORT_STAT_RESUME);
- musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
- usb_hcd_poll_rh_status(musb->hcd);
- /* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv->otg->state = OTG_STATE_A_HOST;
- }
- break;
- case OTG_STATE_A_HOST:
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE)
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- else
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
- default:
- break;
- }
- spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-
-static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
- unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
- static unsigned long last_timer;
-
- if (timeout == 0)
- timeout = default_timeout;
-
- /* Never idle if active, or when VBUS timeout is not set as host */
- if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
- dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&musb_idle_timer);
- last_timer = jiffies;
- return;
- }
-
- if (time_after(last_timer, timeout)) {
- if (!timer_pending(&musb_idle_timer))
- last_timer = timeout;
- else {
- dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
- return;
- }
- }
- last_timer = timeout;
-
- dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- (unsigned long)jiffies_to_msecs(timeout - jiffies));
- mod_timer(&musb_idle_timer, timeout);
-}
-
static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
{
struct usb_otg *otg = musb->xceiv->otg;
musb_readb(musb->mregs, MUSB_DEVCTL));
}
-static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
-{
- u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
- return 0;
-}
-
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
-static void omap2430_musb_mailbox(enum musb_vbus_id_status status)
+/*
+ * We can get multiple cable events so we need to keep track
+ * of the power state. Only keep power enabled if USB cable is
+ * connected and a gadget is started.
+ */
+static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ bool power_up;
+ int res;
+
+ if (glue->enabled != enabled)
+ glue->enabled = enabled;
+
+ if (glue->cable_connected != cable)
+ glue->cable_connected = cable;
+
+ power_up = glue->enabled && glue->cable_connected;
+ if (power_up == glue->powered) {
+ dev_warn(musb->controller, "power state already %i\n",
+ power_up);
+ return;
+ }
+
+ glue->powered = power_up;
+
+ if (power_up) {
+ res = pm_runtime_get_sync(musb->controller);
+ if (res < 0) {
+ dev_err(musb->controller, "could not enable: %i", res);
+ glue->powered = false;
+ }
+ } else {
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
{
struct omap2430_glue *glue = _glue;
if (!glue) {
pr_err("%s: musb core is not yet initialized\n", __func__);
- return;
+ return -EPROBE_DEFER;
}
glue->status = status;
if (!glue_to_musb(glue)) {
pr_err("%s: musb core is not yet ready\n", __func__);
- return;
+ return -EPROBE_DEFER;
}
schedule_work(&glue->omap_musb_mailbox_work);
+
+ return 0;
}
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
struct usb_otg *otg = musb->xceiv->otg;
+ bool cable_connected;
+
+ cable_connected = ((glue->status == MUSB_ID_GROUND) ||
+ (glue->status == MUSB_VBUS_VALID));
+
+ if (cable_connected)
+ omap2430_set_power(musb, glue->enabled, cable_connected);
switch (glue->status) {
case MUSB_ID_GROUND:
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
musb->xceiv->last_event = USB_EVENT_ID;
if (musb->gadget_driver) {
- pm_runtime_get_sync(dev);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_HOST);
omap2430_musb_set_vbus(musb, 1);
otg->default_a = false;
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
musb->xceiv->last_event = USB_EVENT_VBUS;
- if (musb->gadget_driver)
- pm_runtime_get_sync(dev);
omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
dev_dbg(dev, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
- if (musb->gadget_driver) {
+ if (musb->gadget_driver)
omap2430_musb_set_vbus(musb, 0);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
if (data->interface_type == MUSB_INTERFACE_UTMI)
otg_set_vbus(musb->xceiv->otg, 0);
dev_dbg(dev, "ID float\n");
}
+ if (!cable_connected)
+ omap2430_set_power(musb, glue->enabled, cable_connected);
+
atomic_notifier_call_chain(&musb->xceiv->notifier,
musb->xceiv->last_event, NULL);
}
{
struct omap2430_glue *glue = container_of(mailbox_work,
struct omap2430_glue, omap_musb_mailbox_work);
- struct musb *musb = glue_to_musb(glue);
- struct device *dev = musb->controller;
- pm_runtime_get_sync(dev);
omap_musb_set_mailbox(glue);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
}
static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
return PTR_ERR(musb->phy);
}
musb->isr = omap2430_musb_interrupt;
-
- /*
- * Enable runtime PM for musb parent (this driver). We can't
- * do it earlier as struct musb is not yet allocated and we
- * need to touch the musb registers for runtime PM.
- */
- pm_runtime_enable(glue->dev);
- status = pm_runtime_get_sync(glue->dev);
- if (status < 0)
- goto err1;
-
- status = pm_runtime_get_sync(dev);
- if (status < 0) {
- dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
- pm_runtime_put_sync(glue->dev);
- goto err1;
- }
+ phy_init(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
-
if (glue->status != MUSB_UNKNOWN)
omap_musb_set_mailbox(glue);
- phy_init(musb->phy);
- phy_power_on(musb->phy);
-
- pm_runtime_put_noidle(musb->controller);
- pm_runtime_put_noidle(glue->dev);
return 0;
-
-err1:
- return status;
}
static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
+ if (!WARN_ON(!musb->phy))
+ phy_power_on(musb->phy);
+
+ omap2430_set_power(musb, true, glue->cable_connected);
+
switch (glue->status) {
case MUSB_ID_GROUND:
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ if (!WARN_ON(!musb->phy))
+ phy_power_off(musb->phy);
+
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
+
+ omap2430_set_power(musb, false, glue->cable_connected);
}
static int omap2430_musb_exit(struct musb *musb)
{
- del_timer_sync(&musb_idle_timer);
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
phy_exit(musb->phy);
+ musb->phy = NULL;
+ cancel_work_sync(&glue->omap_musb_mailbox_work);
return 0;
}
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_mode = omap2430_musb_set_mode,
- .try_idle = omap2430_musb_try_idle,
-
.set_vbus = omap2430_musb_set_vbus,
.enable = omap2430_musb_enable,
goto err2;
}
- /*
- * Note that we cannot enable PM runtime yet for this
- * driver as we need struct musb initialized first.
- * See omap2430_musb_init above.
- */
+ pm_runtime_enable(glue->dev);
+ pm_runtime_use_autosuspend(glue->dev);
+ pm_runtime_set_autosuspend_delay(glue->dev, 500);
ret = platform_device_add(musb);
if (ret) {
static int omap2430_remove(struct platform_device *pdev)
{
- struct omap2430_glue *glue = platform_get_drvdata(pdev);
+ struct omap2430_glue *glue = platform_get_drvdata(pdev);
+ struct musb *musb = glue_to_musb(glue);
pm_runtime_get_sync(glue->dev);
- cancel_work_sync(&glue->omap_musb_mailbox_work);
platform_device_unregister(glue->musb);
+ omap2430_set_power(musb, false, false);
pm_runtime_put_sync(glue->dev);
+ pm_runtime_dont_use_autosuspend(glue->dev);
pm_runtime_disable(glue->dev);
return 0;
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- if (musb) {
- musb->context.otg_interfsel = musb_readl(musb->mregs,
- OTG_INTERFSEL);
+ if (!musb)
+ return 0;
- omap2430_low_level_exit(musb);
- }
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
+ omap2430_low_level_exit(musb);
return 0;
}
struct musb *musb = glue_to_musb(glue);
if (!musb)
- return -EPROBE_DEFER;
+ return 0;
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
},
};
+module_platform_driver(omap2430_driver);
+
MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
-
-static int __init omap2430_init(void)
-{
- return platform_driver_register(&omap2430_driver);
-}
-subsys_initcall(omap2430_init);
-
-static void __exit omap2430_exit(void)
-{
- platform_driver_unregister(&omap2430_driver);
-}
-module_exit(omap2430_exit);
struct sunxi_glue {
struct device *dev;
- struct platform_device *musb;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
return;
if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
- struct musb *musb = platform_get_drvdata(glue->musb);
+ struct musb *musb = glue->musb;
unsigned long flags;
u8 devctl;
if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
musb->xceiv->otg->default_a = 1;
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
MUSB_HST_MODE(musb);
devctl |= MUSB_DEVCTL_SESSION;
} else {
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
- if (is_on)
+ if (is_on) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- else
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ } else {
clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ }
schedule_work(&glue->work);
}
if (ret)
goto error_unregister_notifier;
- if (musb->port_mode == MUSB_PORT_MODE_HOST) {
- ret = phy_power_on(glue->phy);
- if (ret)
- goto error_phy_exit;
- set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
- /* Stop musb work from turning vbus off again */
- set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- }
-
musb->isr = sunxi_musb_interrupt;
/* Stop the musb-core from doing runtime pm (not supported on sunxi) */
return 0;
-error_phy_exit:
- phy_exit(glue->phy);
error_unregister_notifier:
if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
return 0;
}
+static int sunxi_set_mode(struct musb *musb, u8 mode)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ int ret;
+
+ if (mode == MUSB_HOST) {
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ return ret;
+
+ set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ /* Stop musb work from turning vbus off again */
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return 0;
+}
+
static void sunxi_musb_enable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ glue->musb = musb;
+
/* musb_core does not call us in a balanced manner */
if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
return;
.exit = sunxi_musb_exit,
.enable = sunxi_musb_enable,
.disable = sunxi_musb_disable,
+ .set_mode = sunxi_set_mode,
.fifo_offset = sunxi_musb_fifo_offset,
.ep_offset = sunxi_musb_ep_offset,
.busctl_offset = sunxi_musb_busctl_offset,
pinfo.data = &pdata;
pinfo.size_data = sizeof(pdata);
- glue->musb = platform_device_register_full(&pinfo);
- if (IS_ERR(glue->musb)) {
- ret = PTR_ERR(glue->musb);
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
goto err_unregister_usb_phy;
}
struct sunxi_glue *glue = platform_get_drvdata(pdev);
struct platform_device *usb_phy = glue->usb_phy;
- platform_device_unregister(glue->musb); /* Frees glue ! */
+ platform_device_unregister(glue->musb_pdev);
usb_phy_generic_unregister(usb_phy);
return 0;
struct regulator *usb3v3;
+ /* used to check initial cable status after probe */
+ struct delayed_work get_status_work;
+
/* used to set vbus, in atomic path */
struct work_struct set_vbus_work;
twl->asleep = 1;
status = MUSB_VBUS_VALID;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
} else {
if (twl->linkstat != MUSB_UNKNOWN) {
status = MUSB_VBUS_OFF;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
if (twl->asleep) {
regulator_disable(twl->usb3v3);
twl->asleep = 0;
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = MUSB_ID_GROUND;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
} else {
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
return IRQ_HANDLED;
}
+static void twl6030_status_work(struct work_struct *work)
+{
+ struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
+ get_status_work.work);
+
+ twl6030_usb_irq(twl->irq2, twl);
+ twl6030_usbotg_irq(twl->irq1, twl);
+}
+
static int twl6030_enable_irq(struct twl6030_usb *twl)
{
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
REG_INT_MSK_STS_C);
- twl6030_usb_irq(twl->irq2, twl);
- twl6030_usbotg_irq(twl->irq1, twl);
return 0;
}
dev_warn(&pdev->dev, "could not create sysfs file\n");
INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+ INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
twl->asleep = 0;
twl6030_enable_irq(twl);
+ schedule_delayed_work(&twl->get_status_work, HZ);
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
return 0;
{
struct twl6030_usb *twl = platform_get_drvdata(pdev);
+ cancel_delayed_work(&twl->get_status_work);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
urblist_entry)
usb_unlink_urb(urbtrack->urb);
spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ parport_del_port(mos_parport->pp);
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
+ scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
static int vhci_get_frame_number(struct usb_hcd *hcd)
{
- pr_err("Not yet implemented\n");
+ dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
return 0;
}
config EBC_C384_WDT
tristate "WinSystems EBC-C384 Watchdog Timer"
- depends on X86 && ISA
+ depends on X86 && ISA_BUS_API
select WATCHDOG_CORE
help
Enables watchdog timer support for the watchdog timer on the
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-static void release_memory_resource(struct resource *resource);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
}
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+ if (!resource)
+ return;
+
+ /*
+ * No need to reset region to identity mapped since we now
+ * know that no I/O can be in this region
+ */
+ release_resource(resource);
+ kfree(resource);
+}
+
static struct resource *additional_memory_resource(phys_addr_t size)
{
struct resource *res;
return res;
}
-static void release_memory_resource(struct resource *resource)
-{
- if (!resource)
- return;
-
- /*
- * No need to reset region to identity mapped since we now
- * know that no I/O can be in this region
- */
- release_resource(resource);
- kfree(resource);
-}
-
static enum bp_state reserve_additional_memory(void)
{
long credit;
return 0;
}
-static int __init check_prereq(void)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- if (!acpi_gbl_FADT.smi_command)
- return -ENODEV;
-
- if (c->x86_vendor == X86_VENDOR_INTEL) {
- if (!cpu_has(c, X86_FEATURE_EST))
- return -ENODEV;
- return 0;
- }
- if (c->x86_vendor == X86_VENDOR_AMD) {
- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
- * as we get compile warnings for the static functions.
- */
-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
-#define USE_HW_PSTATE 0x00000080
- u32 eax, ebx, ecx, edx;
- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
- return -ENODEV;
- return 0;
- }
- return -ENODEV;
-}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
static int __init xen_acpi_processor_init(void)
{
unsigned int i;
- int rc = check_prereq();
+ int rc;
- if (rc)
- return rc;
+ if (!xen_initial_domain())
+ return -ENODEV;
nr_acpi_bits = get_max_acpi_id() + 1;
acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
tmp_val = 0;
err = xen_pcibk_config_read(dev, field_start,
/* A write to obtain the length must happen as a 32-bit write.
* This does not (yet) support writing individual bytes
*/
- if (value == ~PCI_ROM_ADDRESS_ENABLE)
+ if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
bar->which = 1;
else {
u32 tmpval;
(PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64))) {
bar_info->val = res[pos - 1].start >> 32;
- bar_info->len_val = res[pos - 1].end >> 32;
+ bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
return;
}
}
+ if (!res[pos].flags ||
+ (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+ IORESOURCE_BUSY)))
+ return;
+
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = resource_size(&res[pos]);
+ bar_info->len_val = -resource_size(&res[pos]) |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
}
static void *bar_init(struct pci_dev *dev, int offset)
{
- struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~0);
- bar->which = 0;
return bar;
}
static void *rom_init(struct pci_dev *dev, int offset)
{
- struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
- bar->which = 0;
return bar;
}
rc = -ENOMEM;
goto out;
}
+ } else {
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+ if (&trans->list == &u->transactions)
+ return -ESRCH;
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
- kfree(trans);
+ if (msg_type == XS_TRANSACTION_START)
+ kfree(trans);
rc = PTR_ERR(reply);
goto out;
}
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);
-
kfree(trans);
}
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
void *ret;
- struct xsd_sockmsg req_msg = *msg;
+ enum xsd_sockmsg_type type = msg->type;
int err;
- if (req_msg.type == XS_TRANSACTION_START)
+ if (type == XS_TRANSACTION_START)
transaction_start();
mutex_lock(&xs_state.request_mutex);
mutex_unlock(&xs_state.request_mutex);
- if (IS_ERR(ret))
- return ret;
-
if ((msg->type == XS_TRANSACTION_END) ||
- ((req_msg.type == XS_TRANSACTION_START) &&
- (msg->type == XS_ERROR)))
+ ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
transaction_end();
return ret;
v9fs_proto_dotu(v9ses));
fid = file->private_data;
if (!fid) {
- fid = v9fs_fid_clone(file->f_path.dentry);
+ fid = v9fs_fid_clone(file_dentry(file));
if (IS_ERR(fid))
return PTR_ERR(fid);
* because we want write after unlink usecase
* to work.
*/
- fid = v9fs_writeback_fid(file->f_path.dentry);
+ fid = v9fs_writeback_fid(file_dentry(file));
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
* because we want write after unlink usecase
* to work.
*/
- fid = v9fs_writeback_fid(filp->f_path.dentry);
+ fid = v9fs_writeback_fid(file_dentry(filp));
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
struct p9_fid *fid, *inode_fid;
struct dentry *res = NULL;
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
struct posix_acl *pacl = NULL, *dacl = NULL;
struct dentry *res = NULL;
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
};
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */
-#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered
+#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
* for expiry, so RCU_walk is
- * not permitted
+ * not permitted. If it progresses to
+ * actual expiry attempt, the flag is
+ * not cleared when EXPIRING is set -
+ * in that case it gets cleared only
+ * when it comes to clearing EXPIRING.
*/
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
if (ino->flags & AUTOFS_INF_PENDING)
goto out;
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
- ino->flags |= AUTOFS_INF_NO_RCU;
+ ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
spin_lock(&sbi->fs_lock);
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
ino->flags |= AUTOFS_INF_EXPIRING;
- smp_mb();
- ino->flags &= ~AUTOFS_INF_NO_RCU;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
return root;
}
- ino->flags &= ~AUTOFS_INF_NO_RCU;
+ ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
}
out:
spin_unlock(&sbi->fs_lock);
while ((dentry = get_next_positive_subdir(dentry, root))) {
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
- if (ino->flags & AUTOFS_INF_NO_RCU)
+ if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
expired = NULL;
else
expired = should_expire(dentry, mnt, timeout, how);
continue;
}
ino = autofs4_dentry_ino(expired);
- ino->flags |= AUTOFS_INF_NO_RCU;
+ ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
spin_lock(&sbi->fs_lock);
goto found;
}
- ino->flags &= ~AUTOFS_INF_NO_RCU;
+ ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
if (expired != dentry)
dput(expired);
spin_unlock(&sbi->fs_lock);
found:
pr_debug("returning %p %pd\n", expired, expired);
ino->flags |= AUTOFS_INF_EXPIRING;
- smp_mb();
- ino->flags &= ~AUTOFS_INF_NO_RCU;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
- spin_lock(&sbi->lookup_lock);
- spin_lock(&expired->d_parent->d_lock);
- spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
- list_move(&expired->d_parent->d_subdirs, &expired->d_child);
- spin_unlock(&expired->d_lock);
- spin_unlock(&expired->d_parent->d_lock);
- spin_unlock(&sbi->lookup_lock);
return expired;
}
int status;
/* Block on any pending expire */
- if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
+ if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
return 0;
if (rcu_walk)
return -ECHILD;
ino = autofs4_dentry_ino(dentry);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
- ino->flags &= ~AUTOFS_INF_EXPIRING;
+ ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&sbi->fs_lock);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
- ino->flags &= ~AUTOFS_INF_EXPIRING;
+ ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
*/
struct inode *inode;
- if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
+ if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
return 0;
if (d_mountpoint(dentry))
return 0;
set_fs(KERNEL_DS);
mutex_lock(&sbi->pipe_mutex);
- wr = __vfs_write(file, data, bytes, &file->f_pos);
- while (bytes && wr) {
+ while (bytes) {
+ wr = __vfs_write(file, data, bytes, &file->f_pos);
+ if (wr <= 0)
+ break;
data += wr;
bytes -= wr;
- wr = __vfs_write(file, data, bytes, &file->f_pos);
}
mutex_unlock(&sbi->pipe_mutex);
goto end_coredump;
/* Align to page */
- if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+ if (!dump_skip(cprm, dataoff - cprm->pos))
goto end_coredump;
for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
goto end_coredump;
}
- if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+ if (!dump_skip(cprm, dataoff - cprm->pos))
goto end_coredump;
if (!elf_fdpic_dump_segments(cprm))
* This algorithm is recursive because the amount of used stack space
* is very small and the max recursion depth is limited.
*/
- indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+ indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
btrfsic_get_block_type(state, block),
block->logical_bytenr, block->dev_state->name,
block->dev_bytenr, block->mirror_num);
if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
BUG_ON(tm->slot != 0);
- eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
+ eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
+ eb->len);
if (!eb_rewin) {
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
} else if (old_root) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- eb = alloc_dummy_extent_buffer(root->fs_info, logical);
+ eb = alloc_dummy_extent_buffer(root->fs_info, logical,
+ root->nodesize);
} else {
btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
eb = btrfs_clone_extent_buffer(eb_root);
trans->transid, root->fs_info->generation);
if (!should_cow_block(trans, root, buf)) {
+ trans->dirty = true;
*cow_ret = buf;
return 0;
}
if (!err) {
tmp = (struct btrfs_disk_key *)(kaddr + offset -
map_start);
- } else {
+ } else if (err == 1) {
read_extent_buffer(eb, &unaligned,
offset, sizeof(unaligned));
tmp = &unaligned;
+ } else {
+ return err;
}
} else {
if (!btrfs_buffer_uptodate(tmp, 0, 0))
ret = -EIO;
free_extent_buffer(tmp);
+ } else {
+ ret = PTR_ERR(tmp);
}
return ret;
}
* then we don't want to set the path blocking,
* so we test it here
*/
- if (!should_cow_block(trans, root, b))
+ if (!should_cow_block(trans, root, b)) {
+ trans->dirty = true;
goto cow_done;
+ }
/*
* must have write locks on this node and the
}
ret = key_search(b, key, level, &prev_cmp, &slot);
+ if (ret < 0)
+ goto done;
if (level != 0) {
int dec = 0;
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
- unsigned long count, int wait);
+ unsigned long count, u64 transid, int wait);
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
return 0;
}
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
- struct list_head *del_list)
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list)
{
struct btrfs_delayed_node *delayed_node;
struct btrfs_delayed_item *item;
delayed_node = btrfs_get_delayed_node(inode);
if (!delayed_node)
- return;
+ return false;
+
+ /*
+ * We can only do one readdir with delayed items at a time because of
+ * item->readdir_list.
+ */
+ inode_unlock_shared(inode);
+ inode_lock(inode);
mutex_lock(&delayed_node->mutex);
item = __btrfs_first_delayed_insertion_item(delayed_node);
* requeue or dequeue this delayed node.
*/
atomic_dec(&delayed_node->refs);
+
+ return true;
}
-void btrfs_put_delayed_items(struct list_head *ins_list,
- struct list_head *del_list)
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list)
{
struct btrfs_delayed_item *curr, *next;
if (atomic_dec_and_test(&curr->refs))
kfree(curr);
}
+
+ /*
+ * The VFS is going to do up_read(), so we need to downgrade back to a
+ * read lock.
+ */
+ downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
/* Used for readdir() */
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
- struct list_head *del_list);
-void btrfs_put_delayed_items(struct list_head *ins_list,
- struct list_head *del_list);
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list);
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+ struct list_head *ins_list,
+ struct list_head *del_list);
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
struct inode *btree_inode = root->fs_info->btree_inode;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
+ if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
buf, 0, WAIT_NONE, btree_get_extent, 0);
int ret;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
+ if (IS_ERR(buf))
return 0;
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
u64 bytenr)
{
if (btrfs_test_is_dummy_root(root))
- return alloc_test_extent_buffer(root->fs_info, bytenr);
+ return alloc_test_extent_buffer(root->fs_info, bytenr,
+ root->nodesize);
return alloc_extent_buffer(root->fs_info, bytenr);
}
int ret;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(buf))
+ return buf;
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
if (ret) {
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(void)
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root;
root = btrfs_alloc_root(NULL, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(4096, 4096, 4096, root, NULL, 1);
+ /* We don't use the stripesize in selftest, set it as sectorsize */
+ __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+ BTRFS_ROOT_TREE_OBJECTID);
set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
root->alloc_bytenr = 0;
if (btrfs_need_cleaner_sleep(root))
goto sleep;
+ /*
+ * Do not do anything if we might cause open_ctree() to block
+ * before we have finished mounting the filesystem.
+ */
+ if (!root->fs_info->open)
+ goto sleep;
+
if (!mutex_trylock(&root->fs_info->cleaner_mutex))
goto sleep;
int num_backups_tried = 0;
int backup_index = 0;
int max_active;
- bool cleaner_mutex_locked = false;
tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
nodesize = btrfs_super_nodesize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
- stripesize = btrfs_super_stripesize(disk_super);
+ stripesize = sectorsize;
fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
goto fail_sysfs;
}
- /*
- * Hold the cleaner_mutex thread here so that we don't block
- * for a long time on btrfs_recover_relocation. cleaner_kthread
- * will wait for us to finish mounting the filesystem.
- */
- mutex_lock(&fs_info->cleaner_mutex);
- cleaner_mutex_locked = true;
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
"btrfs-cleaner");
if (IS_ERR(fs_info->cleaner_kthread))
ret = btrfs_cleanup_fs_roots(fs_info);
if (ret)
goto fail_qgroup;
- /* We locked cleaner_mutex before creating cleaner_kthread. */
+
+ mutex_lock(&fs_info->cleaner_mutex);
ret = btrfs_recover_relocation(tree_root);
+ mutex_unlock(&fs_info->cleaner_mutex);
if (ret < 0) {
btrfs_warn(fs_info, "failed to recover relocation: %d",
ret);
goto fail_qgroup;
}
}
- mutex_unlock(&fs_info->cleaner_mutex);
- cleaner_mutex_locked = false;
location.objectid = BTRFS_FS_TREE_OBJECTID;
location.type = BTRFS_ROOT_ITEM_KEY;
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
fail_sysfs:
- if (cleaner_mutex_locked) {
- mutex_unlock(&fs_info->cleaner_mutex);
- cleaner_mutex_locked = false;
- }
btrfs_sysfs_remove_mounted(fs_info);
fail_fsdev_sysfs:
* Hint to catch really bogus numbers, bitflips or so, more exact checks are
* done later
*/
+ if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
+ btrfs_err(fs_info, "bytes_used is too small %llu",
+ btrfs_super_bytes_used(sb));
+ ret = -EINVAL;
+ }
+ if (!is_power_of_2(btrfs_super_stripesize(sb))) {
+ btrfs_err(fs_info, "invalid stripesize %u",
+ btrfs_super_stripesize(sb));
+ ret = -EINVAL;
+ }
if (btrfs_super_num_devices(sb) > (1UL << 31))
printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
btrfs_super_num_devices(sb));
void btrfs_free_fs_root(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(void);
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
#endif
/*
struct async_delayed_refs {
struct btrfs_root *root;
+ u64 transid;
int count;
int error;
int sync;
async = container_of(work, struct async_delayed_refs, work);
+ /* if the commit is already started, we don't need to wait here */
+ if (btrfs_transaction_blocked(async->root->fs_info))
+ goto done;
+
trans = btrfs_join_transaction(async->root);
if (IS_ERR(trans)) {
async->error = PTR_ERR(trans);
* wait on delayed refs
*/
trans->sync = true;
+
+ /* Don't bother flushing if we got into a different transaction */
+ if (trans->transid > async->transid)
+ goto end;
+
ret = btrfs_run_delayed_refs(trans, async->root, async->count);
if (ret)
async->error = ret;
-
+end:
ret = btrfs_end_transaction(trans, async->root);
if (ret && !async->error)
async->error = ret;
}
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
- unsigned long count, int wait)
+ unsigned long count, u64 transid, int wait)
{
struct async_delayed_refs *async;
int ret;
async->root = root->fs_info->tree_root;
async->count = count;
async->error = 0;
+ async->transid = transid;
if (wait)
async->sync = 1;
else
struct extent_buffer *buf;
buf = btrfs_find_create_tree_block(root, bytenr);
- if (!buf)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(buf))
+ return buf;
+
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
}
- trans->blocks_used++;
+ trans->dirty = true;
/* this returns a buffer locked for blocking */
return buf;
}
next = btrfs_find_tree_block(root->fs_info, bytenr);
if (!next) {
next = btrfs_find_create_tree_block(root, bytenr);
- if (!next)
- return -ENOMEM;
+ if (IS_ERR(next))
+ return PTR_ERR(next);
+
btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
level - 1);
reada = 1;
}
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
+ u64 start, u32 nodesize)
{
unsigned long len;
if (!fs_info) {
/*
* Called only from tests that don't always have a fs_info
- * available, but we know that nodesize is 4096
+ * available
*/
- len = 4096;
+ len = nodesize;
} else {
len = fs_info->tree_root->nodesize;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
+ u64 start, u32 nodesize)
{
struct extent_buffer *eb, *exists = NULL;
int ret;
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
- eb = alloc_dummy_extent_buffer(fs_info, start);
+ eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
if (!eb)
return NULL;
eb->fs_info = fs_info;
int uptodate = 1;
int ret;
+ if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+ btrfs_err(fs_info, "bad tree block start %llu", start);
+ return ERR_PTR(-EINVAL);
+ }
+
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
- if (!p)
+ if (!p) {
+ exists = ERR_PTR(-ENOMEM);
goto free_eb;
+ }
spin_lock(&mapping->private_lock);
if (PagePrivate(p)) {
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
again:
ret = radix_tree_preload(GFP_NOFS);
- if (ret)
+ if (ret) {
+ exists = ERR_PTR(ret);
goto free_eb;
+ }
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
return ret;
}
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long min_len, char **map,
unsigned long *map_start,
PAGE_SHIFT;
if (i != end_i)
- return -EINVAL;
+ return 1;
if (i == 0) {
offset = start_offset;
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start);
+ u64 start, u32 nodesize);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start);
+ u64 start, u32 nodesize);
#endif
reserve_bytes = round_up(write_bytes + sector_offset,
root->sectorsize);
- if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) &&
- check_can_nocow(inode, pos, &write_bytes) > 0) {
- /*
- * For nodata cow case, no need to reserve
- * data space.
- */
- only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_SIZE);
- reserve_bytes = round_up(write_bytes + sector_offset,
- root->sectorsize);
- goto reserve_metadata;
- }
-
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
- if (ret < 0)
- break;
+ if (ret < 0) {
+ if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_PREALLOC)) &&
+ check_can_nocow(inode, pos, &write_bytes) > 0) {
+ /*
+ * For nodata cow case, no need to reserve
+ * data space.
+ */
+ only_release_metadata = true;
+ /*
+ * our prealloc extent may be smaller than
+ * write_bytes, so scale down.
+ */
+ num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_SIZE);
+ reserve_bytes = round_up(write_bytes +
+ sector_offset,
+ root->sectorsize);
+ } else {
+ break;
+ }
+ }
-reserve_metadata:
ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
if (ret) {
if (!only_release_metadata)
#include "inode-map.h"
#include "volumes.h"
-#define BITS_PER_BITMAP (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG SZ_32K
struct btrfs_trim_range {
u64 offset)
{
u64 bitmap_start;
- u32 bytes_per_bitmap;
+ u64 bytes_per_bitmap;
bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
bitmap_start = offset - ctl->start;
- bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
+ bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
bitmap_start *= bytes_per_bitmap;
bitmap_start += ctl->start;
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
- u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
- u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
+ u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+ u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
- max_bitmaps = max_t(u32, max_bitmaps, 1);
+ max_bitmaps = max_t(u64, max_bitmaps, 1);
ASSERT(ctl->total_bitmaps <= max_bitmaps);
* sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
* we add more bitmaps.
*/
- bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
+ bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
if (bitmap_bytes >= max_bytes) {
ctl->extents_thresh = 0;
if (tmp->offset + tmp->bytes < offset)
break;
if (offset + bytes < tmp->offset) {
- n = rb_prev(&info->offset_index);
+ n = rb_prev(&tmp->offset_index);
continue;
}
info = tmp;
if (offset + bytes < tmp->offset)
break;
if (tmp->offset + tmp->bytes < offset) {
- n = rb_next(&info->offset_index);
+ n = rb_next(&tmp->offset_index);
continue;
}
info = tmp;
return PTR_ERR_OR_ZERO(tfm);
}
+const char* btrfs_crc32c_impl(void)
+{
+ return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
void btrfs_hash_exit(void)
{
crypto_free_shash(tfm);
int __init btrfs_hash_init(void);
void btrfs_hash_exit(void);
+const char* btrfs_crc32c_impl(void);
u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
/* grab metadata reservation from transaction handle */
if (reserve) {
ret = btrfs_orphan_reserve_metadata(trans, inode);
- BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
+ ASSERT(!ret);
+ if (ret) {
+ atomic_dec(&root->orphan_inodes);
+ clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+ &BTRFS_I(inode)->runtime_flags);
+ if (insert)
+ clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+ &BTRFS_I(inode)->runtime_flags);
+ return ret;
+ }
}
/* insert an orphan item to track this unlinked/truncated file */
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
+ trans->transid,
trans->delayed_ref_updates * 2, 0);
if (be_nice) {
if (truncate_space_check(trans, root,
int name_len;
int is_curr = 0; /* ctx->pos points to the current index? */
bool emitted;
+ bool put = false;
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
if (key_type == BTRFS_DIR_INDEX_KEY) {
INIT_LIST_HEAD(&ins_list);
INIT_LIST_HEAD(&del_list);
- btrfs_get_delayed_items(inode, &ins_list, &del_list);
+ put = btrfs_readdir_get_delayed_items(inode, &ins_list,
+ &del_list);
}
key.type = key_type;
nopos:
ret = 0;
err:
- if (key_type == BTRFS_DIR_INDEX_KEY)
- btrfs_put_delayed_items(&ins_list, &del_list);
+ if (put)
+ btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
btrfs_free_path(path);
return ret;
}
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
- .iterate = btrfs_real_readdir,
+ .iterate_shared = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_compat_ioctl,
struct rb_node *prev = NULL;
struct btrfs_ordered_extent *test;
int ret = 1;
+ u64 orig_offset = offset;
spin_lock_irq(&tree->lock);
if (ordered) {
/* truncate file */
if (disk_i_size > i_size) {
- BTRFS_I(inode)->disk_i_size = i_size;
+ BTRFS_I(inode)->disk_i_size = orig_offset;
ret = 0;
goto out;
}
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
- if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
+ if (!trans->dirty && list_empty(&trans->new_bgs)) {
const char *errstr;
errstr = btrfs_decode_error(errno);
}
}
sb->s_flags &= ~MS_RDONLY;
+
+ fs_info->open = 1;
}
out:
wake_up_process(fs_info->transaction_kthread);
static void btrfs_print_mod_info(void)
{
- printk(KERN_INFO "Btrfs loaded"
+ printk(KERN_INFO "Btrfs loaded, crc32c=%s"
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
", integrity-checker=on"
#endif
- "\n");
+ "\n",
+ btrfs_crc32c_impl());
}
static int btrfs_run_sanity_tests(void)
{
- int ret;
-
+ int ret, i;
+ u32 sectorsize, nodesize;
+ u32 test_sectorsize[] = {
+ PAGE_SIZE,
+ };
ret = btrfs_init_test_fs();
if (ret)
return ret;
-
- ret = btrfs_test_free_space_cache();
- if (ret)
- goto out;
- ret = btrfs_test_extent_buffer_operations();
- if (ret)
- goto out;
- ret = btrfs_test_extent_io();
- if (ret)
- goto out;
- ret = btrfs_test_inodes();
- if (ret)
- goto out;
- ret = btrfs_test_qgroups();
- if (ret)
- goto out;
- ret = btrfs_test_free_space_tree();
+ for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+ sectorsize = test_sectorsize[i];
+ for (nodesize = sectorsize;
+ nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+ nodesize <<= 1) {
+ pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
+ sectorsize, nodesize);
+ ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_buffer_operations(sectorsize,
+ nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_io(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_inodes(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_qgroups(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ }
+ }
out:
btrfs_destroy_test_fs();
return ret;
if (IS_ERR(test_mnt)) {
printk(KERN_ERR "btrfs: cannot mount test file system\n");
unregister_filesystem(&test_type);
- return ret;
+ return PTR_ERR(test_mnt);
}
return 0;
}
}
struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length)
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
{
struct btrfs_block_group_cache *cache;
cache->key.objectid = 0;
cache->key.offset = length;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- cache->sectorsize = 4096;
- cache->full_stripe_len = 4096;
+ cache->sectorsize = sectorsize;
+ cache->full_stripe_len = sectorsize;
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
struct btrfs_root;
struct btrfs_trans_handle;
-int btrfs_test_free_space_cache(void);
-int btrfs_test_extent_buffer_operations(void);
-int btrfs_test_extent_io(void);
-int btrfs_test_inodes(void);
-int btrfs_test_qgroups(void);
-int btrfs_test_free_space_tree(void);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
int btrfs_init_test_fs(void);
void btrfs_destroy_test_fs(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length);
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else
-static inline int btrfs_test_free_space_cache(void)
+static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_extent_buffer_operations(void)
+static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
+ u32 nodesize)
{
return 0;
}
static inline void btrfs_destroy_test_fs(void)
{
}
-static inline int btrfs_test_extent_io(void)
+static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_inodes(void)
+static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_qgroups(void)
+static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
{
return 0;
}
-static inline int btrfs_test_free_space_tree(void)
+static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
{
return 0;
}
#include "../extent_io.h"
#include "../disk-io.h"
-static int test_btrfs_split_item(void)
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
{
struct btrfs_path *path;
struct btrfs_root *root;
test_msg("Running btrfs_split_item tests\n");
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
return PTR_ERR(root);
return -ENOMEM;
}
- path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
+ path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
+ nodesize);
if (!eb) {
test_msg("Could not allocate dummy buffer\n");
ret = -ENOMEM;
return ret;
}
-int btrfs_test_extent_buffer_operations(void)
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
{
- test_msg("Running extent buffer operation tests");
- return test_btrfs_split_item();
+ test_msg("Running extent buffer operation tests\n");
+ return test_btrfs_split_item(sectorsize, nodesize);
}
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
+#include "../ctree.h"
#include "../extent_io.h"
#define PROCESS_UNLOCK (1 << 0)
return count;
}
-static int test_find_delalloc(void)
+static int test_find_delalloc(u32 sectorsize)
{
struct inode *inode;
struct extent_io_tree tmp;
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_delalloc(&tmp, 0, 4095, NULL);
+ set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
start = 0;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
test_msg("Should have found at least one delalloc\n");
goto out_bits;
}
- if (start != 0 || end != 4095) {
- test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
- start, end);
+ if (start != 0 || end != (sectorsize - 1)) {
+ test_msg("Expected start 0 end %u, got start %llu end %llu\n",
+ sectorsize - 1, start, end);
goto out_bits;
}
unlock_extent(&tmp, start, end);
test_msg("Couldn't find the locked page\n");
goto out_bits;
}
- set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
+ set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
* |--- delalloc ---|
* |--- search ---|
*/
- test_start = max_bytes + 4096;
+ test_start = max_bytes + sectorsize;
locked_page = find_lock_page(inode->i_mapping, test_start >>
PAGE_SHIFT);
if (!locked_page) {
return ret;
}
+/**
+ * test_bit_in_byte - Determine whether a bit is set in a byte
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit_in_byte(int nr, const u8 *addr)
+{
+ return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
unsigned long len)
{
return -EINVAL;
}
- bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
- sizeof(long) * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
- test_msg("Setting straddling pages failed\n");
- return -EINVAL;
- }
+ /* Straddling pages test */
+ if (len > PAGE_SIZE) {
+ bitmap_set(bitmap,
+ (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+ sizeof(long) * BITS_PER_BYTE);
+ extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ test_msg("Setting straddling pages failed\n");
+ return -EINVAL;
+ }
- bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
- bitmap_clear(bitmap,
- (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
- sizeof(long) * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
- test_msg("Clearing straddling pages failed\n");
- return -EINVAL;
+ bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+ bitmap_clear(bitmap,
+ (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+ sizeof(long) * BITS_PER_BYTE);
+ extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+ extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ test_msg("Clearing straddling pages failed\n");
+ return -EINVAL;
+ }
}
/*
for (i = 0; i < len * BITS_PER_BYTE; i++) {
int bit, bit1;
- bit = !!test_bit(i, bitmap);
+ bit = !!test_bit_in_byte(i, (u8 *)bitmap);
bit1 = !!extent_buffer_test_bit(eb, 0, i);
if (bit1 != bit) {
test_msg("Testing bit pattern failed\n");
return 0;
}
-static int test_eb_bitmaps(void)
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
- unsigned long len = PAGE_SIZE * 4;
+ unsigned long len;
unsigned long *bitmap;
struct extent_buffer *eb;
int ret;
test_msg("Running extent buffer bitmap tests\n");
+ /*
+ * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
+ * BTRFS_MAX_METADATA_BLOCKSIZE.
+ */
+ len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
+ ? sectorsize * 4 : sectorsize;
+
bitmap = kmalloc(len, GFP_KERNEL);
if (!bitmap) {
test_msg("Couldn't allocate test bitmap\n");
/* Do it over again with an extent buffer which isn't page-aligned. */
free_extent_buffer(eb);
- eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
+ eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
if (!eb) {
test_msg("Couldn't allocate test extent buffer\n");
kfree(bitmap);
return ret;
}
-int btrfs_test_extent_io(void)
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
int ret;
test_msg("Running extent I/O tests\n");
- ret = test_find_delalloc();
+ ret = test_find_delalloc(sectorsize);
if (ret)
goto out;
- ret = test_eb_bitmaps();
+ ret = test_eb_bitmaps(sectorsize, nodesize);
out:
test_msg("Extent I/O tests finished\n");
return ret;
#include "../disk-io.h"
#include "../free-space-cache.h"
-#define BITS_PER_BITMAP (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP (PAGE_SIZE * 8UL)
/*
* This test just does basic sanity checking, making sure we can add an extent
return 0;
}
-static int test_bitmaps(struct btrfs_block_group_cache *cache)
+static int test_bitmaps(struct btrfs_block_group_cache *cache,
+ u32 sectorsize)
{
u64 next_bitmap_offset;
int ret;
* The first bitmap we have starts at offset 0 so the next one is just
* at the end of the first bitmap.
*/
- next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+ next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
/* Test a bit straddling two bitmaps */
ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
}
/* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+ u32 sectorsize)
{
- u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+ u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
int ret;
test_msg("Running bitmap and extent tests\n");
* requests.
*/
static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+ u32 sectorsize)
{
int ret;
u64 offset;
* The goal is to test that the bitmap entry space stealing doesn't
* steal this space region.
*/
- ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096);
+ ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
return -ENOENT;
}
- if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) {
- test_msg("Cache free space is not 1Mb + 4Kb\n");
+ if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
+ test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
return -EINVAL;
}
return -EINVAL;
}
- /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+ /*
+ * All that remains is a sectorsize free space region in a bitmap.
+ * Confirm.
+ */
ret = check_num_extents_and_bitmaps(cache, 1, 1);
if (ret)
return ret;
- if (cache->free_space_ctl->free_space != 4096) {
- test_msg("Cache free space is not 4Kb\n");
+ if (cache->free_space_ctl->free_space != sectorsize) {
+ test_msg("Cache free space is not %u\n", sectorsize);
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
- 0, 4096, 0,
+ 0, sectorsize, 0,
&max_extent_size);
if (offset != (SZ_128M + SZ_16M)) {
- test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
- offset);
+ test_msg("Failed to allocate %u, returned offset : %llu\n",
+ sectorsize, offset);
return -EINVAL;
}
* The goal is to test that the bitmap entry space stealing doesn't
* steal this space region.
*/
- ret = btrfs_add_free_space(cache, SZ_32M, 8192);
+ ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
/*
* Confirm that our extent entry didn't stole all free space from the
- * bitmap, because of the small 8Kb free space region.
+ * bitmap, because of the small 2 * sectorsize free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return -ENOENT;
}
- if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) {
- test_msg("Cache free space is not 1Mb + 8Kb\n");
+ if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
+ test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
return -EINVAL;
}
return -EINVAL;
}
- /* All that remains is a 8Kb free space region in a bitmap. Confirm. */
+ /*
+ * All that remains is 2 * sectorsize free space region
+ * in a bitmap. Confirm.
+ */
ret = check_num_extents_and_bitmaps(cache, 1, 1);
if (ret)
return ret;
- if (cache->free_space_ctl->free_space != 8192) {
- test_msg("Cache free space is not 8Kb\n");
+ if (cache->free_space_ctl->free_space != 2 * sectorsize) {
+ test_msg("Cache free space is not %u\n", 2 * sectorsize);
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
- 0, 8192, 0,
+ 0, 2 * sectorsize, 0,
&max_extent_size);
if (offset != SZ_32M) {
- test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+ test_msg("Failed to allocate %u, offset: %llu\n",
+ 2 * sectorsize,
offset);
return -EINVAL;
}
return 0;
}
-int btrfs_test_free_space_cache(void)
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
struct btrfs_block_group_cache *cache;
struct btrfs_root *root = NULL;
test_msg("Running btrfs free space cache tests\n");
- cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
+ /*
+ * For ppc64 (with 64k page size), bytes per bitmap might be
+ * larger than 1G. To make bitmap test available in ppc64,
+ * alloc dummy block group whose size cross bitmaps.
+ */
+ cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
+ + PAGE_SIZE, sectorsize);
if (!cache) {
test_msg("Couldn't run the tests\n");
return 0;
}
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out;
ret = test_extents(cache);
if (ret)
goto out;
- ret = test_bitmaps(cache);
+ ret = test_bitmaps(cache, sectorsize);
if (ret)
goto out;
- ret = test_bitmaps_and_extents(cache);
+ ret = test_bitmaps_and_extents(cache, sectorsize);
if (ret)
goto out;
- ret = test_steal_space_from_bitmap_to_extent(cache);
+ ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
out:
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
* Boston, MA 021110-1307, USA.
*/
+#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../disk-io.h"
* The test cases align their operations to this in order to hit some of the
* edge cases in the bitmap code.
*/
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
+#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *,
struct btrfs_path *);
-static int run_test(test_func_t test_func, int bitmaps)
+static int run_test(test_func_t test_func, int bitmaps,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root = NULL;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_path *path = NULL;
int ret;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate dummy root\n");
ret = PTR_ERR(root);
root->fs_info->free_space_root = root;
root->fs_info->tree_root = root;
- root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+ root->node = alloc_test_extent_buffer(root->fs_info,
+ nodesize, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
}
btrfs_set_header_level(root->node, 0);
btrfs_set_header_nritems(root->node, 0);
- root->alloc_bytenr += 8192;
+ root->alloc_bytenr += 2 * nodesize;
- cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
+ cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
if (!cache) {
test_msg("Couldn't allocate dummy block group cache\n");
ret = -ENOMEM;
return ret;
}
-static int run_test_both_formats(test_func_t test_func)
+static int run_test_both_formats(test_func_t test_func,
+ u32 sectorsize, u32 nodesize)
{
int ret;
- ret = run_test(test_func, 0);
+ ret = run_test(test_func, 0, sectorsize, nodesize);
if (ret)
return ret;
- return run_test(test_func, 1);
+ return run_test(test_func, 1, sectorsize, nodesize);
}
-int btrfs_test_free_space_tree(void)
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
{
test_func_t tests[] = {
test_empty_block_group,
test_msg("Running free space tree tests\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- int ret = run_test_both_formats(tests[i]);
+ int ret = run_test_both_formats(tests[i], sectorsize,
+ nodesize);
if (ret) {
- test_msg("%pf failed\n", tests[i]);
+ test_msg("%pf : sectorsize %u failed\n",
+ tests[i], sectorsize);
return ret;
}
}
* Boston, MA 021110-1307, USA.
*/
+#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../btrfs_inode.h"
* diagram of how the extents will look though this may not be possible we still
* want to make sure everything acts normally (the last number is not inclusive)
*
- * [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288]
- * [hole ][inline][ hole ][ regular ][regular1 split][ hole ]
+ * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
+ * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split]
*
- * [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056]
- * [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ]
+ * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
+ * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written]
*
- * [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536+81920 ]
- * [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent]
+ * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635]
+ * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1]
*
- * [81920-86016]
- * [ regular ]
+ * [69635-73731][ 73731 - 86019 ][86019-90115]
+ * [ regular ][ hole but no extent][ regular ]
*/
-static void setup_file_extents(struct btrfs_root *root)
+static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
{
int slot = 0;
u64 disk_bytenr = SZ_1M;
insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
slot);
slot++;
- offset = 4096;
+ offset = sectorsize;
/* Now another hole */
insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
offset += 4;
/* Now for a regular extent */
- insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
+ disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- disk_bytenr += 4096;
- offset += 4095;
+ disk_bytenr += sectorsize;
+ offset += sectorsize - 1;
/*
* Now for 3 extents that were split from a hole punch so we test
* offsets properly.
*/
- insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
- 0, slot);
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
+ BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, 4 * sectorsize,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 8192;
- disk_bytenr += 16384;
+ offset += 2 * sectorsize;
+ disk_bytenr += 4 * sectorsize;
/* Now for a unwritten prealloc extent */
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
- offset += 4096;
+ offset += sectorsize;
/*
* We want to jack up disk_bytenr a little more so the em stuff doesn't
* merge our records.
*/
- disk_bytenr += 8192;
+ disk_bytenr += 2 * sectorsize;
/*
* Now for a partially written prealloc extent, basically the same as
* the hole punch example above. Ram_bytes never changes when you mark
* extents written btw.
*/
- insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
- BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
+ disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
+ slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, 4 * sectorsize,
BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
- offset += 8192;
- disk_bytenr += 16384;
+ offset += 2 * sectorsize;
+ disk_bytenr += 4 * sectorsize;
/* Now a normal compressed extent */
- insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+ insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
+ disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
+ BTRFS_COMPRESS_ZLIB, slot);
slot++;
- offset += 8192;
+ offset += 2 * sectorsize;
/* No merges */
- disk_bytenr += 8192;
+ disk_bytenr += 2 * sectorsize;
/* Now a split compressed extent */
- insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG,
+ BTRFS_COMPRESS_ZLIB, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
+ offset += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0,
+ disk_bytenr + sectorsize, sectorsize,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 4096;
- insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
+ offset += sectorsize;
+ insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+ 2 * sectorsize, disk_bytenr, sectorsize,
BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
slot++;
- offset += 8192;
- disk_bytenr += 8192;
+ offset += 2 * sectorsize;
+ disk_bytenr += 2 * sectorsize;
/* Now extents that have a hole but no hole extent */
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
- offset += 16384;
- disk_bytenr += 4096;
- insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
- BTRFS_FILE_EXTENT_REG, 0, slot);
+ offset += 4 * sectorsize;
+ disk_bytenr += sectorsize;
+ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
}
static unsigned long prealloc_only = 0;
static unsigned long compressed_only = 0;
static unsigned long vacancy_only = 0;
-static noinline int test_btrfs_get_extent(void)
+static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
goto out;
}
- root->node = alloc_dummy_extent_buffer(NULL, 4096);
+ root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
/* First with no extents */
BTRFS_I(inode)->root = root;
- em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
if (IS_ERR(em)) {
em = NULL;
test_msg("Got an error when we shouldn't have\n");
* setup_file_extents, so if you change anything there you need to
* update the comment and update the expected values below.
*/
- setup_file_extents(root);
+ setup_file_extents(root, sectorsize);
em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
if (IS_ERR(em)) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected an inline, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4091) {
+
+ if (em->start != offset || em->len != (sectorsize - 5)) {
test_msg("Unexpected extent wanted start %llu len 1, got start "
"%llu len %llu\n", offset, em->start, em->len);
goto out;
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
free_extent_map(em);
/* Regular extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4095) {
+ if (em->start != offset || em->len != sectorsize - 1) {
test_msg("Unexpected extent wanted start %llu len 4095, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
free_extent_map(em);
/* The next 3 are split extents */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
free_extent_map(em);
/* Prealloc extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
free_extent_map(em);
/* The next 3 are a half written prealloc extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
free_extent_map(em);
/* Now for the compressed extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u,"
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
free_extent_map(em);
/* Split compressed extent */
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u,"
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
disk_bytenr, em->block_start);
goto out;
}
- if (em->start != offset || em->len != 8192) {
- test_msg("Unexpected extent wanted start %llu len 8192, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 2 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 2 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
free_extent_map(em);
/* A hole between regular extents but no hole extent */
- em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
* length of the actual hole, if this changes we'll have to change this
* test.
*/
- if (em->start != offset || em->len != 12288) {
- test_msg("Unexpected extent wanted start %llu len 12288, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != 3 * sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u, "
+ "got start %llu len %llu\n",
+ offset, 3 * sectorsize, em->start, em->len);
goto out;
}
if (em->flags != vacancy_only) {
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+ em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != offset || em->len != 4096) {
- test_msg("Unexpected extent wanted start %llu len 4096, got "
- "start %llu len %llu\n", offset, em->start, em->len);
+ if (em->start != offset || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %llu len %u,"
+ "got start %llu len %llu\n",
+ offset, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
return ret;
}
-static int test_hole_first(void)
+static int test_hole_first(u32 sectorsize, u32 nodesize)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
goto out;
}
- root->node = alloc_dummy_extent_buffer(NULL, 4096);
+ root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
* btrfs_get_extent.
*/
insert_inode_item_key(root);
- insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
- BTRFS_FILE_EXTENT_REG, 0, 1);
- em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
+ insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
+ sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
+ em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
- if (em->start != 0 || em->len != 4096) {
- test_msg("Unexpected extent wanted start 0 len 4096, got start "
- "%llu len %llu\n", em->start, em->len);
+ if (em->start != 0 || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start 0 len %u, "
+ "got start %llu len %llu\n",
+ sectorsize, em->start, em->len);
goto out;
}
if (em->flags != vacancy_only) {
}
free_extent_map(em);
- em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
+ em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
- if (em->block_start != 4096) {
+ if (em->block_start != sectorsize) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
- if (em->start != 4096 || em->len != 4096) {
- test_msg("Unexpected extent wanted start 4096 len 4096, got "
- "start %llu len %llu\n", em->start, em->len);
+ if (em->start != sectorsize || em->len != sectorsize) {
+ test_msg("Unexpected extent wanted start %u len %u, "
+ "got start %llu len %llu\n",
+ sectorsize, sectorsize, em->start, em->len);
goto out;
}
if (em->flags != 0) {
return ret;
}
-static int test_extent_accounting(void)
+static int test_extent_accounting(u32 sectorsize, u32 nodesize)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
return ret;
}
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE][4k] */
+ /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
- BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
+ BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
+ NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
+ /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
BTRFS_MAX_EXTENT_SIZE >> 1,
- (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+ (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
EXTENT_DELALLOC | EXTENT_DIRTY |
EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
NULL, GFP_KERNEL);
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE][4K] */
+ /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
- (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+ (BTRFS_MAX_EXTENT_SIZE >> 1)
+ + sectorsize - 1,
NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
}
/*
- * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
+ * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
*
* I'm artificially adding 2 to outstanding_extents because in the
* buffered IO case we'd add things up as we go, but I don't feel like
* doing that here, this isn't the interesting case we want to test.
*/
BTRFS_I(inode)->outstanding_extents += 2;
- ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
- (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
- NULL);
+ ret = btrfs_set_extent_delalloc(inode,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
+ (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
+ NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
goto out;
}
- /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
+ /*
+ * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+ */
BTRFS_I(inode)->outstanding_extents++;
- ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
- BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+ ret = btrfs_set_extent_delalloc(inode,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
/* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
- BTRFS_MAX_EXTENT_SIZE+4096,
- BTRFS_MAX_EXTENT_SIZE+8191,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
NULL, GFP_KERNEL);
* might fail and I'd rather satisfy my paranoia at this point.
*/
BTRFS_I(inode)->outstanding_extents++;
- ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
- BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+ ret = btrfs_set_extent_delalloc(inode,
+ BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
return ret;
}
-int btrfs_test_inodes(void)
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
{
int ret;
set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
test_msg("Running btrfs_get_extent tests\n");
- ret = test_btrfs_get_extent();
+ ret = test_btrfs_get_extent(sectorsize, nodesize);
if (ret)
return ret;
test_msg("Running hole first btrfs_get_extent test\n");
- ret = test_hole_first();
+ ret = test_hole_first(sectorsize, nodesize);
if (ret)
return ret;
test_msg("Running outstanding_extents tests\n");
- return test_extent_accounting();
+ return test_extent_accounting(sectorsize, nodesize);
}
* Boston, MA 021110-1307, USA.
*/
+#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../transaction.h"
return ret;
}
-static int test_no_shared_qgroup(struct btrfs_root *root)
+static int test_no_shared_qgroup(struct btrfs_root *root,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
btrfs_init_dummy_trans(&trans);
test_msg("Qgroup basic add\n");
- ret = btrfs_create_qgroup(NULL, fs_info, 5);
+ ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
* we can only call btrfs_qgroup_account_extent() directly to test
* quota.
*/
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
old_roots = NULL;
new_roots = NULL;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = remove_extent_item(root, 4096, 4096);
+ ret = remove_extent_item(root, nodesize, nodesize);
if (ret)
return -EINVAL;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return -EINVAL;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
* right, also remove one of the roots and make sure the exclusive count is
* adjusted properly.
*/
-static int test_multiple_refs(struct btrfs_root *root)
+static int test_multiple_refs(struct btrfs_root *root,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
test_msg("Qgroup multiple refs test\n");
- /* We have 5 created already from the previous test */
- ret = btrfs_create_qgroup(NULL, fs_info, 256);
+ /*
+ * We have BTRFS_FS_TREE_OBJECTID created already from the
+ * previous test.
+ */
+ ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
}
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+ ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FS_TREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = add_tree_ref(root, 4096, 4096, 0, 256);
+ ret = add_tree_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+ nodesize, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
return ret;
}
- ret = remove_extent_ref(root, 4096, 4096, 0, 256);
+ ret = remove_extent_ref(root, nodesize, nodesize, 0,
+ BTRFS_FIRST_FREE_OBJECTID);
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
- old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+ nodesize, old_roots, new_roots);
if (ret) {
test_msg("Couldn't account space for a qgroup %d\n", ret);
return ret;
}
- if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+ 0, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
- if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+ if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+ nodesize, nodesize)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
return 0;
}
-int btrfs_test_qgroups(void)
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root;
struct btrfs_root *tmp_root;
int ret = 0;
- root = btrfs_alloc_dummy_root();
+ root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
return PTR_ERR(root);
* Can't use bytenr 0, some things freak out
* *cough*backref walking code*cough*
*/
- root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
+ nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
}
btrfs_set_header_level(root->node, 0);
btrfs_set_header_nritems(root->node, 0);
- root->alloc_bytenr += 8192;
+ root->alloc_bytenr += 2 * nodesize;
- tmp_root = btrfs_alloc_dummy_root();
+ tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
goto out;
}
- tmp_root->root_key.objectid = 5;
+ tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
root->fs_info->fs_root = tmp_root;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
if (ret) {
goto out;
}
- tmp_root = btrfs_alloc_dummy_root();
+ tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
goto out;
}
- tmp_root->root_key.objectid = 256;
+ tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
if (ret) {
test_msg("Couldn't insert fs root %d\n", ret);
}
test_msg("Running qgroup tests\n");
- ret = test_no_shared_qgroup(root);
+ ret = test_no_shared_qgroup(root, sectorsize, nodesize);
if (ret)
goto out;
- ret = test_multiple_refs(root);
+ ret = test_multiple_refs(root, sectorsize, nodesize);
out:
btrfs_free_dummy_root(root);
return ret;
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
+ u64 transid = trans->transid;
unsigned long cur = trans->delayed_ref_updates;
int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (must_run_delayed_refs) {
- btrfs_async_run_delayed_refs(root, cur,
+ btrfs_async_run_delayed_refs(root, cur, transid,
must_run_delayed_refs == 1);
}
return err;
return ret;
}
-/* Bisesctability fixup, remove in 4.8 */
-#ifndef btrfs_std_error
-#define btrfs_std_error btrfs_handle_fs_error
-#endif
-
/*
* Do all special snapshot related qgroup dirty hack.
*
switch_commit_roots(trans->transaction, fs_info);
ret = btrfs_write_and_wait_transaction(trans, src);
if (ret)
- btrfs_std_error(fs_info, ret,
+ btrfs_handle_fs_error(fs_info, ret,
"Error while writing out transaction for qgroup");
out:
u64 chunk_bytes_reserved;
unsigned long use_count;
unsigned long blocks_reserved;
- unsigned long blocks_used;
unsigned long delayed_ref_updates;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
bool can_flush_pending_bgs;
bool reloc_reserved;
bool sync;
+ bool dirty;
unsigned int type;
/*
* this root is only needed to validate that the root passed to
root_owner = btrfs_header_owner(parent);
next = btrfs_find_create_tree_block(root, bytenr);
- if (!next)
- return -ENOMEM;
+ if (IS_ERR(next))
+ return PTR_ERR(next);
if (*level == 1) {
ret = wc->process_func(root, next, wc, ptr_gen);
if (IS_ERR(uuid_root)) {
ret = PTR_ERR(uuid_root);
btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_end_transaction(trans, tree_root);
return ret;
}
if (type & BTRFS_BLOCK_GROUP_RAID5) {
raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
- btrfs_super_stripesize(info->super_copy));
+ extent_root->stripesize);
data_stripes = num_stripes - 1;
}
if (type & BTRFS_BLOCK_GROUP_RAID6) {
raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
- btrfs_super_stripesize(info->super_copy));
+ extent_root->stripesize);
data_stripes = num_stripes - 2;
}
return dev;
}
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
- struct extent_buffer *leaf,
- struct btrfs_chunk *chunk)
+/* Return -EIO if any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk, u64 logical)
{
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
- struct map_lookup *map;
- struct extent_map *em;
- u64 logical;
u64 length;
u64 stripe_len;
- u64 devid;
- u8 uuid[BTRFS_UUID_SIZE];
- int num_stripes;
- int ret;
- int i;
+ u16 num_stripes;
+ u16 sub_stripes;
+ u64 type;
- logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
- /* Validation check */
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+
if (!num_stripes) {
btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
num_stripes);
"invalid chunk logical %llu", logical);
return -EIO;
}
+ if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+ btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+ btrfs_chunk_sector_size(leaf, chunk));
+ return -EIO;
+ }
if (!length || !IS_ALIGNED(length, root->sectorsize)) {
btrfs_err(root->fs_info,
"invalid chunk length %llu", length);
return -EIO;
}
if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- btrfs_chunk_type(leaf, chunk)) {
+ type) {
btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
BTRFS_BLOCK_GROUP_PROFILE_MASK) &
btrfs_chunk_type(leaf, chunk));
return -EIO;
}
+ if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+ num_stripes != 1)) {
+ btrfs_err(root->fs_info,
+ "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+ num_stripes, sub_stripes,
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk)
+{
+ struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+ struct map_lookup *map;
+ struct extent_map *em;
+ u64 logical;
+ u64 length;
+ u64 stripe_len;
+ u64 devid;
+ u8 uuid[BTRFS_UUID_SIZE];
+ int num_stripes;
+ int ret;
+ int i;
+
+ logical = key->offset;
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+ ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+ if (ret)
+ return ret;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
u32 array_size;
u32 len = 0;
u32 cur_offset;
+ u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
* overallocate but we can keep it as-is, only the first page is used.
*/
sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
- if (!sb)
- return -ENOMEM;
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
set_extent_buffer_uptodate(sb);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
/*
break;
}
+ type = btrfs_chunk_type(sb, chunk);
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+ btrfs_err(root->fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur_offset);
+ ret = -EIO;
+ break;
+ }
+
len = btrfs_chunk_item_size(num_stripes);
if (cur_offset + len > array_size)
goto out_short_read;
sb_array_offset += len;
cur_offset += len;
}
+ clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return ret;
out_short_read:
printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
len, cur_offset);
+ clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
return -EIO;
}
struct btrfs_key found_key;
int ret;
int slot;
+ u64 total_dev = 0;
root = root->fs_info->chunk_root;
ret = read_one_dev(root, leaf, dev_item);
if (ret)
goto error;
+ total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
}
path->slots[0]++;
}
+
+ /*
+ * After loading chunk tree, we've got all device information,
+ * do another round of validation checks.
+ */
+ if (total_dev != root->fs_info->fs_devices->total_devices) {
+ btrfs_err(root->fs_info,
+ "super_num_devices %llu mismatch with num_devices %llu found here",
+ btrfs_super_num_devices(root->fs_info->super_copy),
+ total_dev);
+ ret = -EINVAL;
+ goto error;
+ }
+ if (btrfs_super_total_bytes(root->fs_info->super_copy) <
+ root->fs_info->fs_devices->total_rw_bytes) {
+ btrfs_err(root->fs_info,
+ "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
+ btrfs_super_total_bytes(root->fs_info->super_copy),
+ root->fs_info->fs_devices->total_rw_bytes);
+ ret = -EINVAL;
+ goto error;
+ }
ret = 0;
error:
unlock_chunks(root);
}
dentry = d_obtain_alias(inode);
- if (IS_ERR(dentry)) {
- iput(inode);
+ if (IS_ERR(dentry))
return dentry;
- }
err = ceph_init_dentry(dentry);
if (err < 0) {
dput(dentry);
return ERR_PTR(-ENOENT);
dentry = d_obtain_alias(inode);
- if (IS_ERR(dentry)) {
- iput(inode);
+ if (IS_ERR(dentry))
return dentry;
- }
err = ceph_init_dentry(dentry);
if (err < 0) {
dput(dentry);
dout("fh_to_parent %llx\n", cfh->parent_ino);
dentry = __get_parent(sb, NULL, cfh->ino);
- if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT)
+ if (unlikely(dentry == ERR_PTR(-ENOENT)))
dentry = __fh_to_dentry(sb, cfh->parent_ino);
return dentry;
}
if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
dn = ceph_finish_lookup(req, dentry, err);
if (IS_ERR(dn))
err = PTR_ERR(dn);
case SFM_SLASH:
*target = '\\';
break;
+ case SFM_SPACE:
+ *target = ' ';
+ break;
+ case SFM_PERIOD:
+ *target = '.';
+ break;
default:
return false;
}
return dest_char;
}
-static __le16 convert_to_sfm_char(char src_char)
+static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
{
__le16 dest_char;
case '|':
dest_char = cpu_to_le16(SFM_PIPE);
break;
+ case '.':
+ if (end_of_string)
+ dest_char = cpu_to_le16(SFM_PERIOD);
+ else
+ dest_char = 0;
+ break;
+ case ' ':
+ if (end_of_string)
+ dest_char = cpu_to_le16(SFM_SPACE);
+ else
+ dest_char = 0;
+ break;
default:
dest_char = 0;
}
/* see if we must remap this char */
if (map_chars == SFU_MAP_UNI_RSVD)
dst_char = convert_to_sfu_char(src_char);
- else if (map_chars == SFM_MAP_UNI_RSVD)
- dst_char = convert_to_sfm_char(src_char);
- else
+ else if (map_chars == SFM_MAP_UNI_RSVD) {
+ bool end_of_string;
+
+ if (i == srclen - 1)
+ end_of_string = true;
+ else
+ end_of_string = false;
+
+ dst_char = convert_to_sfm_char(src_char, end_of_string);
+ } else
dst_char = 0;
/*
* FIXME: We can not handle remapping backslash (UNI_SLASH)
#define SFM_LESSTHAN ((__u16) 0xF023)
#define SFM_PIPE ((__u16) 0xF027)
#define SFM_SLASH ((__u16) 0xF026)
+#define SFM_PERIOD ((__u16) 0xF028)
+#define SFM_SPACE ((__u16) 0xF029)
/*
* Mapping mechanism to use when one of the seven reserved characters is
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
+__u32 cifs_lock_secret;
/*
* Bumps refcount for cifs super block.
spin_lock_init(&cifs_file_list_lock);
spin_lock_init(&GlobalMid_Lock);
+ get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+extern __u32 cifs_lock_secret;
extern mempool_t *cifs_mid_poolp;
* server->ops->need_neg() == true. Also, no need to ping if
* we got a response recently.
*/
- if (!server->ops->need_neg || server->ops->need_neg(server) ||
+
+ if (server->tcpStatus == CifsNeedReconnect ||
+ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + echo_interval - HZ))
goto requeue_echo;
* Check for hashed negative dentry. We have already revalidated
* the dentry and it is fine. No need to perform another lookup.
*/
- if (!d_unhashed(direntry))
+ if (!d_in_lookup(direntry))
return -ENOENT;
res = cifs_lookup(inode, direntry, 0);
return rc;
}
+static __u32
+hash_lockowner(fl_owner_t owner)
+{
+ return cifs_lock_secret ^ hash32_ptr((const void *)owner);
+}
+
struct lock_to_push {
struct list_head llist;
__u64 offset;
else
type = CIFS_WRLCK;
lck = list_entry(el, struct lock_to_push, llist);
- lck->pid = flock->fl_pid;
+ lck->pid = hash_lockowner(flock->fl_owner);
lck->netfid = cfile->fid.netfid;
lck->length = length;
lck->type = type;
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
- rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
+ rc = CIFSSMBPosixLock(xid, tcon, netfid,
+ hash_lockowner(flock->fl_owner),
flock->fl_start, length, flock,
posix_lock_type, wait_flag);
return rc;
posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
- current->tgid, flock->fl_start, length,
+ hash_lockowner(flock->fl_owner),
+ flock->fl_start, length,
NULL, posix_lock_type, wait_flag);
goto out;
}
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
+int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp);
sec_blob->DomainName.MaximumLength = 0;
}
-/* We do not malloc the blob, it is passed in pbuffer, because its
- maximum possible size is fixed and small, making this approach cleaner.
- This function returns the length of the data in the blob */
-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+static int size_of_ntlmssp_blob(struct cifs_ses *ses)
+{
+ int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
+ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+
+ if (ses->domainName)
+ sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ else
+ sz += 2;
+
+ if (ses->user_name)
+ sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ else
+ sz += 2;
+
+ return sz;
+}
+
+int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc;
- AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
+ AUTHENTICATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
+ rc = setup_ntlmv2_rsp(ses, nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+ *buflen = 0;
+ goto setup_ntlmv2_ret;
+ }
+ *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+ sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
+
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
}
- tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
sec_blob->LmChallengeResponse.BufferOffset =
sec_blob->LmChallengeResponse.Length = 0;
sec_blob->LmChallengeResponse.MaximumLength = 0;
- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->NtChallengeResponse.BufferOffset =
+ cpu_to_le32(tmp - *pbuffer);
if (ses->user_name != NULL) {
- rc = setup_ntlmv2_rsp(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
- goto setup_ntlmv2_ret;
- }
memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
ses->auth_key.len - CIFS_SESS_KEY_SIZE);
tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
}
if (ses->domainName == NULL) {
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = 0;
sec_blob->DomainName.MaximumLength = 0;
tmp += 2;
} else {
int len;
len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
- CIFS_MAX_USERNAME_LEN, nls_cp);
+ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = cpu_to_le16(len);
sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
if (ses->user_name == NULL) {
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
CIFS_MAX_USERNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = cpu_to_le16(len);
sec_blob->UserName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->WorkstationName.Length = 0;
sec_blob->WorkstationName.MaximumLength = 0;
tmp += 2;
(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
&& !calc_seckey(ses)) {
memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.MaximumLength =
cpu_to_le16(CIFS_CPHTXT_SIZE);
tmp += CIFS_CPHTXT_SIZE;
} else {
- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = 0;
sec_blob->SessionKey.MaximumLength = 0;
}
+ *buflen = tmp - *pbuffer;
setup_ntlmv2_ret:
- *buflen = tmp - pbuffer;
return rc;
}
rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
true : false, lnm_session_key);
+ if (rc)
+ goto out;
memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
bcc_ptr += CIFS_AUTH_RESP_SIZE;
struct cifs_ses *ses = sess_data->ses;
__u16 bytes_remaining;
char *bcc_ptr;
- char *ntlmsspblob = NULL;
+ unsigned char *ntlmsspblob = NULL;
u16 blob_len;
cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
/* Build security blob before we assemble the request */
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
smb_buf = (struct smb_hdr *)pSMB;
- /*
- * 5 is an empirical value, large enough to hold
- * authenticate message plus max 10 of av paris,
- * domain, user, workstation names, flags, etc.
- */
- ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
- GFP_KERNEL);
- if (!ntlmsspblob) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = build_ntlmssp_auth_blob(ntlmsspblob,
+ rc = build_ntlmssp_auth_blob(&ntlmsspblob,
&blob_len, ses, sess_data->nls_cp);
if (rc)
goto out_free_ntlmsspblob;
u16 blob_length = 0;
struct key *spnego_key = NULL;
char *security_blob = NULL;
- char *ntlmssp_blob = NULL;
+ unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
cifs_dbg(FYI, "Session Setup\n");
iov[1].iov_len = blob_length;
} else if (phase == NtLmAuthenticate) {
req->hdr.SessionId = ses->Suid;
- ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
- GFP_KERNEL);
- if (ntlmssp_blob == NULL) {
- rc = -ENOMEM;
- goto ssetup_exit;
- }
- rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
+ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
nls_cp);
if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
cifs_dbg(FYI, "In echo request\n");
+ if (server->tcpStatus == CifsNeedNegotiate) {
+ struct list_head *tmp, *tmp2;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each(tmp2, &ses->tcon_list) {
+ tcon = list_entry(tmp2, struct cifs_tcon,
+ tcon_list);
+ /* add check for persistent handle reconnect */
+ if (tcon && tcon->need_reconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ rc = smb2_reconnect(SMB2_ECHO, tcon);
+ spin_lock(&cifs_tcp_ses_lock);
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+
+ /* if no session, renegotiate failed above */
+ if (server->tcpStatus == CifsNeedNegotiate)
+ return -EIO;
+
rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
if (rc)
return rc;
len = simple_write_to_buffer(buffer->bin_buffer,
buffer->bin_buffer_size, ppos, buf, count);
- if (len > 0)
- *ppos += len;
out:
mutex_unlock(&buffer->mutex);
return len;
return 0;
file->f_pos = pos;
cprm->written += n;
+ cprm->pos += n;
nr -= n;
}
return 1;
if (dump_interrupted() ||
file->f_op->llseek(file, nr, SEEK_CUR) < 0)
return 0;
+ cprm->pos += nr;
return 1;
} else {
while (nr > PAGE_SIZE) {
int dump_align(struct coredump_params *cprm, int align)
{
- unsigned mod = cprm->file->f_pos & (align - 1);
+ unsigned mod = cprm->pos & (align - 1);
if (align & (align - 1))
return 0;
return mod ? dump_skip(cprm, align - mod) : 1;
dax.addr += first;
size = map_len - first;
}
- max = min(pos + size, end);
+ /*
+ * pos + size is one past the last offset for IO,
+ * so pos + size can overflow loff_t at extreme offsets.
+ * Cast to u64 to catch this and get the true minimum.
+ */
+ max = min_t(u64, pos + size, end);
}
if (iov_iter_rw(iter) == WRITE) {
}
EXPORT_SYMBOL(d_drop);
+static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
+{
+ struct dentry *next;
+ /*
+ * Inform d_walk() and shrink_dentry_list() that we are no longer
+ * attached to the dentry tree
+ */
+ dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ if (unlikely(list_empty(&dentry->d_child)))
+ return;
+ __list_del_entry(&dentry->d_child);
+ /*
+ * Cursors can move around the list of children. While we'd been
+ * a normal list member, it didn't matter - ->d_child.next would've
+ * been updated. However, from now on it won't be and for the
+ * things like d_walk() it might end up with a nasty surprise.
+ * Normally d_walk() doesn't care about cursors moving around -
+ * ->d_lock on parent prevents that and since a cursor has no children
+ * of its own, we get through it without ever unlocking the parent.
+ * There is one exception, though - if we ascend from a child that
+ * gets killed as soon as we unlock it, the next sibling is found
+ * using the value left in its ->d_child.next. And if _that_
+ * pointed to a cursor, and cursor got moved (e.g. by lseek())
+ * before d_walk() regains parent->d_lock, we'll end up skipping
+ * everything the cursor had been moved past.
+ *
+ * Solution: make sure that the pointer left behind in ->d_child.next
+ * points to something that won't be moving around. I.e. skip the
+ * cursors.
+ */
+ while (dentry->d_child.next != &parent->d_subdirs) {
+ next = list_entry(dentry->d_child.next, struct dentry, d_child);
+ if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
+ break;
+ dentry->d_child.next = next->d_child.next;
+ }
+}
+
static void __dentry_kill(struct dentry *dentry)
{
struct dentry *parent = NULL;
}
/* if it was on the hash then remove it */
__d_drop(dentry);
- __list_del_entry(&dentry->d_child);
- /*
- * Inform d_walk() that we are no longer attached to the
- * dentry tree
- */
- dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ dentry_unlist(dentry, parent);
if (parent)
spin_unlock(&parent->d_lock);
dentry_iput(dentry);
struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
next = tmp->next;
+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+ continue;
+
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
ret = enter(data, dentry);
struct dentry *dentry = __d_alloc(parent->d_sb, name);
if (!dentry)
return NULL;
-
+ dentry->d_flags |= DCACHE_RCUACCESS;
spin_lock(&parent->d_lock);
/*
* don't need child lock because it is not subject
}
EXPORT_SYMBOL(d_alloc);
+struct dentry *d_alloc_cursor(struct dentry * parent)
+{
+ struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
+ if (dentry) {
+ dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
+ dentry->d_parent = dget(parent);
+ }
+ return dentry;
+}
+
/**
* d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
* @sb: the superblock
{
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
- entry->d_flags |= DCACHE_RCUACCESS;
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
rcu_read_unlock();
goto retry;
}
- rcu_read_unlock();
/*
* No changes for the parent since the beginning of d_lookup().
* Since all removals from the chain happen with hlist_bl_lock(),
continue;
if (dentry->d_parent != parent)
continue;
- if (d_unhashed(dentry))
- continue;
if (parent->d_flags & DCACHE_OP_COMPARE) {
int tlen = dentry->d_name.len;
const char *tname = dentry->d_name.name;
if (dentry_cmp(dentry, str, len))
continue;
}
- dget(dentry);
hlist_bl_unlock(b);
- /* somebody is doing lookup for it right now; wait for it */
+ /* now we can try to grab a reference */
+ if (!lockref_get_not_dead(&dentry->d_lockref)) {
+ rcu_read_unlock();
+ goto retry;
+ }
+
+ rcu_read_unlock();
+ /*
+ * somebody is likely to be still doing lookup for it;
+ * wait for them to finish
+ */
spin_lock(&dentry->d_lock);
d_wait_lookup(dentry);
/*
dput(new);
return dentry;
}
+ rcu_read_unlock();
/* we can't take ->d_lock here; it's OK, though. */
new->d_flags |= DCACHE_PAR_LOOKUP;
new->d_wait = wq;
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
/* splicing a tree */
+ dentry->d_flags |= DCACHE_RCUACCESS;
dentry->d_parent = target->d_parent;
target->d_parent = target;
list_del_init(&target->d_child);
r = real_fops->open(inode, filp);
out:
- fops_put(real_fops);
debugfs_use_file_finish(srcu_idx);
return r;
}
if (real_fops->open) {
r = real_fops->open(inode, filp);
-
- if (filp->f_op != proxy_fops) {
+ if (r) {
+ replace_fops(filp, d_inode(dentry)->i_fop);
+ goto free_proxy;
+ } else if (filp->f_op != proxy_fops) {
/* No protection against file removal anymore. */
WARN(1, "debugfs file owner replaced proxy fops: %pd",
dentry);
* ecryptfs_to_hex
* @dst: Buffer to take hex character representation of contents of
* src; must be at least of size (src_size * 2)
- * @src: Buffer to be converted to a hex string respresentation
+ * @src: Buffer to be converted to a hex string representation
* @src_size: number of bytes to convert
*/
void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
* ecryptfs_from_hex
* @dst: Buffer to take the bytes from src hex; must be at least of
* size (src_size / 2)
- * @src: Buffer to be converted from a hex string respresentation to raw value
+ * @src: Buffer to be converted from a hex string representation to raw value
* @dst_size: size of dst buffer, or number of hex characters pairs to convert
*/
void ecryptfs_from_hex(char *dst, char *src, int dst_size)
};
/* Add support for additional ciphers by adding elements here. The
- * cipher_code is whatever OpenPGP applicatoins use to identify the
+ * cipher_code is whatever OpenPGP applications use to identify the
* ciphers. List in order of probability. */
static struct ecryptfs_cipher_code_str_map_elem
ecryptfs_cipher_code_str_map[] = {
*
* Common entry point for reading file metadata. From here, we could
* retrieve the header information from the header region of the file,
- * the xattr region of the file, or some other repostory that is
+ * the xattr region of the file, or some other repository that is
* stored separately from the file itself. The current implementation
* supports retrieving the metadata information from the file contents
* and from the xattr region.
return rc;
}
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct file *lower_file = ecryptfs_file_to_lower(file);
+ /*
+ * Don't allow mmap on top of file systems that don't support it
+ * natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+ * allows recursive mounting, this will need to be extended.
+ */
+ if (!lower_file->f_op->mmap)
+ return -ENODEV;
+ return generic_file_mmap(file, vma);
+}
+
/**
* ecryptfs_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
/**
* ecryptfs_dir_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = generic_file_mmap,
+ .mmap = ecryptfs_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
struct ecryptfs_cache_info *info;
info = &ecryptfs_cache_infos[i];
- if (*(info->cache))
- kmem_cache_destroy(*(info->cache));
+ kmem_cache_destroy(*(info->cache));
}
}
goto out_free;
}
inode->i_state |= I_WB_SWITCH;
+ __iget(inode);
spin_unlock(&inode->i_lock);
- ihold(inode);
isw->inode = inode;
atomic_inc(&isw_nr_in_flight);
struct dentry *newent;
bool outarg_valid = true;
+ fuse_lock_inode(dir);
err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
&outarg, &inode);
+ fuse_unlock_inode(dir);
if (err == -ENOENT) {
outarg_valid = false;
err = 0;
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
- if (d_unhashed(entry)) {
+ if (d_in_lookup(entry)) {
res = fuse_lookup(dir, entry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
FUSE_READDIR);
}
+ fuse_lock_inode(inode);
fuse_request_send(fc, req);
+ fuse_unlock_inode(inode);
nbytes = req->out.args[0].size;
err = req->out.h.error;
fuse_put_request(fc, req);
/** Miscellaneous bits describing inode state */
unsigned long state;
+
+ /** Lock for serializing lookup and readdir for back compatibility*/
+ struct mutex mutex;
};
/** FUSE inode state bits */
/** write-back cache policy (default is write-through) */
unsigned writeback_cache:1;
+ /** allow parallel lookups and readdir (default is serialized) */
+ unsigned parallel_dirops:1;
+
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
void fuse_set_initialized(struct fuse_conn *fc);
+void fuse_unlock_inode(struct inode *inode);
+void fuse_lock_inode(struct inode *inode);
+
#endif /* _FS_FUSE_I_H */
INIT_LIST_HEAD(&fi->queued_writes);
INIT_LIST_HEAD(&fi->writepages);
init_waitqueue_head(&fi->page_waitq);
+ mutex_init(&fi->mutex);
fi->forget = fuse_alloc_forget();
if (!fi->forget) {
kmem_cache_free(fuse_inode_cachep, inode);
struct fuse_inode *fi = get_fuse_inode(inode);
BUG_ON(!list_empty(&fi->write_files));
BUG_ON(!list_empty(&fi->queued_writes));
+ mutex_destroy(&fi->mutex);
kfree(fi->forget);
call_rcu(&inode->i_rcu, fuse_i_callback);
}
return 0;
}
+void fuse_lock_inode(struct inode *inode)
+{
+ if (!get_fuse_conn(inode)->parallel_dirops)
+ mutex_lock(&get_fuse_inode(inode)->mutex);
+}
+
+void fuse_unlock_inode(struct inode *inode)
+{
+ if (!get_fuse_conn(inode)->parallel_dirops)
+ mutex_unlock(&get_fuse_inode(inode)->mutex);
+}
+
static void fuse_umount_begin(struct super_block *sb)
{
fuse_abort_conn(get_fuse_conn_super(sb));
fc->async_dio = 1;
if (arg->flags & FUSE_WRITEBACK_CACHE)
fc->writeback_cache = 1;
+ if (arg->flags & FUSE_PARALLEL_DIROPS)
+ fc->parallel_dirops = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
} else {
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
- FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
+ FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
+ FUSE_PARALLEL_DIROPS;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
struct dentry *d;
bool excl = !!(flags & O_EXCL);
- if (!d_unhashed(dentry))
+ if (!d_in_lookup(dentry))
goto skip_lookup;
d = __gfs2_lookup(dir, dentry, file, opened);
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
extern int d_set_mounted(struct dentry *dentry);
extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
+extern struct dentry *d_alloc_cursor(struct dentry *);
/*
* read_write.c
BUG_ON(size & (size-1)); /* Must be a power of 2 */
- flags |= __GFP_REPEAT;
- if (size == PAGE_SIZE)
- ptr = (void *)__get_free_pages(flags, 0);
- else if (size > PAGE_SIZE) {
- int order = get_order(size);
-
- if (order < 3)
- ptr = (void *)__get_free_pages(flags, order);
- else
- ptr = vmalloc(size);
- } else
+ if (size < PAGE_SIZE)
ptr = kmem_cache_alloc(get_slab(size), flags);
+ else
+ ptr = (void *)__get_free_pages(flags, get_order(size));
/* Check alignment; SLUB has gotten this wrong in the past,
* and this can lead to user data corruption! */
void jbd2_free(void *ptr, size_t size)
{
- if (size == PAGE_SIZE) {
- free_pages((unsigned long)ptr, 0);
- return;
- }
- if (size > PAGE_SIZE) {
- int order = get_order(size);
-
- if (order < 3)
- free_pages((unsigned long)ptr, order);
- else
- vfree(ptr);
- return;
- }
- kmem_cache_free(get_slab(size), ptr);
+ if (size < PAGE_SIZE)
+ kmem_cache_free(get_slab(size), ptr);
+ else
+ free_pages((unsigned long)ptr, get_order(size));
};
/*
int dcache_dir_open(struct inode *inode, struct file *file)
{
- static struct qstr cursor_name = QSTR_INIT(".", 1);
-
- file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
+ file->private_data = d_alloc_cursor(file->f_path.dentry);
return file->private_data ? 0 : -ENOMEM;
}
}
EXPORT_SYMBOL(dcache_dir_close);
+/* parent is locked at least shared */
+static struct dentry *next_positive(struct dentry *parent,
+ struct list_head *from,
+ int count)
+{
+ unsigned *seq = &parent->d_inode->i_dir_seq, n;
+ struct dentry *res;
+ struct list_head *p;
+ bool skipped;
+ int i;
+
+retry:
+ i = count;
+ skipped = false;
+ n = smp_load_acquire(seq) & ~1;
+ res = NULL;
+ rcu_read_lock();
+ for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+ struct dentry *d = list_entry(p, struct dentry, d_child);
+ if (!simple_positive(d)) {
+ skipped = true;
+ } else if (!--i) {
+ res = d;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (skipped) {
+ smp_rmb();
+ if (unlikely(*seq != n))
+ goto retry;
+ }
+ return res;
+}
+
+static void move_cursor(struct dentry *cursor, struct list_head *after)
+{
+ struct dentry *parent = cursor->d_parent;
+ unsigned n, *seq = &parent->d_inode->i_dir_seq;
+ spin_lock(&parent->d_lock);
+ for (;;) {
+ n = *seq;
+ if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
+ break;
+ cpu_relax();
+ }
+ __list_del(cursor->d_child.prev, cursor->d_child.next);
+ if (after)
+ list_add(&cursor->d_child, after);
+ else
+ list_add_tail(&cursor->d_child, &parent->d_subdirs);
+ smp_store_release(seq, n + 2);
+ spin_unlock(&parent->d_lock);
+}
+
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *dentry = file->f_path.dentry;
if (offset != file->f_pos) {
file->f_pos = offset;
if (file->f_pos >= 2) {
- struct list_head *p;
struct dentry *cursor = file->private_data;
+ struct dentry *to;
loff_t n = file->f_pos - 2;
- spin_lock(&dentry->d_lock);
- /* d_lock not required for cursor */
- list_del(&cursor->d_child);
- p = dentry->d_subdirs.next;
- while (n && p != &dentry->d_subdirs) {
- struct dentry *next;
- next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (simple_positive(next))
- n--;
- spin_unlock(&next->d_lock);
- p = p->next;
- }
- list_add_tail(&cursor->d_child, p);
- spin_unlock(&dentry->d_lock);
+ inode_lock_shared(dentry->d_inode);
+ to = next_positive(dentry, &dentry->d_subdirs, n);
+ move_cursor(cursor, to ? &to->d_child : NULL);
+ inode_unlock_shared(dentry->d_inode);
}
}
return offset;
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
- struct list_head *p, *q = &cursor->d_child;
+ struct list_head *p = &cursor->d_child;
+ struct dentry *next;
+ bool moved = false;
if (!dir_emit_dots(file, ctx))
return 0;
- spin_lock(&dentry->d_lock);
- if (ctx->pos == 2)
- list_move(q, &dentry->d_subdirs);
- for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
- struct dentry *next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (!simple_positive(next)) {
- spin_unlock(&next->d_lock);
- continue;
- }
-
- spin_unlock(&next->d_lock);
- spin_unlock(&dentry->d_lock);
+ if (ctx->pos == 2)
+ p = &dentry->d_subdirs;
+ while ((next = next_positive(dentry, p, 1)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino, dt_type(d_inode(next))))
- return 0;
- spin_lock(&dentry->d_lock);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- /* next is still alive */
- list_move(q, p);
- spin_unlock(&next->d_lock);
- p = q;
+ break;
+ moved = true;
+ p = &next->d_child;
ctx->pos++;
}
- spin_unlock(&dentry->d_lock);
+ if (moved)
+ move_cursor(cursor, p);
return 0;
}
EXPORT_SYMBOL(dcache_readdir);
};
#endif
-static void lockd_svc_exit_thread(void)
+static void lockd_unregister_notifiers(void)
{
unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
+}
+
+static void lockd_svc_exit_thread(void)
+{
+ lockd_unregister_notifiers();
svc_exit_thread(nlmsvc_rqst);
}
* Note: svc_serv structures have an initial use count of 1,
* so we exit through here on both success and failure.
*/
-err_net:
+err_put:
svc_destroy(serv);
err_create:
mutex_unlock(&nlmsvc_mutex);
err_start:
lockd_down_net(serv, net);
- goto err_net;
+err_net:
+ lockd_unregister_notifiers();
+ goto err_put;
}
EXPORT_SYMBOL_GPL(lockd_up);
{
struct file_lock *fl, *my_fl = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(filp);
struct file_lock_context *ctx;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
}
if (*opened & FILE_CREATED)
fsnotify_create(dir, dentry);
- path->dentry = dentry;
- path->mnt = nd->path.mnt;
- return 1;
+ if (unlikely(d_is_negative(dentry))) {
+ error = -ENOENT;
+ } else {
+ path->dentry = dentry;
+ path->mnt = nd->path.mnt;
+ return 1;
+ }
}
}
dput(dentry);
int acc_mode = op->acc_mode;
unsigned seq;
struct inode *inode;
- struct path save_parent = { .dentry = NULL, .mnt = NULL };
struct path path;
- bool retried = false;
int error;
nd->flags &= ~LOOKUP_PARENT;
return -EISDIR;
}
-retry_lookup:
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
error = mnt_want_write(nd->path.mnt);
if (!error)
got_write = false;
}
+ error = follow_managed(&path, nd);
+ if (unlikely(error < 0))
+ return error;
+
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
return -EEXIST;
}
- error = follow_managed(&path, nd);
- if (unlikely(error < 0))
- return error;
-
seq = 0; /* out of RCU mode, so the value doesn't matter */
inode = d_backing_inode(path.dentry);
finish_lookup:
if (unlikely(error))
return error;
- if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
- path_to_nameidata(&path, nd);
- } else {
- save_parent.dentry = nd->path.dentry;
- save_parent.mnt = mntget(path.mnt);
- nd->path.dentry = path.dentry;
-
- }
+ path_to_nameidata(&path, nd);
nd->inode = inode;
nd->seq = seq;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
finish_open:
error = complete_walk(nd);
- if (error) {
- path_put(&save_parent);
+ if (error)
return error;
- }
audit_inode(nd->name, nd->path.dentry, 0);
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
error = vfs_open(&nd->path, file, current_cred());
- if (!error) {
- *opened |= FILE_OPENED;
- } else {
- if (error == -EOPENSTALE)
- goto stale_open;
+ if (error)
goto out;
- }
+ *opened |= FILE_OPENED;
opened:
error = open_check_o_direct(file);
if (!error)
}
if (got_write)
mnt_drop_write(nd->path.mnt);
- path_put(&save_parent);
return error;
-
-stale_open:
- /* If no saved parent or already retried then can't retry */
- if (!save_parent.dentry || retried)
- goto out;
-
- BUG_ON(save_parent.dentry != dir);
- path_put(&nd->path);
- nd->path = save_parent;
- nd->inode = dir->d_inode;
- save_parent.mnt = NULL;
- save_parent.dentry = NULL;
- if (got_write) {
- mnt_drop_write(nd->path.mnt);
- got_write = false;
- }
- retried = true;
- goto retry_lookup;
}
static int do_tmpfile(struct nameidata *nd, unsigned flags,
goto out_unlock;
lock_mount_hash();
+ event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
}
if (type->fs_flags & FS_USERNS_VISIBLE) {
- if (!fs_fully_visible(type, &mnt_flags))
+ if (!fs_fully_visible(type, &mnt_flags)) {
+ put_filesystem(type);
return -EPERM;
+ }
}
}
if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
+ /* Don't miss readonly hidden in the superblock flags */
+ if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
+ mnt_flags |= MNT_LOCK_READONLY;
+
/* Verify the mount flags are equal to or more permissive
* than the proposed new mount.
*/
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
struct inode *inode = child->mnt_mountpoint->d_inode;
/* Only worry about locked mounts */
- if (!(mnt_flags & MNT_LOCKED))
+ if (!(child->mnt.mnt_flags & MNT_LOCKED))
continue;
/* Is the directory permanetly empty? */
if (!is_empty_dir_inode(inode))
static
int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
{
+ struct inode *inode;
struct nfs_inode *nfsi;
if (d_really_is_negative(dentry))
return 0;
- nfsi = NFS_I(d_inode(dentry));
+ inode = d_inode(dentry);
+ if (is_bad_inode(inode) || NFS_STALE(inode))
+ return 0;
+
+ nfsi = NFS_I(inode);
if (entry->fattr->fileid == nfsi->fileid)
return 1;
if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
struct dentry *res;
- struct dentry *parent;
struct inode *inode = NULL;
struct nfs_fh *fhandle = NULL;
struct nfs_fattr *fattr = NULL;
if (IS_ERR(label))
goto out;
- parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
trace_nfs_lookup_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
struct file *file, unsigned open_flags,
umode_t mode, int *opened)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
unsigned int lookup_flags = 0;
+ bool switched = false;
int err;
/* Expect a negative dentry */
/* NFS only supports OPEN on regular files */
if ((open_flags & O_DIRECTORY)) {
- if (!d_unhashed(dentry)) {
+ if (!d_in_lookup(dentry)) {
/*
* Hashed negative dentry with O_DIRECTORY: dentry was
* revalidated and is fine, no need to perform lookup
attr.ia_size = 0;
}
+ if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
+ d_drop(dentry);
+ switched = true;
+ dentry = d_alloc_parallel(dentry->d_parent,
+ &dentry->d_name, &wq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ if (unlikely(!d_in_lookup(dentry)))
+ return finish_no_open(file, dentry);
+ }
+
ctx = create_nfs_open_context(dentry, open_flags);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
err = PTR_ERR(inode);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
+ d_drop(dentry);
switch (err) {
case -ENOENT:
- d_drop(dentry);
d_add(dentry, NULL);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
break;
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
+ if (unlikely(switched)) {
+ d_lookup_done(dentry);
+ dput(dentry);
+ }
return err;
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
- err = PTR_ERR(res);
+ if (switched) {
+ d_lookup_done(dentry);
+ if (!res)
+ res = dentry;
+ else
+ dput(dentry);
+ }
if (IS_ERR(res))
- goto out;
-
+ return PTR_ERR(res);
return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
result = wait_for_completion_killable(&dreq->completion);
+ if (!result) {
+ result = dreq->count;
+ WARN_ON_ONCE(dreq->count < 0);
+ }
if (!result)
result = dreq->error;
- if (!result)
- result = dreq->count;
out:
return (ssize_t) result;
if (dreq->iocb) {
long res = (long) dreq->error;
- if (!res)
+ if (dreq->count != 0) {
res = (long) dreq->count;
+ WARN_ON_ONCE(dreq->count < 0);
+ }
dreq->iocb->ki_complete(dreq->iocb, res, 0);
}
struct nfs_fattr *fattr = desc->fattr;
set_nfs_fileid(inode, fattr->fileid);
+ inode->i_mode = fattr->mode;
nfs_copy_fh(NFS_FH(inode), desc->fh);
return 0;
}
call_close |= is_wronly;
else if (is_wronly)
calldata->arg.fmode |= FMODE_WRITE;
+ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
+ call_close |= is_rdwr;
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (calldata->arg.fmode == 0)
- call_close |= is_rdwr;
-
if (!nfs4_valid_open_stateid(state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
break;
}
lo = NFS_I(inode)->layout;
- if (lo && nfs4_stateid_match(&lgp->args.stateid,
- &lo->plh_stateid)) {
+ if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
+ nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
LIST_HEAD(head);
/*
pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
+ status = -EAGAIN;
+ goto out;
} else
spin_unlock(&inode->i_lock);
- status = -EAGAIN;
- goto out;
}
status = nfs4_handle_exception(server, status, exception);
.flags = RPC_TASK_ASYNC,
};
struct pnfs_layout_segment *lseg = NULL;
- struct nfs4_exception exception = { .timeout = *timeout };
+ struct nfs4_exception exception = {
+ .inode = inode,
+ .timeout = *timeout,
+ };
int status = 0;
dprintk("--> %s\n", __func__);
}
spin_unlock(&state->state_lock);
}
- nfs4_put_open_state(state);
clear_bit(NFS_STATE_RECLAIM_NOGRACE,
&state->flags);
+ nfs4_put_open_state(state);
spin_lock(&sp->so_lock);
goto restart;
}
list_del_init(&lseg->pls_list);
/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
atomic_dec(&lo->plh_refcount);
- if (list_empty(&lo->plh_segs))
+ if (list_empty(&lo->plh_segs)) {
+ set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+ }
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
INIT_LIST_HEAD(&lo->plh_bulk_destroy);
lo->plh_inode = ino;
lo->plh_lc_cred = get_rpccred(ctx->cred);
+ lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
return lo;
}
pnfs_find_alloc_layout(struct inode *ino,
struct nfs_open_context *ctx,
gfp_t gfp_flags)
+ __releases(&ino->i_lock)
+ __acquires(&ino->i_lock)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct pnfs_layout_hdr *new = NULL;
* stateid, or it has been invalidated, then we must use the open
* stateid.
*/
- if (lo->plh_stateid.seqid == 0 ||
- test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+ if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
/*
* The first layoutget for the file. Need to serialize per
}
/* Helper function for pnfs_generic_commit_pagelist to catch an empty
- * page list. This can happen when two commits race. */
+ * page list. This can happen when two commits race.
+ *
+ * This must be called instead of nfs_init_commit - call one or the other, but
+ * not both!
+ */
static bool
pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
struct nfs_commit_data *data,
if (list_empty(pages)) {
if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
wake_up_atomic_t(&cinfo->mds->rpcs_out);
- nfs_commitdata_release(data);
+ /* don't call nfs_commitdata_release - it tries to put
+ * the open_context which is not acquired until nfs_init_commit
+ * which has not been called on @data */
+ WARN_ON_ONCE(data->context);
+ nfs_commit_free(data);
return true;
}
nfs_list_remove_request(new);
nfs_readpage_release(new);
error = desc->pgio->pg_error;
- goto out_unlock;
+ goto out;
}
return 0;
out_error:
error = PTR_ERR(new);
-out_unlock:
unlock_page(page);
+out:
return error;
}
return error;
}
-#define NFSD_MDS_PR_KEY 0x0100000000000000
+#define NFSD_MDS_PR_KEY 0x0100000000000000ULL
/*
* We use the client ID as a unique key for the reservations.
goto out;
inode = d_inode(fh->fh_dentry);
- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
- error = -EOPNOTSUPP;
- goto out_errno;
- }
error = fh_want_write(fh);
if (error)
goto out_errno;
- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+ fh_lock(fh);
+
+ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
if (error)
- goto out_drop_write;
- error = inode->i_op->set_acl(inode, argp->acl_default,
- ACL_TYPE_DEFAULT);
+ goto out_drop_lock;
+ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
if (error)
- goto out_drop_write;
+ goto out_drop_lock;
+
+ fh_unlock(fh);
fh_drop_write(fh);
posix_acl_release(argp->acl_access);
posix_acl_release(argp->acl_default);
return nfserr;
-out_drop_write:
+out_drop_lock:
+ fh_unlock(fh);
fh_drop_write(fh);
out_errno:
nfserr = nfserrno(error);
goto out;
inode = d_inode(fh->fh_dentry);
- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
- error = -EOPNOTSUPP;
- goto out_errno;
- }
error = fh_want_write(fh);
if (error)
goto out_errno;
- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+ fh_lock(fh);
+
+ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
if (error)
- goto out_drop_write;
- error = inode->i_op->set_acl(inode, argp->acl_default,
- ACL_TYPE_DEFAULT);
+ goto out_drop_lock;
+ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
-out_drop_write:
+out_drop_lock:
+ fh_unlock(fh);
fh_drop_write(fh);
out_errno:
nfserr = nfserrno(error);
dentry = fhp->fh_dentry;
inode = d_inode(dentry);
- if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
- return nfserr_attrnotsupp;
-
if (S_ISDIR(inode->i_mode))
flags = NFS4_ACL_DIR;
if (host_error < 0)
goto out_nfserr;
- host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
+ fh_lock(fhp);
+
+ host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
if (host_error < 0)
- goto out_release;
+ goto out_drop_lock;
if (S_ISDIR(inode->i_mode)) {
- host_error = inode->i_op->set_acl(inode, dpacl,
- ACL_TYPE_DEFAULT);
+ host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
}
-out_release:
+out_drop_lock:
+ fh_unlock(fhp);
+
posix_acl_release(pacl);
posix_acl_release(dpacl);
out_nfserr:
}
}
-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
-{
- struct rpc_xprt *xprt;
-
- if (args->protocol != XPRT_TRANSPORT_BC_TCP)
- return rpc_create(args);
-
- xprt = args->bc_xprt->xpt_bc_xprt;
- if (xprt) {
- xprt_get(xprt);
- return rpc_create_xprt(args, xprt);
- }
-
- return rpc_create(args);
-}
-
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
int maxtime = max_cb_time(clp->net);
args.authflavor = ses->se_cb_sec.flavor;
}
/* Create RPC client */
- client = create_backchannel_client(&args);
+ client = rpc_create(&args);
if (IS_ERR(client)) {
dprintk("NFSD: couldn't create callback client: %ld\n",
PTR_ERR(client));
}
static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
- struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_openowner *oo = open->op_openowner;
struct nfs4_ol_stateid *retstp = NULL;
+ struct nfs4_ol_stateid *stp;
+
+ stp = open->op_stp;
+ /* We are moving these outside of the spinlocks to avoid the warnings */
+ mutex_init(&stp->st_mutex);
+ mutex_lock(&stp->st_mutex);
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
retstp = nfsd4_find_existing_open(fp, open);
if (retstp)
goto out_unlock;
+
+ open->op_stp = NULL;
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
stp->st_access_bmap = 0;
stp->st_deny_bmap = 0;
stp->st_openstp = NULL;
- init_rwsem(&stp->st_rwsem);
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
- return retstp;
+ if (retstp) {
+ mutex_lock(&retstp->st_mutex);
+ /* To keep mutex tracking happy */
+ mutex_unlock(&stp->st_mutex);
+ stp = retstp;
+ }
+ return stp;
}
/*
struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
struct nfs4_file *fp = NULL;
struct nfs4_ol_stateid *stp = NULL;
- struct nfs4_ol_stateid *swapstp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
*/
if (stp) {
/* Stateid was found, this is an OPEN upgrade */
- down_read(&stp->st_rwsem);
+ mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
- stp = open->op_stp;
- open->op_stp = NULL;
- swapstp = init_open_stateid(stp, fp, open);
- if (swapstp) {
- nfs4_put_stid(&stp->st_stid);
- stp = swapstp;
- down_read(&stp->st_rwsem);
+ /* stp is returned locked. */
+ stp = init_open_stateid(fp, open);
+ /* See if we lost the race to some other thread */
+ if (stp->st_access_bmap != 0) {
status = nfs4_upgrade_open(rqstp, fp, current_fh,
stp, open);
if (status) {
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
goto upgrade_out;
}
- down_read(&stp->st_rwsem);
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
release_open_stateid(stp);
goto out;
}
}
upgrade_out:
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
- up_read(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
if (nfsd4_has_session(&resp->cstate)) {
if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
* revoked delegations are kept only for free_stateid.
*/
return nfserr_bad_stateid;
- down_write(&stp->st_rwsem);
+ mutex_lock(&stp->st_mutex);
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
if (status != nfs_ok)
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
return status;
}
return status;
oo = openowner(stp->st_stateowner);
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
return nfserr_bad_stateid;
}
oo = openowner(stp->st_stateowner);
status = nfserr_bad_stateid;
if (oo->oo_flags & NFS4_OO_CONFIRMED) {
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
goto put_stateid;
}
oo->oo_flags |= NFS4_OO_CONFIRMED;
nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
status = nfs_ok;
put_stateid:
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
- init_rwsem(&stp->st_rwsem);
+ mutex_init(&stp->st_mutex);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
spin_lock(&fp->fi_lock);
&open_stp, nn);
if (status)
goto out;
- up_write(&open_stp->st_rwsem);
+ mutex_unlock(&open_stp->st_mutex);
open_sop = openowner(open_stp->st_stateowner);
status = nfserr_bad_stateid;
if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new);
if (status == nfs_ok)
- down_write(&lock_stp->st_rwsem);
+ mutex_lock(&lock_stp->st_mutex);
} else {
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
- up_write(&lock_stp->st_rwsem);
+ mutex_unlock(&lock_stp->st_mutex);
/*
* If this is a new, never-before-used stateid, and we are
fput:
fput(filp);
put_stateid:
- up_write(&stp->st_rwsem);
+ mutex_unlock(&stp->st_mutex);
nfs4_put_stid(&stp->st_stid);
out:
nfsd4_bump_seqid(cstate, status);
unsigned char st_access_bmap;
unsigned char st_deny_bmap;
struct nfs4_ol_stateid *st_openstp;
- struct rw_semaphore st_rwsem;
+ struct mutex st_mutex;
};
static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
return 0;
bytes = le16_to_cpu(sbp->s_bytes);
- if (bytes > BLOCK_SIZE)
+ if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
return 0;
crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
sumoff);
ccflags-y := -Ifs/ocfs2
-ccflags-y += -DCATCH_BH_JBD_RACES
-
obj-$(CONFIG_OCFS2_FS) += \
ocfs2.o \
ocfs2_stackglue.o
lock_buffer(bh);
if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
mlog(ML_ERROR,
"block %llu had the JBD bit set "
"while I was in lock_buffer!",
(unsigned long long)bh->b_blocknr);
BUG();
+#else
+ unlock_buffer(bh);
+ continue;
+#endif
}
clear_buffer_uptodate(bh);
err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
} else {
const struct cred *old_cred;
+ struct cred *override_cred;
old_cred = ovl_override_creds(dentry->d_sb);
- err = ovl_create_over_whiteout(dentry, inode, &stat, link,
- hardlink);
+ err = -ENOMEM;
+ override_cred = prepare_creds();
+ if (override_cred) {
+ override_cred->fsuid = old_cred->fsuid;
+ override_cred->fsgid = old_cred->fsgid;
+ put_cred(override_creds(override_cred));
+ put_cred(override_cred);
+ err = ovl_create_over_whiteout(dentry, inode, &stat,
+ link, hardlink);
+ }
revert_creds(old_cred);
}
struct dentry *upper;
struct dentry *opaquedir = NULL;
int err;
+ int flags = 0;
if (WARN_ON(!workdir))
return -EROFS;
if (err)
goto out_dput;
- whiteout = ovl_whiteout(workdir, dentry);
- err = PTR_ERR(whiteout);
- if (IS_ERR(whiteout))
+ upper = lookup_one_len(dentry->d_name.name, upperdir,
+ dentry->d_name.len);
+ err = PTR_ERR(upper);
+ if (IS_ERR(upper))
goto out_unlock;
- upper = ovl_dentry_upper(dentry);
- if (!upper) {
- upper = lookup_one_len(dentry->d_name.name, upperdir,
- dentry->d_name.len);
- err = PTR_ERR(upper);
- if (IS_ERR(upper))
- goto kill_whiteout;
-
- err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
- dput(upper);
- if (err)
- goto kill_whiteout;
- } else {
- int flags = 0;
+ err = -ESTALE;
+ if ((opaquedir && upper != opaquedir) ||
+ (!opaquedir && ovl_dentry_upper(dentry) &&
+ upper != ovl_dentry_upper(dentry))) {
+ goto out_dput_upper;
+ }
- if (opaquedir)
- upper = opaquedir;
- err = -ESTALE;
- if (upper->d_parent != upperdir)
- goto kill_whiteout;
+ whiteout = ovl_whiteout(workdir, dentry);
+ err = PTR_ERR(whiteout);
+ if (IS_ERR(whiteout))
+ goto out_dput_upper;
- if (is_dir)
- flags |= RENAME_EXCHANGE;
+ if (d_is_dir(upper))
+ flags = RENAME_EXCHANGE;
- err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
- if (err)
- goto kill_whiteout;
+ err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+ if (err)
+ goto kill_whiteout;
+ if (flags)
+ ovl_cleanup(wdir, upper);
- if (is_dir)
- ovl_cleanup(wdir, upper);
- }
ovl_dentry_version_inc(dentry->d_parent);
out_d_drop:
d_drop(dentry);
dput(whiteout);
+out_dput_upper:
+ dput(upper);
out_unlock:
unlock_rename(workdir, upperdir);
out_dput:
if (err)
goto out;
+ if (attr->ia_valid & ATTR_SIZE) {
+ struct inode *realinode = d_inode(ovl_dentry_real(dentry));
+
+ err = -ETXTBSY;
+ if (atomic_read(&realinode->i_writecount) < 0)
+ goto out_drop_write;
+ }
+
err = ovl_copy_up(dentry);
if (!err) {
+ struct inode *winode = NULL;
+
upperdentry = ovl_dentry_upper(dentry);
+ if (attr->ia_valid & ATTR_SIZE) {
+ winode = d_inode(upperdentry);
+ err = get_write_access(winode);
+ if (err)
+ goto out_drop_write;
+ }
+
+ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+ attr->ia_valid &= ~ATTR_MODE;
+
inode_lock(upperdentry->d_inode);
err = notify_change(upperdentry, attr, NULL);
if (!err)
ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
inode_unlock(upperdentry->d_inode);
+
+ if (winode)
+ put_write_access(winode);
}
+out_drop_write:
ovl_drop_write(dentry);
out:
return err;
err = vfs_getattr(&realpath, &stat);
if (err)
- return err;
+ goto out_dput;
+ err = -ESTALE;
if ((stat.mode ^ inode->i_mode) & S_IFMT)
- return -ESTALE;
+ goto out_dput;
inode->i_mode = stat.mode;
inode->i_uid = stat.uid;
inode->i_gid = stat.gid;
- return generic_permission(inode, mask);
+ err = generic_permission(inode, mask);
+ goto out_dput;
}
/* Careful in RCU walk mode */
return err;
}
-static bool ovl_need_xattr_filter(struct dentry *dentry,
- enum ovl_path_type type)
-{
- if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
- return S_ISDIR(dentry->d_inode->i_mode);
- else
- return false;
-}
-
ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size)
{
- struct path realpath;
- enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+ struct dentry *realdentry = ovl_dentry_real(dentry);
- if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+ if (ovl_is_private_xattr(name))
return -ENODATA;
- return vfs_getxattr(realpath.dentry, name, value, size);
+ return vfs_getxattr(realdentry, name, value, size);
}
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
- struct path realpath;
- enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+ struct dentry *realdentry = ovl_dentry_real(dentry);
ssize_t res;
int off;
- res = vfs_listxattr(realpath.dentry, list, size);
+ res = vfs_listxattr(realdentry, list, size);
if (res <= 0 || size == 0)
return res;
- if (!ovl_need_xattr_filter(dentry, type))
- return res;
-
/* filter out private xattrs */
for (off = 0; off < res;) {
char *s = list + off;
goto out;
err = -ENODATA;
- if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+ if (ovl_is_private_xattr(name))
goto out_drop_write;
if (!OVL_TYPE_UPPER(type)) {
if (!inode)
return NULL;
- mode &= S_IFMT;
-
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ mode &= S_IFMT;
switch (mode) {
case S_IFDIR:
inode->i_private = oe;
{
to->i_uid = from->i_uid;
to->i_gid = from->i_gid;
+ to->i_mode = from->i_mode;
}
/* dir.c */
if (err < 0)
goto out_put_workdir;
- if (!err) {
- pr_err("overlayfs: upper fs needs to support d_type.\n");
- err = -EINVAL;
- goto out_put_workdir;
- }
+ /*
+ * We allowed this configuration and don't want to
+ * break users over kernel upgrade. So warn instead
+ * of erroring out.
+ */
+ if (!err)
+ pr_warn("overlayfs: upper fs needs to support d_type.\n");
}
}
return error;
}
-static int
-posix_acl_xattr_set(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+int
+set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
{
- struct posix_acl *acl = NULL;
- int ret;
-
if (!IS_POSIXACL(inode))
return -EOPNOTSUPP;
if (!inode->i_op->set_acl)
return -EOPNOTSUPP;
- if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- return value ? -EACCES : 0;
+ if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ return acl ? -EACCES : 0;
if (!inode_owner_or_capable(inode))
return -EPERM;
+ if (acl) {
+ int ret = posix_acl_valid(acl);
+ if (ret)
+ return ret;
+ }
+ return inode->i_op->set_acl(inode, acl, type);
+}
+EXPORT_SYMBOL(set_posix_acl);
+
+static int
+posix_acl_xattr_set(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct posix_acl *acl = NULL;
+ int ret;
+
if (value) {
acl = posix_acl_from_xattr(&init_user_ns, value, size);
if (IS_ERR(acl))
return PTR_ERR(acl);
-
- if (acl) {
- ret = posix_acl_valid(acl);
- if (ret)
- goto out;
- }
}
-
- ret = inode->i_op->set_acl(inode, acl, handler->flags);
-out:
+ ret = set_posix_acl(inode, handler->flags, acl);
posix_acl_release(acl);
return ret;
}
if (IS_ERR(sb))
return ERR_CAST(sb);
+ /*
+ * procfs isn't actually a stacking filesystem; however, there is
+ * too much magic going on inside it to permit stacking things on
+ * top of it
+ */
+ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
if (!proc_parse_options(options, ns)) {
deactivate_locked_super(sb);
return ERR_PTR(-EINVAL);
unsigned long safe_mask = 0;
unsigned int commit_max_age = (unsigned int)-1;
struct reiserfs_journal *journal = SB_JOURNAL(s);
- char *new_opts = kstrdup(arg, GFP_KERNEL);
+ char *new_opts;
int err;
char *qf_names[REISERFS_MAXQUOTAS];
unsigned int qfmt = 0;
int i;
#endif
+ new_opts = kstrdup(arg, GFP_KERNEL);
+ if (arg && !new_opts)
+ return -ENOMEM;
+
sync_filesystem(s);
reiserfs_write_lock(s);
}
out_ok_unlocked:
- replace_mount_options(s, new_opts);
+ if (new_opts)
+ replace_mount_options(s, new_opts);
return 0;
out_err_unlock:
#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
+#include <linux/migrate.h>
static int read_block(struct inode *inode, void *addr, unsigned int block,
struct ubifs_data_node *dn)
return ret;
}
+#ifdef CONFIG_MIGRATION
+static int ubifs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc;
+
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
+
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ SetPagePrivate(newpage);
+ }
+
+ migrate_page_copy(newpage, page);
+ return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
/*
.write_end = ubifs_write_end,
.invalidatepage = ubifs_invalidatepage,
.set_page_dirty = ubifs_set_page_dirty,
+#ifdef CONFIG_MIGRATION
+ .migratepage = ubifs_migrate_page,
+#endif
.releasepage = ubifs_releasepage,
};
map = &UDF_SB(sb)->s_partmaps[partition];
/* map to sparable/physical partition desc */
phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
- map->s_partition_num, ext_offset + offset);
+ map->s_type_specific.s_metadata.s_phys_partition_ref,
+ ext_offset + offset);
}
brelse(epos.bh);
mdata = &map->s_type_specific.s_metadata;
inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
- /* We shouldn't mount such media... */
- BUG_ON(!inode);
+ if (!inode)
+ return 0xFFFFFFFF;
+
retblk = udf_try_read_meta(inode, block, partition, offset);
if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
- mdata->s_mirror_file_loc, map->s_partition_num);
+ mdata->s_mirror_file_loc,
+ mdata->s_phys_partition_ref);
+ if (IS_ERR(mdata->s_mirror_fe))
+ mdata->s_mirror_fe = NULL;
mdata->s_flags |= MF_MIRROR_FE_LOADED;
}
}
struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
- u32 meta_file_loc, u32 partition_num)
+ u32 meta_file_loc, u32 partition_ref)
{
struct kernel_lb_addr addr;
struct inode *metadata_fe;
addr.logicalBlockNum = meta_file_loc;
- addr.partitionReferenceNum = partition_num;
+ addr.partitionReferenceNum = partition_ref;
metadata_fe = udf_iget_special(sb, &addr);
return metadata_fe;
}
-static int udf_load_metadata_files(struct super_block *sb, int partition)
+static int udf_load_metadata_files(struct super_block *sb, int partition,
+ int type1_index)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
map = &sbi->s_partmaps[partition];
mdata = &map->s_type_specific.s_metadata;
+ mdata->s_phys_partition_ref = type1_index;
/* metadata address */
udf_debug("Metadata file location: block = %d part = %d\n",
- mdata->s_meta_file_loc, map->s_partition_num);
+ mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
- map->s_partition_num);
+ mdata->s_phys_partition_ref);
if (IS_ERR(fe)) {
/* mirror file entry */
udf_debug("Mirror metadata file location: block = %d part = %d\n",
- mdata->s_mirror_file_loc, map->s_partition_num);
+ mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
- map->s_partition_num);
+ mdata->s_phys_partition_ref);
if (IS_ERR(fe)) {
udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
*/
if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
addr.logicalBlockNum = mdata->s_bitmap_file_loc;
- addr.partitionReferenceNum = map->s_partition_num;
+ addr.partitionReferenceNum = mdata->s_phys_partition_ref;
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
p = (struct partitionDesc *)bh->b_data;
partitionNumber = le16_to_cpu(p->partitionNumber);
- /* First scan for TYPE1, SPARABLE and METADATA partitions */
+ /* First scan for TYPE1 and SPARABLE partitions */
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
udf_debug("Searching map: (%d == %d)\n",
goto out_bh;
if (map->s_partition_type == UDF_METADATA_MAP25) {
- ret = udf_load_metadata_files(sb, i);
+ ret = udf_load_metadata_files(sb, i, type1_idx);
if (ret < 0) {
udf_err(sb, "error loading MetaData partition map %d\n",
i);
__u32 s_bitmap_file_loc;
__u32 s_alloc_unit_size;
__u16 s_align_unit_size;
+ /*
+ * Partition Reference Number of the associated physical / sparable
+ * partition
+ */
+ __u16 s_phys_partition_ref;
int s_flags;
struct inode *s_metadata_fe;
struct inode *s_mirror_fe;
goto out_put_tmp_file;
}
+ if (f.file->f_op != &xfs_file_operations ||
+ tmp.file->f_op != &xfs_file_operations) {
+ error = -EINVAL;
+ goto out_put_tmp_file;
+ }
+
ip = XFS_I(file_inode(f.file));
tip = XFS_I(file_inode(tmp.file));
/* ACPI PCI Interrupt Link (pci_link.c) */
+int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
/*
* Optionally support group module level code.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
#include <asm-generic/qspinlock_types.h>
+/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
+#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
- * queued_spin_lock_slowpath() can ACQUIRE the lock before
- * issuing the unordered store that sets _Q_LOCKED_VAL.
- *
- * See both smp_cond_acquire() sites for more detail.
- *
- * This however means that in code like:
- *
- * spin_lock(A) spin_lock(B)
- * spin_unlock_wait(B) spin_is_locked(A)
- * do_something() do_something()
- *
- * Both CPUs can end up running do_something() because the store
- * setting _Q_LOCKED_VAL will pass through the loads in
- * spin_unlock_wait() and/or spin_is_locked().
+ * See queued_spin_unlock_wait().
*
- * Avoid this by issuing a full memory barrier between the spin_lock()
- * and the loads in spin_unlock_wait() and spin_is_locked().
- *
- * Note that regular mutual exclusion doesn't care about this
- * delayed store.
+ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+ * isn't immediately observable.
*/
- smp_mb();
- return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+ return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
}
#endif
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
- /* See queued_spin_is_locked() */
- smp_mb();
- while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
- cpu_relax();
-}
-
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
#define INIT_TEXT \
*(.init.text) \
+ *(.text.startup) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
+ *(.fini_array) \
+ *(.dtors) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
+ *(.text.exit) \
MEM_DISCARD(exit.text)
#define EXIT_CALL \
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
+ RK3399_EDP,
};
+static inline bool is_rockchip(enum analogix_dp_devtype type)
+{
+ return type == RK3288_DP || type == RK3399_EDP;
+}
+
struct analogix_dp_plat_data {
enum analogix_dp_devtype dev_type;
struct drm_panel *panel;
int (*power_off)(struct analogix_dp_plat_data *);
int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
struct drm_connector *);
- int (*get_modes)(struct analogix_dp_plat_data *);
+ int (*get_modes)(struct analogix_dp_plat_data *,
+ struct drm_connector *);
};
int analogix_dp_resume(struct device *dev);
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
-#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
struct drm_dma_handle;
struct drm_gem_object;
struct drm_master;
+struct drm_vblank_crtc;
struct device_node;
struct videomode;
struct mutex debugfs_lock; /* Protects debugfs_list. */
};
-
-struct drm_pending_vblank_event {
- struct drm_pending_event base;
- unsigned int pipe;
- struct drm_event_vblank event;
-};
-
-struct drm_vblank_crtc {
- struct drm_device *dev; /* pointer to the drm_device */
- wait_queue_head_t queue; /**< VBLANK wait queue */
- struct timer_list disable_timer; /* delayed disable timer */
-
- seqlock_t seqlock; /* protects vblank count and time */
-
- u32 count; /* vblank counter */
- struct timeval time; /* vblank timestamp */
-
- atomic_t refcount; /* number of users of vblank interruptsper crtc */
- u32 last; /* protected by dev->vbl_lock, used */
- /* for wraparound handling */
- u32 last_wait; /* Last vblank seqno waited per CRTC */
- unsigned int inmodeset; /* Display driver is setting mode */
- unsigned int pipe; /* crtc index */
- int framedur_ns; /* frame/field duration in ns */
- int linedur_ns; /* line duration in ns */
- bool enabled; /* so we don't call enable more than
- once per disable */
-};
-
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
int switch_power_state;
};
+#include <drm/drm_irq.h>
+
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
- /* IRQ support (drm_irq.h) */
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
-
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- struct timeval *vblank_time,
- unsigned flags,
- const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
- const struct drm_display_mode *mode);
-
-/**
- * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
- * @crtc: which CRTC's vblank waitqueue to retrieve
- *
- * This function returns a pointer to the vblank waitqueue for the CRTC.
- * Drivers can use this to implement vblank waits using wait_event() & co.
- */
-static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
-{
- return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
-}
-
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
struct drm_plane *primary;
struct drm_plane *cursor;
- /* position inside the mode_config.list, can be used as a [] idx */
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the CRTC.
+ */
unsigned index;
/* position of cursor plane on crtc */
char *name;
int encoder_type;
- /* position inside the mode_config.list, can be used as a [] idx */
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the encoder.
+ */
unsigned index;
uint32_t possible_crtcs;
* @head: list management
* @base: base KMS object
* @name: human readable name, can be overwritten by the driver
- * @connector_id: compacted connector id useful indexing arrays
* @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
struct drm_mode_object base;
char *name;
- int connector_id;
+
+ /**
+ * @index: Compacted connector index, which matches the position inside
+ * the mode_config.list for drivers not supporting hot-add/removing. Can
+ * be used as an array index. It is invariant over the lifetime of the
+ * connector.
+ */
+ unsigned index;
+
int connector_type;
int connector_type_id;
bool interlace_allowed;
enum drm_plane_type type;
- /* position inside the mode_config.list, can be used as a [] idx */
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the plane.
+ */
unsigned index;
const struct drm_plane_helper_funcs *helper_private;
* @fb_lock: mutex to protect fb state and lists
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
- * @num_connector: number of connectors on this device
- * @connector_list: list of connector objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_overlay_plane: number of overlay planes on this device
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
- * @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
- * gamma
- * @degamma_lut_size_property: size of the degamma LUT as supported by the
- * driver (read-only)
- * @ctm_property: Matrix used to convert colors after the lookup in the
- * degamma LUT
- * @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
- * the gamma space of the connected screen (read-only)
- * @gamma_lut_size_property: size of the gamma LUT as supported by the driver
* @preferred_depth: preferred RBG pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @async_page_flip: does this device support async flips on the primary plane?
* @cursor_width: hint to userspace for max cursor width
* @cursor_height: hint to userspace for max cursor height
* @helper_private: mid-layer private data
int num_fb;
struct list_head fb_list;
+ /**
+ * @num_connector: Number of connectors on this device.
+ */
int num_connector;
+ /**
+ * @connector_ida: ID allocator for connector indices.
+ */
struct ida connector_ida;
+ /**
+ * @connector_list: List of connector objects.
+ */
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
/* pointers to standard properties */
struct list_head property_blob_list;
+ /**
+ * @edid_property: Default connector property to hold the EDID of the
+ * currently connected sink, if any.
+ */
struct drm_property *edid_property;
+ /**
+ * @dpms_property: Default connector property to control the
+ * connector's DPMS state.
+ */
struct drm_property *dpms_property;
+ /**
+ * @path_property: Default connector property to hold the DP MST path
+ * for the port.
+ */
struct drm_property *path_property;
+ /**
+ * @tile_property: Default connector property to store the tile
+ * position of a tiled screen, for sinks which need to be driven with
+ * multiple CRTCs.
+ */
struct drm_property *tile_property;
+ /**
+ * @plane_type_property: Default plane property to differentiate
+ * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+ */
struct drm_property *plane_type_property;
+ /**
+ * @rotation_property: Optional property for planes or CRTCs to specifiy
+ * rotation.
+ */
struct drm_property *rotation_property;
+ /**
+ * @prop_src_x: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_x;
+ /**
+ * @prop_src_y: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_y;
+ /**
+ * @prop_src_w: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc is is being shown on.
+ */
struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc is is being shown on.
+ */
struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc is is being shown on.
+ */
struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc is is being shown on.
+ */
struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
struct drm_property *prop_fb_id;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
struct drm_property *prop_crtc_id;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be of and active must be set to disabled, too.
+ */
struct drm_property *prop_mode_id;
- /* DVI-I properties */
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode.
+ */
struct drm_property *dvi_i_select_subconnector_property;
- /* TV properties */
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin.
+ */
struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin.
+ */
struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the right
+ * margin.
+ */
struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the right
+ * margin.
+ */
struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue.
+ */
struct drm_property *tv_hue_property;
- /* Optional properties */
+ /**
+ * @scaling_mode_property: Optional connector property to control the
+ * upscaling, mostly used for built-in panels.
+ */
struct drm_property *scaling_mode_property;
+ /**
+ * @aspect_ratio_property: Optional connector property to control the
+ * HDMI infoframe aspect ratio setting.
+ */
struct drm_property *aspect_ratio_property;
+ /**
+ * @dirty_info_property: Optional connector property to give userspace a
+ * hint that the DIRTY_FB ioctl should be used.
+ */
struct drm_property *dirty_info_property;
- /* Optional color correction properties */
+ /**
+ * @degamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the framebuffer's colors to linear gamma.
+ */
struct drm_property *degamma_lut_property;
+ /**
+ * @degamma_lut_size_property: Optional CRTC property for the size of
+ * the degamma LUT as supported by the driver (read-only).
+ */
struct drm_property *degamma_lut_size_property;
+ /**
+ * @ctm_property: Optional CRTC property to set the
+ * matrix used to convert colors after the lookup in the
+ * degamma LUT.
+ */
struct drm_property *ctm_property;
+ /**
+ * @gamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the colors, after the CTM matrix, to the gamma space of the
+ * connected screen.
+ */
struct drm_property *gamma_lut_property;
+ /**
+ * @gamma_lut_size_property: Optional CRTC property for the size of the
+ * gamma LUT as supported by the driver (read-only).
+ */
struct drm_property *gamma_lut_size_property;
- /* properties for virtual machine layout */
+ /**
+ * @suggested_x_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
struct drm_property *suggested_x_property;
+ /**
+ * @suggested_y_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
struct drm_property *suggested_y_property;
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
- /* whether async page flip is supported or not */
+ /**
+ * @async_page_flip: Does this device support async flips on the primary
+ * plane?
+ */
bool async_page_flip;
/**
extern void drm_connector_cleanup(struct drm_connector *connector);
static inline unsigned drm_connector_index(struct drm_connector *connector)
{
- return connector->connector_id;
+ return connector->index;
}
-/* helpers to {un}register all connectors from sysfs for device */
-extern int drm_connector_register_all(struct drm_device *dev);
-extern void drm_connector_unregister_all(struct drm_device *dev);
-
extern __printf(5, 6)
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
extern void drm_plane_force_disable(struct drm_plane *plane);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
+extern int drm_crtc_force_disable(struct drm_crtc *crtc);
+extern int drm_crtc_force_disable_all(struct drm_device *dev);
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
- unsigned i2c_nack_count, i2c_defer_count;
+ /**
+ * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+ */
+ unsigned i2c_nack_count;
+ /**
+ * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+ */
+ unsigned i2c_defer_count;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
- struct edid *cached_edid; /* for DP logical ports - make tiling work */
+ /**
+ * @cached_edid: for DP logical ports - make tiling work by ensuring
+ * that the EDID for all connectors is read immediately.
+ */
+ struct edid *cached_edid;
+ /**
+ * @has_audio: Tracks whether the sink connector to this port is
+ * audio-capable.
+ */
bool has_audio;
};
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
- * @dev: device pointer for adding i2c devices etc.
- * @cbs: callbacks for connector addition and destruction.
- * @max_dpcd_transaction_bytes - maximum number of bytes to read/write in one go.
- * @aux: aux channel for the DP connector.
- * @max_payloads: maximum number of payloads the GPU can generate.
- * @conn_base_id: DRM connector ID this mgr is connected to.
- * @down_rep_recv: msg receiver state for down replies.
- * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, dpcd.
- * @mst_state: if this manager is enabled for an MST capable port.
- * @mst_primary: pointer to the primary branch device.
- * @dpcd: cache of DPCD for primary port.
- * @pbn_div: PBN to slots divisor.
*
* This struct represents the toplevel displayport MST topology manager.
* There should be one instance of this for every MST capable DP connector
* on the GPU.
*/
struct drm_dp_mst_topology_mgr {
-
+ /**
+ * @dev: device pointer for adding i2c devices etc.
+ */
struct device *dev;
+ /**
+ * @cbs: callbacks for connector addition and destruction.
+ */
const struct drm_dp_mst_topology_cbs *cbs;
+ /**
+ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
+ * in one go.
+ */
int max_dpcd_transaction_bytes;
- struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+ /**
+ * @aux: AUX channel for the DP MST connector this topolgy mgr is
+ * controlling.
+ */
+ struct drm_dp_aux *aux;
+ /**
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ */
int max_payloads;
+ /**
+ * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+ * to build the MST connector path value.
+ */
int conn_base_id;
- /* only ever accessed from the workqueue - which should be serialised */
+ /**
+ * @down_rep_recv: Message receiver state for down replies. This and
+ * @up_req_recv are only ever access from the work item, which is
+ * serialised.
+ */
struct drm_dp_sideband_msg_rx down_rep_recv;
+ /**
+ * @up_req_recv: Message receiver state for up requests. This and
+ * @down_rep_recv are only ever access from the work item, which is
+ * serialised.
+ */
struct drm_dp_sideband_msg_rx up_req_recv;
- /* pointer to info about the initial MST device */
- struct mutex lock; /* protects mst_state + primary + dpcd */
+ /**
+ * @lock: protects mst state, primary, dpcd.
+ */
+ struct mutex lock;
+ /**
+ * @mst_state: If this manager is enabled for an MST capable port. False
+ * if no MST sink/branch devices is connected.
+ */
bool mst_state;
+ /**
+ * @mst_primary: Pointer to the primary/first branch device.
+ */
struct drm_dp_mst_branch *mst_primary;
+ /**
+ * @dpcd: Cache of DPCD for primary port.
+ */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ /**
+ * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+ */
u8 sink_count;
+ /**
+ * @pbn_div: PBN to slots divisor.
+ */
int pbn_div;
+ /**
+ * @total_slots: Total slots that can be allocated.
+ */
int total_slots;
+ /**
+ * @avail_slots: Still available slots that can be allocated.
+ */
int avail_slots;
+ /**
+ * @total_pbn: Total PBN count.
+ */
int total_pbn;
- /* messages to be transmitted */
- /* qlock protects the upq/downq and in_progress,
- the mstb tx_slots and txmsg->state once they are queued */
+ /**
+ * @qlock: protects @tx_msg_downq, the tx_slots in struct
+ * &drm_dp_mst_branch and txmsg->state once they are queued
+ */
struct mutex qlock;
+ /**
+ * @tx_msg_downq: List of pending down replies.
+ */
struct list_head tx_msg_downq;
- bool tx_down_in_progress;
- /* payload info + lock for it */
+ /**
+ * @payload_lock: Protect payload information.
+ */
struct mutex payload_lock;
+ /**
+ * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
+ * VCPI structure itself is embedded into the corresponding
+ * &drm_dp_mst_port structure.
+ */
struct drm_dp_vcpi **proposed_vcpis;
+ /**
+ * @payloads: Array of payloads.
+ */
struct drm_dp_payload *payloads;
+ /**
+ * @payload_mask: Elements of @payloads actually in use. Since
+ * reallocation of active outputs isn't possible gaps can be created by
+ * disabling outputs out of order compared to how they've been enabled.
+ */
unsigned long payload_mask;
+ /**
+ * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
+ */
unsigned long vcpi_mask;
+ /**
+ * @tx_waitq: Wait to queue stall for the tx worker.
+ */
wait_queue_head_t tx_waitq;
+ /**
+ * @work: Probe work.
+ */
struct work_struct work;
-
+ /**
+ * @tx_work: Sideband transmit worker. This can nest within the main
+ * @work worker for each transaction @work launches.
+ */
struct work_struct tx_work;
+ /**
+ * @destroy_connector_list: List of to be destroyed connectors.
+ */
struct list_head destroy_connector_list;
+ /**
+ * @destroy_connector_lock: Protects @connector_list.
+ */
struct mutex destroy_connector_lock;
+ /**
+ * @destroy_connector_work: Work item to destroy connectors. Needed to
+ * avoid locking inversion.
+ */
struct work_struct destroy_connector_work;
};
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
const struct drm_framebuffer_funcs *funcs);
--- /dev/null
+/*
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_IRQ_H_
+#define _DRM_IRQ_H_
+
+#include <linux/seqlock.h>
+
+/**
+ * struct drm_pending_vblank_event - pending vblank event tracking
+ */
+struct drm_pending_vblank_event {
+ /**
+ * @base: Base structure for tracking pending DRM events.
+ */
+ struct drm_pending_event base;
+ /**
+ * @pipe: drm_crtc_index() of the &drm_crtc this event is for.
+ */
+ unsigned int pipe;
+ /**
+ * @event: Actual event which will be sent to userspace.
+ */
+ struct drm_event_vblank event;
+};
+
+/**
+ * struct drm_vblank_crtc - vblank tracking for a CRTC
+ *
+ * This structure tracks the vblank state for one CRTC.
+ *
+ * Note that for historical reasons - the vblank handling code is still shared
+ * with legacy/non-kms drivers - this is a free-standing structure not directly
+ * connected to struct &drm_crtc. But all public interface functions are taking
+ * a struct &drm_crtc to hide this implementation detail.
+ */
+struct drm_vblank_crtc {
+ /**
+ * @dev: Pointer to the &drm_device.
+ */
+ struct drm_device *dev;
+ /**
+ * @queue: Wait queue for vblank waiters.
+ */
+ wait_queue_head_t queue; /**< VBLANK wait queue */
+ /**
+ * @disable_timer: Disable timer for the delayed vblank disabling
+ * hysteresis logic. Vblank disabling is controlled through the
+ * drm_vblank_offdelay module option and the setting of the
+ * max_vblank_count value in the &drm_device structure.
+ */
+ struct timer_list disable_timer;
+
+ /**
+ * @seqlock: Protect vblank count and time.
+ */
+ seqlock_t seqlock; /* protects vblank count and time */
+
+ /**
+ * @count: Current software vblank counter.
+ */
+ u32 count;
+ /**
+ * @time: Vblank timestamp corresponding to @count.
+ */
+ struct timeval time;
+
+ /**
+ * @refcount: Number of users/waiters of the vblank interrupt. Only when
+ * this refcount reaches 0 can the hardware interrupt be disabled using
+ * @disable_timer.
+ */
+ atomic_t refcount; /* number of users of vblank interruptsper crtc */
+ /**
+ * @last: Protected by dev->vbl_lock, used for wraparound handling.
+ */
+ u32 last;
+ /**
+ * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
+ * For legacy driver bit 2 additionally tracks whether an additional
+ * temporary vblank reference has been acquired to paper over the
+ * hardware counter resetting/jumping. KMS drivers should instead just
+ * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
+ * save and restore the vblank count.
+ */
+ unsigned int inmodeset; /* Display driver is setting mode */
+ /**
+ * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
+ * structure.
+ */
+ unsigned int pipe;
+ /**
+ * @framedur_ns: Frame/Field duration in ns, used by
+ * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_calc_timestamping_constants().
+ */
+ int framedur_ns;
+ /**
+ * @linedur_ns: Line duration in ns, used by
+ * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_calc_timestamping_constants().
+ */
+ int linedur_ns;
+ /**
+ * @enabled: Tracks the enabling state of the corresponding &drm_crtc to
+ * avoid double-disabling and hence corrupting saved state. Needed by
+ * drivers not using atomic KMS, since those might go through their CRTC
+ * disabling functions multiple times.
+ */
+ bool enabled;
+};
+
+extern int drm_irq_install(struct drm_device *dev, int irq);
+extern int drm_irq_uninstall(struct drm_device *dev);
+
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ struct timeval *vblanktime);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
+extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() and related
+ * functions.
+ */
+static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
+
+#endif
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
-int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @priv_flags: Flags describing buffer object internal state.
+ * @moving: Fence set when BO is moving
* @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* Members protected by a bo reservation.
*/
- unsigned long priv_flags;
+ struct fence *moving;
struct drm_vma_offset_node vma_node;
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
+ *
+ * @placement: Return immediately if buffer is busy.
+ * @mem: The struct ttm_mem_reg indicating the region where the bo resides
+ * @new_flags: Describes compatible placement found
+ *
+ * Returns true if the placement is compatible
+ */
+extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags);
+
/**
* ttm_bo_validate
*
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
* @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * @move_lock: lock for move fence
* static information. bdev::driver::io_mem_free is never used.
* @lru: The lru list for this memory type.
+ * @move: The fence of the last pipelined move operation.
*
* This structure is used to identify and manage memory types for a device.
* It's set up by the ttm_bo_driver::init_mem_type method.
struct mutex io_reserve_mutex;
bool use_io_reserve_lru;
bool io_reserve_fastpath;
+ spinlock_t move_lock;
/*
* Protected by @io_reserve_mutex:
*/
struct list_head lru;
+
+ /*
+ * Protected by @move_lock.
+ */
+ struct fence *move;
};
/**
#define TTM_NUM_MEM_TYPES 8
-#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
- idling before CPU mapping */
-#define TTM_BO_PRIV_FLAG_MAX 1
/**
* struct ttm_bo_device - Buffer object driver device-specific data.
*
*/
extern void ttm_tt_destroy(struct ttm_tt *ttm);
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-extern void ttm_tt_unbind(struct ttm_tt *ttm);
-
/**
* ttm_tt_swapin:
*
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
* @bo: A pointer to a struct ttm_buffer_object.
* @fence: A fence object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Accelerated move function to be called when an accelerated move
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
- bool evict, bool no_wait_gpu,
+ struct fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_pipeline_move.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Function for pipelining accelerated moves. Either free the memory
+ * immediately or hang it on a temporary buffer object.
+ */
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+ struct fence *fence, bool evict,
+ struct ttm_mem_reg *new_mem);
+
/**
* ttm_io_prot
*
#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H
-#ifdef CONFIG_KVM_ARM_PMU
-
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+#ifdef CONFIG_KVM_ARM_PMU
+
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
-#include <linux/tty.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
return tsk->sessionid;
}
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
- struct tty_struct *tty = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&tsk->sighand->siglock, flags);
- if (tsk->signal)
- tty = tty_kref_get(tsk->signal->tty);
- spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
- return tty;
-}
-
-static inline void audit_put_tty(struct tty_struct *tty)
-{
- tty_kref_put(tty);
-}
-
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
{
return -1;
}
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
- return NULL;
-}
-static inline void audit_put_tty(struct tty_struct *tty)
-{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
#define BCMA_CORE_DEFAULT 0xFFF
#define BCMA_MAX_NR_CORES 16
+#define BCMA_CORE_SIZE 0x1000
/* Chip IDs of PCIe devices */
#define BCMA_CHIP_ID_BCM4313 0x4313
unsigned long limit;
unsigned long mm_flags;
loff_t written;
+ loff_t pos;
};
/*
BPF_WRITE = 2
};
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+ NOT_INIT = 0, /* nothing was written into register */
+ UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ PTR_TO_CTX, /* reg points to bpf_context */
+ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
+ PTR_TO_MAP_VALUE, /* reg points to map element value */
+ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+ FRAME_PTR, /* reg == frame_pointer */
+ PTR_TO_STACK, /* reg == frame_pointer + imm */
+ CONST_IMM, /* constant integer value */
+
+ /* PTR_TO_PACKET represents:
+ * skb->data
+ * skb->data + imm
+ * skb->data + (u16) var
+ * skb->data + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) menas that 'imm' was added
+ */
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_END, /* skb->data + headlen */
+};
+
struct bpf_prog;
struct bpf_verifier_ops {
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
- bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+ bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
+
+static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */
+ /* unused */
#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
extern int cpuidle_play_dead(void);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
#define DCACHE_OP_REAL 0x08000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR 0x20000000
extern seqlock_t rename_lock;
return inode;
}
+/**
+ * d_real_inode - Return the real inode
+ * @dentry: The dentry to query
+ *
+ * If dentry is on an union/overlay, then return the underlying, real inode.
+ * Otherwise return d_inode().
+ */
+static inline struct inode *d_real_inode(struct dentry *dentry)
+{
+ return d_backing_inode(d_real(dentry));
+}
+
#endif /* __LINUX_DCACHE_H */
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
- (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+ ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
(md) = (void *)(md) + (m)->desc_size)
/**
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
-int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+ return sk_filter_trim_cap(sk, skb, 1);
+}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze);
+ unsigned long address, bool freeze, struct page *page);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
if (pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
- false); \
+ false, NULL); \
} while (0)
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
+ * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
+ * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
*/
struct st_sensor_data {
struct device *dev;
const struct st_sensor_transfer_function *tf;
struct st_sensor_transfer_buffer tb;
+
+ bool hw_irq_trigger;
+ s64 hw_timestamp;
};
#ifdef CONFIG_IIO_BUFFER
const struct iio_trigger_ops *trigger_ops);
void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
-
+int st_sensors_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
#else
static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
return;
}
+#define st_sensors_validate_device NULL
#endif
int st_sensors_init_sensor(struct iio_dev *indio_dev,
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+ struct inet_diag_msg *r, int ext,
+ struct user_namespace *user_ns);
+
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
#endif /* _INET_DIAG_H_ */
#define INIT_TASK(tsk) \
{ \
.state = 0, \
- .stack = &init_thread_info, \
+ .stack = init_stack, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.prio = MAX_PRIO-20, \
#define __LINUX_ISA_H
#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
struct isa_driver {
#define to_isa_driver(x) container_of((x), struct isa_driver, driver)
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ISA_BUS_API
int isa_register_driver(struct isa_driver *, unsigned int);
void isa_unregister_driver(struct isa_driver *);
#else
static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
{
- return 0;
+ return -ENODEV;
}
static inline void isa_unregister_driver(struct isa_driver *d)
#include <linux/atomic.h>
+#ifdef HAVE_JUMP_LABEL
+
static inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ /*
+ * -1 means the first static_key_slow_inc() is in progress.
+ * static_key_enabled() must return true, so return 1 here.
+ */
+ int n = atomic_read(&key->enabled);
+ return n >= 0 ? n : 1;
}
-#ifdef HAVE_JUMP_LABEL
-
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
#else /* !HAVE_JUMP_LABEL */
+static inline int static_key_count(struct static_key *key)
+{
+ return atomic_read(&key->enabled);
+}
+
static __always_inline void jump_label_init(void)
{
static_key_initialized = true;
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object);
-void kasan_poison_slab_free(struct kmem_cache *s, void *object);
struct kasan_cache {
int alloc_meta_offset;
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+
#else /* CONFIG_KASAN */
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size,
{
return false;
}
-static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+static inline void kasan_unpoison_slab(const void *ptr) { }
+
#endif /* CONFIG_KASAN */
#endif /* LINUX_KASAN_H */
#define LED_UNREGISTERING (1 << 1)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_ONESHOT (1 << 17)
-#define LED_BLINK_ONESHOT_STOP (1 << 18)
-#define LED_BLINK_INVERT (1 << 19)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20)
-#define LED_BLINK_DISABLE (1 << 21)
-#define LED_SYSFS_DISABLE (1 << 22)
-#define LED_DEV_CAP_FLASH (1 << 23)
-#define LED_HW_PLUGGABLE (1 << 24)
-#define LED_PANIC_INDICATOR (1 << 25)
+#define LED_BLINK_SW (1 << 17)
+#define LED_BLINK_ONESHOT (1 << 18)
+#define LED_BLINK_ONESHOT_STOP (1 << 19)
+#define LED_BLINK_INVERT (1 << 20)
+#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
+#define LED_BLINK_DISABLE (1 << 22)
+#define LED_SYSFS_DISABLE (1 << 23)
+#define LED_DEV_CAP_FLASH (1 << 24)
+#define LED_HW_PLUGGABLE (1 << 25)
+#define LED_PANIC_INDICATOR (1 << 26)
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
* and if both are zero then a sensible default should be chosen.
* The call should adjust the timings in that case and if it can't
* match the values specified exactly.
- * Deactivate blinking again when the brightness is set to a fixed
- * value via the brightness_set() callback.
+ * Deactivate blinking again when the brightness is set to LED_OFF
+ * via the brightness_set() callback.
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
#define MEM_CGROUP_ID_SHIFT 16
#define MEM_CGROUP_ID_MAX USHRT_MAX
+struct mem_cgroup_id {
+ int id;
+ atomic_t ref;
+};
+
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
struct mem_cgroup {
struct cgroup_subsys_state css;
+ /* Private memcg ID. Used to ID objects that outlive the cgroup */
+ struct mem_cgroup_id id;
+
/* Accounted resources */
struct page_counter memory;
struct page_counter swap;
if (mem_cgroup_disabled())
return 0;
- return memcg->css.id;
-}
-
-/**
- * mem_cgroup_from_id - look up a memcg from an id
- * @id: the id to look up
- *
- * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
- */
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
-{
- struct cgroup_subsys_state *css;
-
- css = css_from_id(id, &memory_cgrp_subsys);
- return mem_cgroup_from_css(css);
+ return memcg->id.id;
}
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
/**
* parent_mem_cgroup - find the accounting parent of a memcg
static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
unsigned reg_cnt, unsigned char *val)
{
- int ret;
+ int ret = 0;
int i;
for (i = 0; i < reg_cnt; i++) {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+ MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
u8 rsvd[8];
};
-#define MLX5_CMD_OP_MAX 0x920
-
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
void *uout;
int uout_size;
mlx5_cmd_cbk_t callback;
+ struct delayed_work cb_timeout_work;
void *context;
int idx;
struct completion done;
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
- MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
+ MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_MAX
};
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x1b];
+ u8 reserved_at_5[0x19];
+ u8 nic_vport_node_guid_modify[0x1];
+ u8 nic_vport_port_guid_modify[0x1];
u8 reserved_at_20[0x7e0];
};
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x19];
+ u8 reserved_at_0[0x16];
+ u8 node_guid[0x1];
+ u8 port_guid[0x1];
+ u8 reserved_at_18[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
enum {
MLX5_FENCE_MODE_NONE = 0 << 5,
MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
+ MLX5_FENCE_MODE_FENCE = 2 << 5,
MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
};
struct mlx5_qp_path {
- u8 fl;
+ u8 fl_free_ar;
u8 rsvd3;
- u8 free_ar;
- u8 pkey_index;
+ __be16 pkey_index;
u8 rsvd0;
u8 grh_mlid;
__be16 rlid;
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
+ u8 rsvd2[16];
};
struct mlx5_modify_qp_mbox_out {
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
}
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
- struct page *page, pte_t *pte, bool write, bool anon, bool old);
+ struct page *page, pte_t *pte, bool write, bool anon);
#endif
/*
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
net_ratelimit()) \
- __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
+ ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
+/* return true if dev can't cope with mtu frames that need vlan tag insertion */
+static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+{
+ /* TODO: reserve and use an additional IFF bit, if we get more users */
+ return dev->priv_flags & IFF_MACSEC;
+}
+
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */
return NULL;
}
-static inline int of_parse_phandle_with_args(struct device_node *np,
+static inline int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name,
int index,
struct of_phandle_args;
struct device_node;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_PCI
int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_reserved_mem_device_init(struct device *dev);
void of_reserved_mem_device_release(struct device *dev);
+int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t start,
+ phys_addr_t end,
+ bool nomap,
+ phys_addr_t *res_base);
+
void fdt_init_reserved_mem(void);
void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size);
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
unsigned *num_maps, enum pinctrl_map_type type);
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps);
static inline int pinconf_generic_dt_node_to_map_group(
struct pinctrl_dev *pctldev, struct device_node *np_config,
};
struct posix_acl {
- union {
- atomic_t a_refcount;
- struct rcu_head a_rcu;
- };
+ atomic_t a_refcount;
+ struct rcu_head a_rcu;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
};
if (!pwm)
return -EINVAL;
+ if (duty_ns < 0 || period_ns < 0)
+ return -EINVAL;
+
pwm_get_state(pwm, &state);
if (state.duty_cycle == duty_ns && state.period == period_ns)
return 0;
static inline void pwm_apply_args(struct pwm_device *pwm)
{
+ struct pwm_state state = { };
+
/*
* PWM users calling pwm_apply_args() expect to have a fresh config
* where the polarity and period are set according to pwm_args info.
* at startup (even if they are actually enabled), thus authorizing
* polarity setting.
*
- * Instead of setting ->enabled to false, we call pwm_disable()
- * before pwm_set_polarity() to ensure that everything is configured
- * as expected, and the PWM is really disabled when the user request
- * it.
+ * To fulfill this requirement, we apply a new state which disables
+ * the PWM device and set the reference period and polarity config.
*
* Note that PWM users requiring a smooth handover between the
* bootloader and the kernel (like critical regulators controlled by
* PWM devices) will have to switch to the atomic API and avoid calling
* pwm_apply_args().
*/
- pwm_disable(pwm);
- pwm_set_polarity(pwm, pwm->args.polarity);
+
+ state.enabled = false;
+ state.polarity = pwm->args.polarity;
+ state.period = pwm->args.period;
+
+ pwm_apply_state(pwm, &state);
}
struct pwm_lookup {
bool drop_ttl0;
u8 vport_id;
u16 mtu;
+ bool clear_stats;
};
struct qed_stop_rxq_params {
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
+ iter->tags = 0;
return NULL;
}
#endif /* CONFIG_RESET_CONTROLLER */
/**
- * reset_control_get - Lookup and obtain an exclusive reference to a
- * reset controller.
+ * reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ * to a reset controller.
* @dev: device to be reset by the controller
* @id: reset line name
*
*
* Use of id names is optional.
*/
-static inline struct reset_control *__must_check reset_control_get(
- struct device *dev, const char *id)
+static inline struct reset_control *
+__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
}
-static inline struct reset_control *reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
-}
-
/**
* reset_control_get_shared - Lookup and obtain a shared reference to a
* reset controller.
return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
}
+static inline struct reset_control *reset_control_get_optional_exclusive(
+ struct device *dev, const char *id)
+{
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
+}
+
+static inline struct reset_control *reset_control_get_optional_shared(
+ struct device *dev, const char *id)
+{
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
+}
+
/**
- * of_reset_control_get - Lookup and obtain an exclusive reference to a
- * reset controller.
+ * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ * to a reset controller.
* @node: device to be reset by the controller
* @id: reset line name
*
*
* Use of id names is optional.
*/
-static inline struct reset_control *of_reset_control_get(
+static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
return __of_reset_control_get(node, id, 0, 0);
}
/**
- * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
- * a reset controller by index.
+ * of_reset_control_get_shared - Lookup and obtain an shared reference
+ * to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed, the reset-core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_shared(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, 1);
+}
+
+/**
+ * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
+ * reference to a reset controller
+ * by index.
* @node: device to be reset by the controller
* @index: index of the reset controller
*
* in whatever order. Returns a struct reset_control or IS_ERR() condition
* containing errno.
*/
-static inline struct reset_control *of_reset_control_get_by_index(
+static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
return __of_reset_control_get(node, NULL, index, 0);
}
/**
- * devm_reset_control_get - resource managed reset_control_get()
- * @dev: device to be reset by the controller
- * @id: reset line name
+ * of_reset_control_get_shared_by_index - Lookup and obtain an shared
+ * reference to a reset controller
+ * by index.
+ * @node: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed, the reset-core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
*
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * This is to be used to perform a list of resets for a device or power domain
+ * in whatever order. Returns a struct reset_control or IS_ERR() condition
+ * containing errno.
*/
-static inline struct reset_control *__must_check devm_reset_control_get(
- struct device *dev, const char *id)
-{
-#ifndef CONFIG_RESET_CONTROLLER
- WARN_ON(1);
-#endif
- return __devm_reset_control_get(dev, id, 0, 0);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
- struct device *dev, const char *id)
+static inline struct reset_control *of_reset_control_get_shared_by_index(
+ struct device_node *node, int index)
{
- return __devm_reset_control_get(dev, id, 0, 0);
+ return __of_reset_control_get(node, NULL, index, 1);
}
/**
- * devm_reset_control_get_by_index - resource managed reset_control_get
+ * devm_reset_control_get_exclusive - resource managed
+ * reset_control_get_exclusive()
* @dev: device to be reset by the controller
- * @index: index of the reset controller
+ * @id: reset line name
*
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * Managed reset_control_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
*/
-static inline struct reset_control *devm_reset_control_get_by_index(
- struct device *dev, int index)
+static inline struct reset_control *
+__must_check devm_reset_control_get_exclusive(struct device *dev,
+ const char *id)
{
- return __devm_reset_control_get(dev, NULL, index, 0);
+#ifndef CONFIG_RESET_CONTROLLER
+ WARN_ON(1);
+#endif
+ return __devm_reset_control_get(dev, id, 0, 0);
}
/**
return __devm_reset_control_get(dev, id, 0, 1);
}
+static inline struct reset_control *devm_reset_control_get_optional_exclusive(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional_shared(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 1);
+}
+
+/**
+ * devm_reset_control_get_exclusive_by_index - resource managed
+ * reset_control_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_exclusive(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
+{
+ return __devm_reset_control_get(dev, NULL, index, 0);
+}
+
/**
* devm_reset_control_get_shared_by_index - resource managed
* reset_control_get_shared
* this function, reset_control_put() is called automatically on driver detach.
* See reset_control_get_shared() for more information.
*/
-static inline struct reset_control *devm_reset_control_get_shared_by_index(
- struct device *dev, int index)
+static inline struct reset_control *
+devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
return __devm_reset_control_get(dev, NULL, index, 1);
}
+/*
+ * TEMPORARY calls to use during transition:
+ *
+ * of_reset_control_get() => of_reset_control_get_exclusive()
+ *
+ * These inline function calls will be removed once all consumers
+ * have been moved over to the new explicit API.
+ */
+static inline struct reset_control *reset_control_get(
+ struct device *dev, const char *id)
+{
+ return reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *of_reset_control_get(
+ struct device_node *node, const char *id)
+{
+ return of_reset_control_get_exclusive(node, id);
+}
+
+static inline struct reset_control *of_reset_control_get_by_index(
+ struct device_node *node, int index)
+{
+ return of_reset_control_get_exclusive_by_index(node, index);
+}
+
+static inline struct reset_control *devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+ return devm_reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return devm_reset_control_get_optional_exclusive(dev, id);
+
+}
+
+static inline struct reset_control *devm_reset_control_get_by_index(
+ struct device *dev, int index)
+{
+ return devm_reset_control_get_exclusive_by_index(dev, index);
+}
#endif
/*
* rmap interfaces called when adding or removing pte of page
*/
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- return lockless_dereference(s)->sequence;
+ int seq = READ_ONCE(s->sequence);
+ /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
+ smp_read_barrier_depends();
+ return seq;
}
/**
* unsigned seq, idx;
*
* do {
- * seq = lockless_dereference(latch)->seq;
+ * seq = raw_read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
}
void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen);
skb->csum = csum_partial(start, len, skb->csum);
}
+/**
+ * skb_push_rcsum - push skb and update receive checksum
+ * @skb: buffer to update
+ * @len: length of data pulled
+ *
+ * This function performs an skb_push on the packet and updates
+ * the CHECKSUM_COMPLETE checksum. It should be used on
+ * receive path processing instead of skb_push unless you know
+ * that the checksum difference is zero (e.g., a valid IP header)
+ * or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+ unsigned int len)
+{
+ skb_push(skb, len);
+ skb_postpush_rcsum(skb, skb->data, len);
+ return skb->data;
+}
+
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
{
switch (sk->sk_family) {
case AF_INET:
+ if (sk->sk_type == SOCK_RAW)
+ return SKNLGRP_NONE;
+
switch (sk->sk_protocol) {
case IPPROTO_TCP:
return SKNLGRP_INET_TCP_DESTROY;
return SKNLGRP_NONE;
}
case AF_INET6:
+ if (sk->sk_type == SOCK_RAW)
+ return SKNLGRP_NONE;
+
switch (sk->sk_protocol) {
case IPPROTO_TCP:
return SKNLGRP_INET6_TCP_DESTROY;
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
- struct rpc_xprt *xprt);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
const struct rpc_program *, u32);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
struct net *xpt_net;
struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
+ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
size_t addrlen;
const char *servername;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
+ struct rpc_xprt_switch *bc_xps;
unsigned int flags;
};
* @get_trend: a pointer to a function that reads the sensor temperature trend.
* @set_emul_temp: a pointer to a function that sets sensor emulated
* temperature.
+ * @set_trip_temp: a pointer to a function that sets the trip temperature on
+ * hardware.
*/
struct thermal_zone_of_device_ops {
int (*get_temp)(void *, int *);
* PORTSCx
*/
/* HOSTPC: offset 0x84 */
- u32 hostpc[1]; /* HOSTPC extension */
+ u32 hostpc[0]; /* HOSTPC extension */
#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
#define HOSTPC_PSPD (3<<25) /* Port speed detection */
- u32 reserved5[16];
+ u32 reserved5[17];
/* USBMODE_EX: offset 0xc8 */
u32 usbmode_ex; /* USB Device mode extension */
* @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
* this driver will be bound to any available UDC.
* @pending: UDC core private data used for deferred probe of this driver.
+ * @match_existing_only: If udc is not found, return an error and don't add this
+ * gadget driver to list of pending driver
*
* Devices are disabled till a gadget driver successfully bind()s, which
* means the driver will handle setup() requests needed to enumerate (and
char *udc_name;
struct list_head pending;
+ unsigned match_existing_only:1;
};
};
#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-void musb_mailbox(enum musb_vbus_id_status status);
+int musb_mailbox(enum musb_vbus_id_status status);
#else
-static inline void musb_mailbox(enum musb_vbus_id_status status)
+static inline int musb_mailbox(enum musb_vbus_id_status status)
{
+ return 0;
}
#endif
/*
* v4l2-mc.h - Media Controller V4L2 types and prototypes
*
- * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
* Copyright (C) 2006-2010 Nokia Corporation
* Copyright (c) 2016 Intel Corporation.
*
#define BOND_DEFAULT_MIIMON 100
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
struct reciprocal_value reciprocal_packets_per_slave;
u16 ad_actor_sys_prio;
u16 ad_user_port_key;
- u8 ad_actor_system[ETH_ALEN];
+
+ /* 2 bytes of padding : see ether_addr_equal_64bits() */
+ u8 ad_actor_system[ETH_ALEN + 2];
};
struct bond_parm_tbl {
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
u8 name_assign_type);
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err, __be16 proto);
+ bool *csum_err, __be16 proto, int nhs);
static inline int gre_calc_hlen(__be16 o_flags)
{
return min(dst->dev->mtu, IP_MAX_MTU);
}
-static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ const struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
-
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
const char *ip_vs_state_name(__u16 proto, int state);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
int ip_vs_conn_init(void);
void ip_vs_conn_cleanup(void);
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+ long timeout = (long)ct->timeout.expires - (long)jiffies;
+
+ return timeout > 0 ? timeout : 0;
+}
+
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
struct nf_hook_ops *ops);
};
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
struct nft_set;
struct nft_set_iter {
+ u8 genmask;
unsigned int count;
unsigned int skip;
int err;
struct proc_dir_entry;
struct nf_logger;
+struct nf_queue_handler;
struct netns_nf {
#if defined CONFIG_PROC_FS
struct proc_dir_entry *proc_netfilter;
#endif
+ const struct nf_queue_handler __rcu *queue_handler;
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
};
};
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+ const struct tcf_proto *tp, u32 flags)
{
+ const struct Qdisc *sch = tp->q;
+ const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
if (!(dev->features & NETIF_F_HW_TC))
return false;
-
if (flags & TCA_CLS_FLAGS_SKIP_HW)
return false;
-
if (!dev->netdev_ops->ndo_setup_tc)
return false;
+ if (cops && cops->tcf_cl_offload)
+ return cops->tcf_cl_offload(tp->classid);
return true;
}
/* Filter manipulation */
struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ bool (*tcf_cl_offload)(u32 classid);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
if (!sch->gso_skb) {
sch->gso_skb = sch->dequeue(sch);
- if (sch->gso_skb)
+ if (sch->gso_skb) {
/* it's still part of the queue */
+ qdisc_qstats_backlog_inc(sch, sch->gso_skb);
sch->q.qlen++;
+ }
}
return sch->gso_skb;
if (skb) {
sch->gso_skb = NULL;
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
skb = sch->dequeue(sch);
*/
void sock_gen_put(struct sock *sk);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+ unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested)
+{
+ return __sk_receive_skb(sk, skb, nested, 1);
+}
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
- u32 ageing_time; /* BRIDGE_AGEING_TIME */
+ clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
} u;
};
int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
int (*decode)(struct sk_buff *, void *, u16 len);
int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
- int (*alloc)(struct tcf_meta_info *, void *);
+ int (*alloc)(struct tcf_meta_info *, void *, gfp_t);
void (*release)(struct tcf_meta_info *);
int (*validate)(void *val, int len);
struct module *owner;
int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
const void *dval);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
IB_DEVICE_CROSS_CHANNEL = (1 << 27),
IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
- IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
+ IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
- IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
- IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
+ IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
+ IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
};
enum ib_signature_prot_cap {
/*
* Allocate a private queue pair data structure for driver specific
- * information which is opaque to rdmavt.
+ * information which is opaque to rdmavt. Errors are returned via
+ * ERR_PTR(err). The driver is free to return NULL or a valid
+ * pointer.
*/
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
gfp_t gfp);
int channels;
};
+struct hdmi_codec_pdata;
struct hdmi_codec_ops {
/*
* Called when ASoC starts an audio stream setup.
* Optional
*/
- int (*audio_startup)(struct device *dev);
+ int (*audio_startup)(struct device *dev, void *data);
/*
* Configures HDMI-encoder for audio stream.
* Mandatory
*/
- int (*hw_params)(struct device *dev,
+ int (*hw_params)(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
* Shuts down the audio stream.
* Mandatory
*/
- void (*audio_shutdown)(struct device *dev);
+ void (*audio_shutdown)(struct device *dev, void *data);
/*
* Mute/unmute HDMI audio stream.
* Optional
*/
- int (*digital_mute)(struct device *dev, bool enable);
+ int (*digital_mute)(struct device *dev, void *data, bool enable);
/*
* Provides EDID-Like-Data from connected HDMI device.
* Optional
*/
- int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+ int (*get_eld)(struct device *dev, void *data,
+ uint8_t *buf, size_t len);
};
/* HDMI codec initalization data */
uint i2s:1;
uint spdif:1;
int max_i2s_channels;
+ void *data;
};
#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
+struct drm_amdgpu_query_fw {
+ /** AMDGPU_INFO_FW_* */
+ __u32 fw_type;
+ /**
+ * Index of the IP if there are more IPs of
+ * the same type.
+ */
+ __u32 ip_instance;
+ /**
+ * Index of the engine. Whether this is used depends
+ * on the firmware type. (e.g. MEC, SDMA)
+ */
+ __u32 index;
+ __u32 _pad;
+};
+
/* Input structure for the INFO ioctl */
struct drm_amdgpu_info {
/* Where the return value will be stored */
__u32 flags;
} read_mmr_reg;
- struct {
- /** AMDGPU_INFO_FW_* */
- __u32 fw_type;
- /**
- * Index of the IP if there are more IPs of
- * the same type.
- */
- __u32 ip_instance;
- /**
- * Index of the engine. Whether this is used depends
- * on the firmware type. (e.g. MEC, SDMA)
- */
- __u32 index;
- __u32 _pad;
- } query_fw;
+ struct drm_amdgpu_query_fw query_fw;
};
};
struct drm_msm_timespec timeout; /* in */
};
+/* madvise provides a way to tell the kernel in case a buffers contents
+ * can be discarded under memory pressure, which is useful for userspace
+ * bo cache where we want to optimistically hold on to buffer allocate
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when buffer enters BO
+ * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
+ * In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
+#define __MSM_MADV_PURGED 2 /* internal state */
+
+struct drm_msm_gem_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, MSM_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
#define DRM_MSM_GEM_CPU_FINI 0x05
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
-#define DRM_MSM_NUM_IOCTLS 0x08
+#define DRM_MSM_GEM_MADVISE 0x08
+#define DRM_MSM_NUM_IOCTLS 0x09
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#if defined(__cplusplus)
}
#define DRM_VC4_MMAP_BO 0x04
#define DRM_VC4_CREATE_SHADER_BO 0x05
#define DRM_VC4_GET_HANG_STATE 0x06
+#define DRM_VC4_GET_PARAM 0x07
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
+#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
__u32 pad[16];
};
+#define DRM_VC4_PARAM_V3D_IDENT0 0
+#define DRM_VC4_PARAM_V3D_IDENT1 1
+#define DRM_VC4_PARAM_V3D_IDENT2 2
+#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+
+struct drm_vc4_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
#if defined(__cplusplus)
}
#endif
--- /dev/null
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH 0x1
+#define DRM_VGEM_FENCE_SIGNAL 0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+ __u32 handle;
+ __u32 flags;
+#define VGEM_FENCE_WRITE 0x1
+ __u32 out_fence;
+ __u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+ __u32 fence;
+ __u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
header-y += hw_breakpoint.h
header-y += l2tp.h
header-y += libc-compat.h
+header-y += lirc.h
header-y += limits.h
header-y += llc.h
header-y += loop.h
};
union {
char name[BTRFS_SUBVOL_NAME_MAX + 1];
- u64 devid;
+ __u64 devid;
};
};
*
* 7.24
* - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
+ *
+ * 7.25
+ * - add FUSE_PARALLEL_DIROPS
*/
#ifndef _LINUX_FUSE_H
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 24
+#define FUSE_KERNEL_MINOR_VERSION 25
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
* FUSE_ASYNC_DIO: asynchronous direct I/O submission
* FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
* FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
+ * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
#define FUSE_ASYNC_DIO (1 << 15)
#define FUSE_WRITEBACK_CACHE (1 << 16)
#define FUSE_NO_OPEN_SUPPORT (1 << 17)
+#define FUSE_PARALLEL_DIROPS (1 << 18)
/**
* CUSE INIT request/reply flags
#ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
enum gtp_genl_cmds {
GTP_CMD_NEWPDP,
#define KEY_KBDINPUTASSIST_ACCEPT 0x264
#define KEY_KBDINPUTASSIST_CANCEL 0x265
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP 0x266
+#define KEY_RIGHT_DOWN 0x267
+#define KEY_LEFT_UP 0x268
+#define KEY_LEFT_DOWN 0x269
+
+#define KEY_ROOT_MENU 0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU 0x26b
+#define KEY_NUMERIC_11 0x26c
+#define KEY_NUMERIC_12 0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC 0x26e
+#define KEY_3D_MODE 0x26f
+#define KEY_NEXT_FAVORITE 0x270
+#define KEY_STOP_RECORD 0x271
+#define KEY_PAUSE_RECORD 0x272
+#define KEY_VOD 0x273 /* Video on Demand */
+#define KEY_UNMUTE 0x274
+#define KEY_FASTREVERSE 0x275
+#define KEY_SLOWREVERSE 0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA 0x275
+
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
#define SW_ROTATE_LOCK 0x0c /* set = rotate locked/disabled */
#define SW_LINEIN_INSERT 0x0d /* set = inserted */
#define SW_MUTE_DEVICE 0x0e /* set = device disabled */
+#define SW_PEN_INSERTED 0x0f /* set = pen inserted */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
#define BUS_ATARI 0x1B
#define BUS_SPI 0x1C
#define BUS_RMI 0x1D
+#define BUS_CEC 0x1E
/*
* MT_TOOL types
header-y += xt_NFQUEUE.h
header-y += xt_RATEEST.h
header-y += xt_SECMARK.h
+header-y += xt_SYNPROXY.h
header-y += xt_TCPMSS.h
header-y += xt_TCPOPTSTRIP.h
header-y += xt_TEE.h
#ifndef _XT_SYNPROXY_H
#define _XT_SYNPROXY_H
+#include <linux/types.h>
+
#define XT_SYNPROXY_OPT_MSS 0x01
#define XT_SYNPROXY_OPT_WSCALE 0x02
#define XT_SYNPROXY_OPT_SACK_PERM 0x04
# UAPI Header export list
header-y += asequencer.h
+header-y += asoc.h
header-y += asound.h
header-y += asound_fm.h
header-y += compress_offload.h
header-y += hdspm.h
header-y += sb16_csp.h
header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
struct dmfc_channel;
int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc);
void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
- unsigned long bandwidth_mbs, int burstsize);
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
void ipu_dmfc_put(struct dmfc_channel *dmfc);
config KALLSYMS_ABSOLUTE_PERCPU
bool
+ depends on KALLSYMS
default X86_64 && SMP
config KALLSYMS_BASE_RELATIVE
}
# if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
{
}
#endif
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
#endif
- thread_info_cache_init();
+ thread_stack_cache_init();
cred_init();
fork_init();
proc_caches_init();
{
struct blacklist_entry *entry;
char fn_name[KSYM_SYMBOL_LEN];
+ unsigned long addr;
if (list_empty(&blacklisted_initcalls))
return false;
- sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+ addr = (unsigned long) dereference_function_descriptor(fn);
+ sprint_symbol_no_offset(fn_name, addr);
list_for_each_entry(entry, &blacklisted_initcalls, next) {
if (!strcmp(fn_name, entry->buf)) {
audit_log_format(ab, " exe=(null)");
}
+struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+ struct tty_struct *tty = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ if (tsk->signal)
+ tty = tty_kref_get(tsk->signal->tty);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ return tty;
+}
+
+void audit_put_tty(struct tty_struct *tty)
+{
+ tty_kref_put(tty);
+}
+
void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
{
const struct cred *cred;
#include <linux/audit.h>
#include <linux/skbuff.h>
#include <uapi/linux/mqueue.h>
+#include <linux/tty.h>
/* AUDIT_NAMES is the number of slots we reserve in the audit_context
* for saving names from getname(). If we get more names we will allocate
extern void audit_log_d_path_exe(struct audit_buffer *ab,
struct mm_struct *mm);
+extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
+extern void audit_put_tty(struct tty_struct *tty);
+
/* audit watch functions */
#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
#include <asm/unistd.h>
#include <linux/security.h>
#include <linux/list.h>
-#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
if (!audit_enabled)
return;
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+ if (!ab)
+ return;
+
uid = from_kuid(&init_user_ns, task_uid(current));
oldloginuid = from_kuid(&init_user_ns, koldloginuid);
loginuid = from_kuid(&init_user_ns, kloginuid),
tty = audit_get_tty(current);
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
- if (!ab)
- return;
audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
audit_log_task_context(ab);
audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
* are set to NOT_INIT to indicate that they are no longer readable.
*/
-/* types of values stored in eBPF registers */
-enum bpf_reg_type {
- NOT_INIT = 0, /* nothing was written into register */
- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
- PTR_TO_CTX, /* reg points to bpf_context */
- CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
- PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
- FRAME_PTR, /* reg == frame_pointer */
- PTR_TO_STACK, /* reg == frame_pointer + imm */
- CONST_IMM, /* constant integer value */
-
- /* PTR_TO_PACKET represents:
- * skb->data
- * skb->data + imm
- * skb->data + (u16) var
- * skb->data + (u16) var + imm
- * if (range > 0) then [ptr, ptr + range - off) is safe to access
- * if (id > 0) means that some 'var' was added
- * if (off > 0) menas that 'imm' was added
- */
- PTR_TO_PACKET,
- PTR_TO_PACKET_END, /* skb->data + headlen */
-};
-
struct reg_state {
enum bpf_reg_type type;
union {
/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
- enum bpf_access_type t)
+ enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
if (env->prog->aux->ops->is_valid_access &&
- env->prog->aux->ops->is_valid_access(off, size, t)) {
+ env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
mark_reg_unknown_value(state->regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
+ enum bpf_reg_type reg_type = UNKNOWN_VALUE;
+
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
- err = check_ctx_access(env, off, size, t);
+ err = check_ctx_access(env, off, size, t, ®_type);
if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown_value(state->regs, value_regno);
- if (off == offsetof(struct __sk_buff, data) &&
- env->allow_ptr_leaks)
+ if (env->allow_ptr_leaks)
/* note that reg.[id|off|range] == 0 */
- state->regs[value_regno].type = PTR_TO_PACKET;
- else if (off == offsetof(struct __sk_buff, data_end) &&
- env->allow_ptr_leaks)
- state->regs[value_regno].type = PTR_TO_PACKET_END;
+ state->regs[value_regno].type = reg_type;
}
} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
static void put_css_set(struct css_set *cset)
{
+ unsigned long flags;
+
/*
* Ensure that the refcount doesn't hit zero while any readers
* can see it. Similar to atomic_dec_and_lock(), but for an
if (atomic_add_unless(&cset->refcount, -1, 1))
return;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irqsave(&css_set_lock, flags);
put_css_set_locked(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irqrestore(&css_set_lock, flags);
}
/*
/* First see if we already have a cgroup group that matches
* the desired set */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = find_existing_css_set(old_cset, cgrp, template);
if (cset)
get_css_set(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (cset)
return cset;
* find_existing_css_set() */
memcpy(cset->subsys, template, sizeof(cset->subsys));
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
/* Add reference counts and links from the new css_set. */
list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
css_get(css);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return cset;
}
* Release all the links from cset_links to this hierarchy's
* root cgroup
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
list_del(&link->cset_link);
kfree(link);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (!list_empty(&root->root_list)) {
list_del(&root->root_list);
ss->root = dst_root;
css->cgroup = dcgrp;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist)
list_move_tail(&cset->e_cset_node[ss->id],
&dcgrp->e_csets[ss->id]);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* default hierarchy doesn't enable controllers by default */
dst_root->subsys_mask |= 1 << ssid;
if (!buf)
return -ENOMEM;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (len >= PATH_MAX)
len = -ERANGE;
{
struct task_struct *p, *g;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
if (use_task_css_set_links)
goto out_unlock;
* entry won't be deleted though the process has exited.
* Do it while holding siglock so that we don't end up
* racing against cgroup_exit().
+ *
+ * Interrupts were already disabled while acquiring
+ * the css_set_lock, so we do not need to disable it
+ * again when acquiring the sighand->siglock here.
*/
- spin_lock_irq(&p->sighand->siglock);
+ spin_lock(&p->sighand->siglock);
if (!(p->flags & PF_EXITING)) {
struct css_set *cset = task_css_set(p);
list_add_tail(&p->cg_list, &cset->tasks);
get_css_set(cset);
}
- spin_unlock_irq(&p->sighand->siglock);
+ spin_unlock(&p->sighand->siglock);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
out_unlock:
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
static void init_cgroup_housekeeping(struct cgroup *cgrp)
* Link the root cgroup in this hierarchy into all the css_set
* objects.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist) {
link_css_set(&tmp_links, cset, root_cgrp);
if (css_set_populated(cset))
cgroup_update_populated(root_cgrp, true);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
BUG_ON(!list_empty(&root_cgrp->self.children));
BUG_ON(atomic_read(&root->nr_cgrps) != 1);
struct cgroup *cgrp;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgrp = cset_cgroup_from_root(ns->root_cset, root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
char *ret;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
return ret;
char *path = NULL;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
path = buf;
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
return path;
}
* the new cgroup. There are no failure cases after here, so this
* is the commit point.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(cset, &tset->src_csets, mg_node) {
list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
struct css_set *from_cset = task_css_set(task);
put_css_set_locked(from_cset);
}
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/*
* Migration is committed, all target tasks are now on dst_csets.
}
} while_each_subsys_mask();
out_release_tset:
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_splice_init(&tset->dst_csets, &tset->src_csets);
list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
list_del_init(&cset->mg_node);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return ret;
}
lockdep_assert_held(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cgrp = NULL;
list_del_init(&cset->mg_preload_node);
put_css_set_locked(cset);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/**
* already PF_EXITING could be freed from underneath us unless we
* take an rcu_read_lock.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return cgroup_taskset_migrate(&tset, root);
}
return -EBUSY;
/* look up all src csets */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* prepare dst csets and commit */
ret = cgroup_migrate_prepare_dst(&preloaded_csets);
struct cgroup *cgrp;
struct inode *inode;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
while (!cgroup_is_descendant(dst_cgrp, cgrp))
cgrp = cgroup_parent(cgrp);
if (root == &cgrp_dfl_root)
continue;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
from_cgrp = task_cgroup_from_root(from, root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
retval = cgroup_attach_task(from_cgrp, tsk, false);
if (retval)
percpu_down_write(&cgroup_threadgroup_rwsem);
/* look up all csses currently attached to @cgrp's subtree */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
struct cgrp_cset_link *link;
cgroup_migrate_add_src(link->cset, dsct,
&preloaded_csets);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&preloaded_csets);
if (ret)
goto out_finish;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
struct task_struct *task, *ntask;
list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
cgroup_taskset_add(task, &tset);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
ret = cgroup_taskset_migrate(&tset, cgrp->root);
out_finish:
int count = 0;
struct cgrp_cset_link *link;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
count += atomic_read(&link->cset->refcount);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return count;
}
memset(it, 0, sizeof(*it));
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
it->ss = css->ss;
css_task_iter_advance_css_set(it);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/**
it->cur_task = NULL;
}
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
if (it->task_pos) {
it->cur_task = list_entry(it->task_pos, struct task_struct,
css_task_iter_advance(it);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return it->cur_task;
}
void css_task_iter_end(struct css_task_iter *it)
{
if (it->cur_cset) {
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_del(&it->iters_node);
put_css_set_locked(it->cur_cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
if (it->cur_task)
mutex_lock(&cgroup_mutex);
/* all tasks in @from are being moved, all csets are source */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
ret = cgroup_migrate_prepare_dst(&preloaded_csets);
if (ret)
memset(css, 0, sizeof(*css));
css->cgroup = cgrp;
css->ss = ss;
+ css->id = -1;
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
if (err < 0)
- goto err_free_percpu_ref;
+ goto err_free_css;
css->id = err;
/* @css is ready to be brought online now, make it visible */
err_list_del:
list_del_rcu(&css->sibling);
- cgroup_idr_remove(&ss->css_idr, css->id);
-err_free_percpu_ref:
- percpu_ref_exit(&css->refcnt);
err_free_css:
call_rcu(&css->rcu_head, css_free_rcu_fn);
return ERR_PTR(err);
*/
cgrp->self.flags &= ~CSS_ONLINE;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
link->cset->dead = true;
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* initiate massacre of all css's */
for_each_css(css, ssid, cgrp)
goto out;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
for_each_root(root) {
struct cgroup_subsys *ss;
retval = 0;
out_unlock:
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
kfree(buf);
out:
if (use_task_css_set_links) {
struct css_set *cset;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
if (list_empty(&child->cg_list)) {
get_css_set(cset);
css_set_move_task(child, NULL, cset, false);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/*
cset = task_css_set(tsk);
if (!list_empty(&tsk->cg_list)) {
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
} else {
get_css_set(cset);
}
if (!pathbuf || !agentbuf)
goto out;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (!path)
goto out;
return ERR_PTR(-EPERM);
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
get_css_set(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
new_ns = alloc_cgroup_ns();
if (!name_buf)
return -ENOMEM;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
cset = rcu_dereference(current->cgroups);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
c->root->hierarchy_id, name_buf);
}
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
kfree(name_buf);
return 0;
}
struct cgroup_subsys_state *css = seq_css(seq);
struct cgrp_cset_link *link;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
overflow:
seq_puts(seq, " ...\n");
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return 0;
}
.teardown = takedown_cpu,
.cant_stop = true,
},
+#else
+ [CPUHP_BRINGUP_CPU] = { },
#endif
};
return event->state == PERF_EVENT_STATE_DEAD;
}
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
return pmu->filter_match ? pmu->filter_match(event) : 1;
}
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters, to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+ struct perf_event *child;
+
+ if (!__pmu_filter_match(event))
+ return 0;
+
+ list_for_each_entry(child, &event->sibling_list, group_entry) {
+ if (!__pmu_filter_match(child))
+ return 0;
+ }
+
+ return 1;
+}
+
static inline int
event_filter_match(struct perf_event *event)
{
if (event->ctx)
put_ctx(event->ctx);
- if (event->pmu) {
- exclusive_event_destroy(event);
- module_put(event->pmu->module);
- }
+ exclusive_event_destroy(event);
+ module_put(event->pmu->module);
call_rcu(&event->rcu_head, free_event_rcu);
}
prog = event->tp_event->prog;
if (prog) {
event->tp_event->prog = NULL;
- bpf_prog_put(prog);
+ bpf_prog_put_rcu(prog);
}
}
}
#endif
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
{
}
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
*/
# if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
return page ? page_address(page) : NULL;
}
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
{
- struct page *page = virt_to_page(ti);
+ struct page *page = virt_to_page(stack);
memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
-(1 << THREAD_SIZE_ORDER));
__free_kmem_pages(page, THREAD_SIZE_ORDER);
}
# else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
- return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+ return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
{
- kmem_cache_free(thread_info_cache, ti);
+ kmem_cache_free(thread_stack_cache, stack);
}
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
+ BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
{
- struct zone *zone = page_zone(virt_to_page(ti));
+ struct zone *zone = page_zone(virt_to_page(stack));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
- arch_release_thread_info(tsk->stack);
- free_thread_info(tsk->stack);
+ arch_release_thread_stack(tsk->stack);
+ free_thread_stack(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
- struct thread_info *ti;
+ unsigned long *stack;
int err;
if (node == NUMA_NO_NODE)
if (!tsk)
return NULL;
- ti = alloc_thread_info_node(tsk, node);
- if (!ti)
+ stack = alloc_thread_stack_node(tsk, node);
+ if (!stack)
goto free_tsk;
err = arch_dup_task_struct(tsk, orig);
if (err)
- goto free_ti;
+ goto free_stack;
- tsk->stack = ti;
+ tsk->stack = stack;
#ifdef CONFIG_SECCOMP
/*
* We must handle setting up seccomp filters once we're under
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
- account_kernel_stack(ti, 1);
+ account_kernel_stack(stack, 1);
kcov_task_init(tsk);
return tsk;
-free_ti:
- free_thread_info(ti);
+free_stack:
+ free_thread_stack(stack);
free_tsk:
free_task_struct(tsk);
return NULL;
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
- struct page *page;
+ struct page *page, *tail;
struct address_space *mapping;
int err, ro = 0;
* considered here and page lock forces unnecessarily serialization
* From this point on, mapping will be re-verified if necessary and
* page lock will be acquired only if it is unavoidable
- */
+ *
+ * Mapping checks require the head page for any compound page so the
+ * head page and mapping is looked up now. For anonymous pages, it
+ * does not matter if the page splits in the future as the key is
+ * based on the address. For filesystem-backed pages, the tail is
+ * required as the index of the page determines the key. For
+ * base pages, there is no tail page and tail == page.
+ */
+ tail = page;
page = compound_head(page);
mapping = READ_ONCE(page->mapping);
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.inode = inode;
- key->shared.pgoff = basepage_index(page);
+ key->shared.pgoff = basepage_index(tail);
rcu_read_unlock();
}
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
void static_key_slow_inc(struct static_key *key)
{
+ int v, v1;
+
STATIC_KEY_CHECK_USE();
- if (atomic_inc_not_zero(&key->enabled))
- return;
+
+ /*
+ * Careful if we get concurrent static_key_slow_inc() calls;
+ * later calls must wait for the first one to _finish_ the
+ * jump_label_update() process. At the same time, however,
+ * the jump_label_update() call below wants to see
+ * static_key_enabled(&key) for jumps to be updated properly.
+ *
+ * So give a special meaning to negative key->enabled: it sends
+ * static_key_slow_inc() down the slow path, and it is non-zero
+ * so it counts as "enabled" in jump_label_update(). Note that
+ * atomic_inc_unless_negative() checks >= 0, so roll our own.
+ */
+ for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+ v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+ if (likely(v1 == v))
+ return;
+ }
jump_label_lock();
- if (atomic_inc_return(&key->enabled) == 1)
+ if (atomic_read(&key->enabled) == 0) {
+ atomic_set(&key->enabled, -1);
jump_label_update(key);
+ atomic_set(&key->enabled, 1);
+ } else {
+ atomic_inc(&key->enabled);
+ }
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit, struct delayed_work *work)
{
+ /*
+ * The negative count check is valid even when a negative
+ * key->enabled is in use by static_key_slow_inc(); a
+ * __static_key_slow_dec() before the first static_key_slow_inc()
+ * returns is unbalanced, because all other static_key_slow_inc()
+ * instances block while the update is in progress.
+ */
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
static int __init kcov_init(void)
{
- if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
+ /*
+ * The kcov debugfs file won't ever get removed and thus,
+ * there is no need to protect it against removal races. The
+ * use of debugfs_create_file_unsafe() is actually safe here.
+ */
+ if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
pr_err("failed to create kcov in debugfs\n");
return -ENOMEM;
}
}
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti)
+ struct task_struct *task)
{
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
/* Mark the current thread as blocked on the lock: */
- ti->task->blocked_on = waiter;
+ task->blocked_on = waiter;
}
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti)
+ struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
- ti->task->blocked_on = NULL;
+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+ task->blocked_on = NULL;
list_del_init(&waiter->list);
waiter->task = NULL;
extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
- struct thread_info *ti);
+ struct task_struct *task);
extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti);
+ struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
if (!hold_ctx)
return 0;
- if (unlikely(ctx == hold_ctx))
- return -EALREADY;
-
if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
(ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
unsigned long flags;
int ret;
+ if (use_ww_ctx) {
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+ return -EALREADY;
+ }
+
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
goto skip_wait;
debug_mutex_lock_common(lock, &waiter);
- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+ debug_mutex_add_waiter(lock, &waiter, task);
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
}
__set_task_state(task, TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current_thread_info());
+ mutex_remove_waiter(lock, &waiter, task);
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
return 0;
err:
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ mutex_remove_waiter(lock, &waiter, task);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
do { spin_lock(lock); (void)(flags); } while (0)
#define spin_unlock_mutex(lock, flags) \
do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
__list_del((waiter)->list.prev, (waiter)->list.next)
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
#endif
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
+ *
+ * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
+ * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourseves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more intersting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ u32 val;
+
+ for (;;) {
+ val = atomic_read(&lock->val);
+
+ if (!val) /* not locked, we're done */
+ goto done;
+
+ if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+ break;
+
+ /* not locked, but pending, wait until we observe the lock */
+ cpu_relax();
+ }
+
+ /* any unlock is good */
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+
+done:
+ smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
#endif /* _GEN_PV_LOCK_SLOWPATH */
/**
if (!error && !oom_killer_disable())
error = -EBUSY;
+ /*
+ * There is a hard to fix race between oom_reaper kernel thread
+ * and oom_killer_disable. oom_reaper calls exit_oom_victim
+ * before the victim reaches exit_mm so try to freeze all the tasks
+ * again and catch such a left over task.
+ */
+ if (!error) {
+ pr_info("Double checking all user space processes after OOM killer disable... ");
+ error = try_to_freeze_tasks(true);
+ pr_cont("\n");
+ }
+
if (error)
thaw_processes();
return error;
kref_put(&chan->kref, relay_destroy_channel);
mutex_unlock(&relay_channels_mutex);
+ kfree(chan);
return NULL;
}
EXPORT_SYMBOL_GPL(relay_open);
for (;;) {
/* Any allowed, online CPU? */
for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
- if (!cpu_active(dest_cpu))
+ if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+ continue;
+ if (!cpu_online(dest_cpu))
continue;
goto out;
}
#endif
#endif
+#ifdef CONFIG_SCHEDSTATS
+
DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
-#ifdef CONFIG_SCHEDSTATS
static void set_schedstats(bool enabled)
{
if (enabled)
if (!str)
goto out;
+ /*
+ * This code is called before jump labels have been set up, so we can't
+ * change the static branch directly just yet. Instead set a temporary
+ * variable so init_schedstats() can do it later.
+ */
if (!strcmp(str, "enable")) {
- set_schedstats(true);
+ __sched_schedstats = true;
ret = 1;
} else if (!strcmp(str, "disable")) {
- set_schedstats(false);
+ __sched_schedstats = false;
ret = 1;
}
out:
}
__setup("schedstats=", setup_schedstats);
+static void __init init_schedstats(void)
+{
+ set_schedstats(__sched_schedstats);
+}
+
#ifdef CONFIG_PROC_SYSCTL
int sysctl_schedstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
set_schedstats(state);
return err;
}
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
/*
* fork()/clone()-time setup:
*/
set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
- /* Post initialize new task's util average when its cfs_rq is set */
+ rq = __task_rq_lock(p, &rf);
post_init_entity_util_avg(&p->se);
- rq = __task_rq_lock(p, &rf);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
- BUG_ON(task_stack_end_corrupted(prev));
+ if (task_stack_end_corrupted(prev))
+ panic("corrupted stack end detected inside scheduler\n");
#endif
if (unlikely(in_atomic_preempt_off())) {
/*
* reset the NMI-timeout, listing all files on a slow
* console might take a lot of time:
+ * Also, reset softlockup watchdogs on all CPUs, because
+ * another CPU might be blocked waiting for us to process
+ * an IPI.
*/
touch_nmi_watchdog();
+ touch_all_softlockup_watchdogs();
if (!state_filter || (p->state & state_filter))
sched_show_task(p);
}
- touch_all_softlockup_watchdogs();
-
#ifdef CONFIG_SCHED_DEBUG
if (!state_filter)
sysrq_sched_debug_show();
/*
* Since this CPU is going 'away' for a while, fold any nr_active delta
* we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable.
+ * nr_active count is stable. We need to take the teardown thread which
+ * is calling this into account, so we hand in adjust = 1 to the load
+ * calculation.
*
* Also see the comment "Global load-average calculations".
*/
static void calc_load_migrate(struct rq *rq)
{
- long delta = calc_load_fold_active(rq);
+ long delta = calc_load_fold_active(rq, 1);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
}
#endif
init_sched_fair_class();
+ init_schedstats();
+
scheduler_running = 1;
}
SPLIT_NS(p->se.vruntime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
-#ifdef CONFIG_SCHEDSTATS
- if (schedstat_enabled()) {
- SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
- SPLIT_NS(p->se.statistics.wait_sum),
- SPLIT_NS(p->se.sum_exec_runtime),
- SPLIT_NS(p->se.statistics.sum_sleep_runtime));
- }
-#else
+
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
- 0LL, 0L,
+ SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
SPLIT_NS(p->se.sum_exec_runtime),
- 0LL, 0L);
-#endif
+ SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
}
}
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
#else
void init_entity_runnable_average(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long tg_weight;
+ long tg_weight, load, shares;
/*
- * Use this CPU's real-time load instead of the last load contribution
- * as the updating of the contribution is delayed, and we will use the
- * the real-time load to calc the share. See update_tg_load_avg().
+ * This really should be: cfs_rq->avg.load_avg, but instead we use
+ * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+ * the shares for small weight interactive tasks.
*/
- tg_weight = atomic_long_read(&tg->load_avg);
- tg_weight -= cfs_rq->tg_load_avg_contrib;
- tg_weight += cfs_rq->load.weight;
+ load = scale_load_down(cfs_rq->load.weight);
- return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
- long tg_weight, load, shares;
+ tg_weight = atomic_long_read(&tg->load_avg);
- tg_weight = calc_tg_weight(tg, cfs_rq);
- load = cfs_rq->load.weight;
+ /* Ensure tg_weight >= load */
+ tg_weight -= cfs_rq->tg_load_avg_contrib;
+ tg_weight += load;
shares = (tg->shares * load);
if (tg_weight)
return tg->shares;
}
# endif /* CONFIG_SMP */
+
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
}
}
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ typeof(*ptr) val = (_val); \
+ typeof(*ptr) res, var = READ_ONCE(*ptr); \
+ res = var - val; \
+ if (res > var) \
+ res = 0; \
+ WRITE_ONCE(*ptr, res); \
+} while (0)
+
/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
if (atomic_long_read(&cfs_rq->removed_load_avg)) {
s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
- sa->load_avg = max_t(long, sa->load_avg - r, 0);
- sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+ sub_positive(&sa->load_avg, r);
+ sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
removed_load = 1;
}
if (atomic_long_read(&cfs_rq->removed_util_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
- sa->util_avg = max_t(long, sa->util_avg - r, 0);
- sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+ sub_positive(&sa->util_avg, r);
+ sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
removed_util = 1;
}
&se->avg, se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL);
- cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
- cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
- cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
- cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+ sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+ sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+ sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
cfs_rq_util_change(cfs_rq);
}
trace_sched_stat_iowait_enabled() ||
trace_sched_stat_blocked_enabled() ||
trace_sched_stat_runtime_enabled()) {
- pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+ printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
"stat_blocked and stat_runtime require the "
"kernel parameter schedstats=enabled or "
"kernel.sched_schedstats=1\n");
if (!cfs_bandwidth_used())
return;
+ /* Synchronize hierarchical throttle counter: */
+ if (unlikely(!cfs_rq->throttle_uptodate)) {
+ struct rq *rq = rq_of(cfs_rq);
+ struct cfs_rq *pcfs_rq;
+ struct task_group *tg;
+
+ cfs_rq->throttle_uptodate = 1;
+
+ /* Get closest up-to-date node, because leaves go first: */
+ for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+ pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+ if (pcfs_rq->throttle_uptodate)
+ break;
+ }
+ if (tg) {
+ cfs_rq->throttle_count = pcfs_rq->throttle_count;
+ cfs_rq->throttled_clock_task = rq_clock_task(rq);
+ }
+ }
+
/* an active group must be handled by the update_curr()->put() path */
if (!cfs_rq->runtime_enabled || cfs_rq->curr)
return;
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
+ /* Avoid re-evaluating load for this entity: */
+ se = parent_entity(se);
/*
* Bias pick_next to pick a task from this cfs_rq, as
* p is sleeping when it is within its sched_slice.
*/
- if (task_sleep && parent_entity(se))
- set_next_buddy(parent_entity(se));
-
- /* avoid re-evaluating load for this entity */
- se = parent_entity(se);
+ if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+ set_next_buddy(se);
break;
}
flags |= DEQUEUE_SLEEP;
return wl;
for_each_sched_entity(se) {
- long w, W;
+ struct cfs_rq *cfs_rq = se->my_q;
+ long W, w = cfs_rq_load_avg(cfs_rq);
- tg = se->my_q->tg;
+ tg = cfs_rq->tg;
/*
* W = @wg + \Sum rw_j
*/
- W = wg + calc_tg_weight(tg, se->my_q);
+ W = wg + atomic_long_read(&tg->load_avg);
+
+ /* Ensure \Sum rw_j >= rw_i */
+ W -= cfs_rq->tg_load_avg_contrib;
+ W += w;
/*
* w = rw_i + @wl
*/
- w = cfs_rq_load_avg(se->my_q) + wl;
+ w += wl;
/*
* wl = S * s'_i; see (2)
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
- struct cfs_rq *cfs_rq;
struct sched_entity *se;
+ struct cfs_rq *cfs_rq;
+ struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
init_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
init_entity_runnable_average(se);
+
+ raw_spin_lock_irq(&rq->lock);
post_init_entity_util_avg(se);
+ raw_spin_unlock_irq(&rq->lock);
}
return 1;
*/
static void cpuidle_idle_call(void)
{
- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+ struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
loads[2] = (avenrun[2] + offset) << shift;
}
-long calc_load_fold_active(struct rq *this_rq)
+long calc_load_fold_active(struct rq *this_rq, long adjust)
{
long nr_active, delta = 0;
- nr_active = this_rq->nr_running;
+ nr_active = this_rq->nr_running - adjust;
nr_active += (long)this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
* We're going into NOHZ mode, if there's any pending delta, fold it
* into the pending idle delta.
*/
- delta = calc_load_fold_active(this_rq);
+ delta = calc_load_fold_active(this_rq, 0);
if (delta) {
int idx = calc_load_write_idx();
if (time_before(jiffies, this_rq->calc_load_update))
return;
- delta = calc_load_fold_active(this_rq);
+ delta = calc_load_fold_active(this_rq, 0);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
extern atomic_long_t calc_load_tasks;
extern void calc_global_load_tick(struct rq *this_rq);
-extern long calc_load_fold_active(struct rq *this_rq);
+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
u64 throttled_clock, throttled_clock_task;
u64 throttled_clock_task_time;
- int throttled, throttle_count;
+ int throttled, throttle_count, throttle_uptodate;
struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
# define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
# define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
+# define schedstat_val(rq, field) ((schedstat_enabled()) ? (rq)->field : 0)
+
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
# define schedstat_inc(rq, field) do { } while (0)
# define schedstat_add(rq, field, amt) do { } while (0)
# define schedstat_set(var, val) do { } while (0)
+# define schedstat_val(rq, field) 0
#endif
#ifdef CONFIG_SCHED_INFO
timer->it.cpu.expires = 0;
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
+ return;
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
unlock_task_sighand(p, &flags);
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
- file = (struct file *)array->ptrs[index];
+ file = READ_ONCE(array->ptrs[index]);
if (unlikely(!file))
return -ENOENT;
event->pmu->count)
return -EINVAL;
+ if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
+ event->attr.type != PERF_TYPE_RAW))
+ return -EINVAL;
+
/*
* we don't know if the function is run successfully by the
* return value. It can be judged in other places, such as
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
- file = (struct file *)array->ptrs[index];
+ file = READ_ONCE(array->ptrs[index]);
if (unlikely(!file))
return -ENOENT;
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
-static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
/* check bounds */
if (off < 0 || off >= sizeof(struct pt_regs))
}
}
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
return false;
static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
{
struct trace_bprintk_fmt *pos;
+
+ if (!fmt)
+ return ERR_PTR(-EINVAL);
+
list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
if (!strcmp(pos->fmt, fmt))
return pos;
for (iter = start; iter < end; iter++) {
struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
if (tb_fmt) {
- *iter = tb_fmt->fmt;
+ if (!IS_ERR(tb_fmt))
+ *iter = tb_fmt->fmt;
continue;
}
if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
return;
- /* is @cpu the only online CPU? */
cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
- if (cpumask_weight(&cpumask) != 1)
- return;
/* as we're called from CPU_ONLINE, the following shouldn't fail */
for_each_pool_worker(worker, pool)
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
- pool->attrs->cpumask) < 0);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
}
/*
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+ if (!isolated)
+ break;
+
total_isolated += isolated;
+ cc->nr_freepages += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
page++;
}
-
- /* If a page was split, advance to the end of it */
- if (isolated) {
- cc->nr_freepages += isolated;
- if (!strict &&
- cc->nr_migratepages <= cc->nr_freepages) {
- blockpfn += isolated;
- break;
- }
-
- blockpfn += isolated - 1;
- cursor += isolated - 1;
- continue;
+ if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+ blockpfn += isolated;
+ break;
}
+ /* Advance to the end of split page */
+ blockpfn += isolated - 1;
+ cursor += isolated - 1;
+ continue;
isolate_fail:
if (strict)
}
+ if (locked)
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+
/*
* There is a tiny chance that we have read bogus compound_order(),
* so be careful to not go outside of the pageblock.
if (strict && blockpfn < end_pfn)
total_isolated = 0;
- if (locked)
- spin_unlock_irqrestore(&cc->zone->lock, flags);
-
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false);
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
-
/*
* This can iterate a massively long zone without finding any
* suitable migration targets, so periodically check if we need
continue;
/* Found a block suitable for isolating free pages from. */
- isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freelist, false);
+ isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+ freelist, false);
/*
- * If we isolated enough freepages, or aborted due to async
- * compaction being contended, terminate the loop.
- * Remember where the free scanner should restart next time,
- * which is where isolate_freepages_block() left off.
- * But if it scanned the whole pageblock, isolate_start_pfn
- * now points at block_end_pfn, which is the start of the next
- * pageblock.
- * In that case we will however want to restart at the start
- * of the previous pageblock.
+ * If we isolated enough freepages, or aborted due to lock
+ * contention, terminate.
*/
if ((cc->nr_freepages >= cc->nr_migratepages)
|| cc->contended) {
- if (isolate_start_pfn >= block_end_pfn)
+ if (isolate_start_pfn >= block_end_pfn) {
+ /*
+ * Restart at previous pageblock if more
+ * freepages can be isolated next time.
+ */
isolate_start_pfn =
block_start_pfn - pageblock_nr_pages;
+ }
break;
- } else {
+ } else if (isolate_start_pfn < block_end_pfn) {
/*
- * isolate_freepages_block() should not terminate
- * prematurely unless contended, or isolated enough
+ * If isolation failed early, do not continue
+ * needlessly.
*/
- VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+ break;
}
}
*/
start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
end_index = (endbyte >> PAGE_SHIFT);
+ if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+ /* First page is tricky as 0 - 1 = -1, but pgoff_t
+ * is unsigned, so the end_index >= start_index
+ * check below would be true and we'll discard the whole
+ * file cache which is not what was asked.
+ */
+ if (end_index == 0)
+ break;
+
+ end_index--;
+ }
if (end_index >= start_index) {
unsigned long count = invalidate_mapping_pages(mapping,
if (file->f_ra.mmap_miss > 0)
file->f_ra.mmap_miss--;
addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
- do_set_pte(vma, addr, page, pte, false, false, true);
+ do_set_pte(vma, addr, page, pte, false, false);
unlock_page(page);
goto next;
unlock:
if (next - addr != HPAGE_PMD_SIZE) {
get_page(page);
spin_unlock(ptl);
- if (split_huge_page(page)) {
- put_page(page);
- unlock_page(page);
- goto out_unlocked;
- }
+ split_huge_page(page);
put_page(page);
unlock_page(page);
- ret = 1;
goto out_unlocked;
}
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze)
+ unsigned long address, bool freeze, struct page *page)
{
spinlock_t *ptl;
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
ptl = pmd_lock(mm, pmd);
+
+ /*
+ * If caller asks to setup a migration entries, we need a page to check
+ * pmd against. Otherwise we can end up replacing wrong page.
+ */
+ VM_BUG_ON(freeze && !page);
+ if (page && page != pmd_page(*pmd))
+ goto out;
+
if (pmd_trans_huge(*pmd)) {
- struct page *page = pmd_page(*pmd);
+ page = pmd_page(*pmd);
if (PageMlocked(page))
clear_page_mlock(page);
} else if (!pmd_devmap(*pmd))
return;
pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
- return;
- /*
- * If caller asks to setup a migration entries, we need a page to check
- * pmd against. Otherwise we can end up replacing wrong page.
- */
- VM_BUG_ON(freeze && !page);
- if (page && page != pmd_page(*pmd))
- return;
-
- /*
- * Caller holds the mmap_sem write mode or the anon_vma lock,
- * so a huge pmd cannot materialize from under us (khugepaged
- * holds both the mmap_sem write mode and the anon_vma lock
- * write mode).
- */
- __split_huge_pmd(vma, pmd, address, freeze);
+ __split_huge_pmd(vma, pmd, address, freeze, page);
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
* Only the process that called mmap() has reserves for
* private mappings.
*/
- if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
- return true;
+ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ /*
+ * Like the shared case above, a hole punch or truncate
+ * could have been performed on the private mapping.
+ * Examine the value of chg to determine if reserves
+ * actually exist or were previously consumed.
+ * Very Subtle - The value of chg comes from a previous
+ * call to vma_needs_reserves(). The reserve map for
+ * private mappings has different (opposite) semantics
+ * than that of shared mappings. vma_needs_reserves()
+ * has already taken this difference in semantics into
+ * account. Therefore, the meaning of chg is the same
+ * as in the shared case above. Code could easily be
+ * combined, but keeping it separate draws attention to
+ * subtle differences.
+ */
+ if (chg)
+ return false;
+ else
+ return true;
+ }
return false;
}
int nr_pages = 1 << order;
struct page *p = page + 1;
+ atomic_set(compound_mapcount_ptr(page), 0);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
clear_compound_head(p);
set_page_refcounted(p);
if (vma->vm_flags & VM_MAYSHARE)
return ret;
+ else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+ /*
+ * In most cases, reserves always exist for private mappings.
+ * However, a file associated with mapping could have been
+ * hole punched or truncated after reserves were consumed.
+ * As subsequent fault on such a range will not use reserves.
+ * Subtle - The reserve map for private mappings has the
+ * opposite meaning than that of shared mappings. If NO
+ * entry is in the reserve map, it means a reservation exists.
+ * If an entry exists in the reserve map, it means the
+ * reservation has already been consumed. As a result, the
+ * return value of this routine is the opposite of the
+ * value returned from reserve map manipulation routines above.
+ */
+ if (ret)
+ return 0;
+ else
+ return 1;
+ }
else
return ret < 0 ? ret : 0;
}
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
- page_move_anon_rmap(old_page, vma, address);
+ page_move_anon_rmap(old_page, vma);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
if (saddr) {
spte = huge_pte_offset(svma->vm_mm, saddr);
if (spte) {
- mm_inc_nr_pmds(mm);
get_page(virt_to_page(spte));
break;
}
if (pud_none(*pud)) {
pud_populate(mm, pud,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
+ mm_inc_nr_pmds(mm);
} else {
put_page(virt_to_page(spte));
- mm_inc_nr_pmds(mm);
}
spin_unlock(ptl);
out:
*/
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
- __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+ __GFP_ATOMIC)
/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
kasan_kmalloc(cache, object, cache->object_size, flags);
}
-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
unsigned long size = cache->object_size;
unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
kasan_kmalloc(page->slab_cache, object, size, flags);
}
-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
{
struct page *page;
kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
KASAN_FREE_PAGE);
else
- kasan_slab_free(page->slab_cache, ptr);
+ kasan_poison_slab_free(page->slab_cache, ptr);
}
void kasan_kfree_large(const void *ptr)
static int __init kasan_memhotplug_init(void)
{
- pr_err("WARNING: KASAN doesn't support memory hot-add\n");
- pr_err("Memory hot-add will be disabled\n");
+ pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+ pr_info("Memory hot-add will be disabled\n");
hotplug_memory_notifier(kasan_mem_notifier, 0);
struct qlist_head *to,
struct kmem_cache *cache)
{
- struct qlist_node *prev = NULL, *curr;
+ struct qlist_node *curr;
if (unlikely(qlist_empty(from)))
return;
curr = from->head;
+ qlist_init(from);
while (curr) {
- struct qlist_node *qlink = curr;
- struct kmem_cache *obj_cache = qlink_to_cache(qlink);
-
- if (obj_cache == cache) {
- if (unlikely(from->head == qlink)) {
- from->head = curr->next;
- prev = curr;
- } else
- prev->next = curr->next;
- if (unlikely(from->tail == qlink))
- from->tail = curr->next;
- from->bytes -= cache->size;
- qlist_put(to, qlink, cache->size);
- } else {
- prev = curr;
- }
- curr = curr->next;
+ struct qlist_node *next = curr->next;
+ struct kmem_cache *obj_cache = qlink_to_cache(curr);
+
+ if (obj_cache == cache)
+ qlist_put(to, curr, obj_cache->size);
+ else
+ qlist_put(from, curr, obj_cache->size);
+
+ curr = next;
}
}
len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
seq_printf(seq, " hex dump (first %zu bytes):\n", len);
+ kasan_disable_current();
seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+ kasan_enable_current();
}
/*
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
- if (!current->memcg_may_oom || current->memcg_in_oom)
+ if (!current->memcg_may_oom)
return;
/*
* We are in the middle of the charge context here, so we
{ }, /* terminate */
};
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
+ * memory-controlled cgroups to 64k.
+ *
+ * However, there usually are many references to the oflline CSS after
+ * the cgroup has been destroyed, such as page cache or reclaimable
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSS from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are much fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exception to that are records of swapped out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+ atomic_inc(&memcg->id.ref);
+}
+
+static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+ if (atomic_dec_and_test(&memcg->id.ref)) {
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
+ memcg->id.id = 0;
+
+ /* Memcg ID pins CSS */
+ css_put(&memcg->css);
+ }
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return idr_find(&mem_cgroup_idr, id);
+}
+
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
if (!memcg)
return NULL;
+ memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+ 1, MEM_CGROUP_ID_MAX,
+ GFP_KERNEL);
+ if (memcg->id.id < 0)
+ goto fail;
+
memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
if (!memcg->stat)
goto fail;
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
+ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
return memcg;
fail:
+ if (memcg->id.id > 0)
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
mem_cgroup_free(memcg);
return NULL;
}
return &memcg->css;
fail:
mem_cgroup_free(memcg);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
- if (css->id > MEM_CGROUP_ID_MAX)
- return -ENOSPC;
-
+ /* Online state pins memcg ID, memcg ID pins CSS */
+ mem_cgroup_id_get(mem_cgroup_from_css(css));
+ css_get(css);
return 0;
}
memcg_offline_kmem(memcg);
wb_memcg_offline(memcg);
+
+ mem_cgroup_id_put(memcg);
}
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
struct mem_cgroup *memcg;
unsigned int nr_pages;
bool compound;
+ unsigned long flags;
VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
commit_charge(newpage, memcg, false);
- local_irq_disable();
+ local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
memcg_check_events(memcg, newpage);
- local_irq_enable();
+ local_irq_restore(flags);
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
if (!memcg)
return;
+ mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
VM_BUG_ON(!irqs_disabled());
mem_cgroup_charge_statistics(memcg, page, false, -1);
memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put(&memcg->css);
}
/*
!page_counter_try_charge(&memcg->swap, 1, &counter))
return -ENOMEM;
+ mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
- css_get(&memcg->css);
return 0;
}
page_counter_uncharge(&memcg->memsw, 1);
}
mem_cgroup_swap_statistics(memcg, false);
- css_put(&memcg->css);
+ mem_cgroup_id_put(memcg);
}
rcu_read_unlock();
}
* Protected against the rmap code by
* the page lock.
*/
- page_move_anon_rmap(compound_head(old_page),
- vma, address);
+ page_move_anon_rmap(old_page, vma);
}
unlock_page(old_page);
return wp_page_reuse(mm, vma, address, page_table, ptl,
* vm_ops->map_pages.
*/
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
- struct page *page, pte_t *pte, bool write, bool anon, bool old)
+ struct page *page, pte_t *pte, bool write, bool anon)
{
pte_t entry;
entry = mk_pte(page, vma->vm_page_prot);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (old)
- entry = pte_mkold(entry);
if (anon) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address, false);
update_mmu_cache(vma, address, pte);
}
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
- PAGE_SIZE;
-#else
rounddown_pow_of_two(65536);
-#endif
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
*/
if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- if (!pte_same(*pte, orig_pte))
- goto unlock_out;
do_fault_around(vma, address, pte, pgoff, flags);
- /* Check if the fault is handled by faultaround */
- if (!pte_same(*pte, orig_pte)) {
- /*
- * Faultaround produce old pte, but the pte we've
- * handler fault for should be young.
- */
- pte_t entry = pte_mkyoung(*pte);
- if (ptep_set_access_flags(vma, address, pte, entry, 0))
- update_mmu_cache(vma, address, pte);
+ if (!pte_same(*pte, orig_pte))
goto unlock_out;
- }
pte_unmap_unlock(pte, ptl);
}
put_page(fault_page);
return ret;
}
- do_set_pte(vma, address, fault_page, pte, false, false, false);
+ do_set_pte(vma, address, fault_page, pte, false, false);
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
}
goto uncharge_out;
}
- do_set_pte(vma, address, new_page, pte, true, true, false);
+ do_set_pte(vma, address, new_page, pte, true, true);
mem_cgroup_commit_charge(new_page, memcg, false, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
put_page(fault_page);
return ret;
}
- do_set_pte(vma, address, fault_page, pte, true, false, false);
+ do_set_pte(vma, address, fault_page, pte, true, false);
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
static void kasan_poison_element(mempool_t *pool, void *element)
{
- if (pool->alloc == mempool_alloc_slab)
- kasan_poison_slab_free(pool->pool_data, element);
- if (pool->alloc == mempool_kmalloc)
- kasan_kfree(element);
+ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+ kasan_poison_kfree(element);
if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data);
}
static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
- if (pool->alloc == mempool_alloc_slab)
- kasan_slab_alloc(pool->pool_data, element, flags);
- if (pool->alloc == mempool_kmalloc)
- kasan_krealloc(element, (size_t)pool->pool_data, flags);
+ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+ kasan_unpoison_slab(element);
if (pool->alloc == mempool_alloc_pages)
kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
return MIGRATEPAGE_SUCCESS;
}
+EXPORT_SYMBOL(migrate_page_move_mapping);
/*
* The expected number of remaining references is the same as that
mem_cgroup_migrate(page, newpage);
}
+EXPORT_SYMBOL(migrate_page_copy);
/************************************************************
* Migration functions
p = find_lock_task_mm(tsk);
if (!p)
goto unlock_oom;
-
mm = p->mm;
- if (!atomic_inc_not_zero(&mm->mm_users)) {
- task_unlock(p);
- goto unlock_oom;
- }
-
+ atomic_inc(&mm->mm_users);
task_unlock(p);
if (!down_read_trylock(&mm->mmap_sem)) {
struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
unsigned long bytes = vm_dirty_bytes;
unsigned long bg_bytes = dirty_background_bytes;
- unsigned long ratio = vm_dirty_ratio;
- unsigned long bg_ratio = dirty_background_ratio;
+ /* convert ratios to per-PAGE_SIZE for higher precision */
+ unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+ unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
unsigned long thresh;
unsigned long bg_thresh;
struct task_struct *tsk;
/*
* The byte settings can't be applied directly to memcg
* domains. Convert them to ratios by scaling against
- * globally available memory.
+ * globally available memory. As the ratios are in
+ * per-PAGE_SIZE, they can be obtained by dividing bytes by
+ * number of pages.
*/
if (bytes)
- ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
- global_avail, 100UL);
+ ratio = min(DIV_ROUND_UP(bytes, global_avail),
+ PAGE_SIZE);
if (bg_bytes)
- bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
- global_avail, 100UL);
+ bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+ PAGE_SIZE);
bytes = bg_bytes = 0;
}
if (bytes)
thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
else
- thresh = (ratio * available_memory) / 100;
+ thresh = (ratio * available_memory) / PAGE_SIZE;
if (bg_bytes)
bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
else
- bg_thresh = (bg_ratio * available_memory) / 100;
+ bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
if (bg_thresh >= thresh)
bg_thresh = thresh / 2;
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
- if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+ int nid = early_pfn_to_nid(pfn);
+
+ if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
return true;
return false;
spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
if (nid < 0)
- nid = 0;
+ nid = first_online_node;
spin_unlock(&early_pfn_lock);
return nid;
.nr_entries = page_ext->nr_entries,
.entries = &page_ext->trace_entries[0],
};
- gfp_t gfp_mask = page_ext->gfp_mask;
- int mt = gfpflags_to_migratetype(gfp_mask);
+ gfp_t gfp_mask;
+ int mt;
if (unlikely(!page_ext)) {
pr_alert("There is not page extension available.\n");
return;
}
+ gfp_mask = page_ext->gfp_mask;
+ mt = gfpflags_to_migratetype(gfp_mask);
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
pr_alert("page_owner info is not active (free page?)\n");
int map_used; /* # of map entries used before the sentry */
int map_alloc; /* # of map entries allocated */
int *map; /* allocation map */
- struct work_struct map_extend_work;/* async ->map[] extension */
+ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
void *data; /* chunk data */
int first_free; /* no free below this */
static int pcpu_reserved_chunk_limit;
static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
/*
* The number of empty populated pages, protected by pcpu_lock. The
* reserved chunk doesn't contribute to the count.
{
int margin, new_alloc;
+ lockdep_assert_held(&pcpu_lock);
+
if (is_atomic) {
margin = 3;
if (chunk->map_alloc <
- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
- pcpu_async_enabled)
- schedule_work(&chunk->map_extend_work);
+ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+ if (list_empty(&chunk->map_extend_list)) {
+ list_add_tail(&chunk->map_extend_list,
+ &pcpu_map_extend_chunks);
+ pcpu_schedule_balance_work();
+ }
+ }
} else {
margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
}
size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
unsigned long flags;
+ lockdep_assert_held(&pcpu_alloc_mutex);
+
new = pcpu_mem_zalloc(new_size);
if (!new)
return -ENOMEM;
return 0;
}
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
- map_extend_work);
- int new_alloc;
-
- spin_lock_irq(&pcpu_lock);
- new_alloc = pcpu_need_to_extend(chunk, false);
- spin_unlock_irq(&pcpu_lock);
-
- if (new_alloc)
- pcpu_extend_area_map(chunk, new_alloc);
-}
-
/**
* pcpu_fit_in_area - try to fit the requested allocation in a candidate area
* @chunk: chunk the candidate area belongs to
chunk->map_used = 1;
INIT_LIST_HEAD(&chunk->list);
- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+ INIT_LIST_HEAD(&chunk->map_extend_list);
chunk->free_size = pcpu_unit_size;
chunk->contig_hint = pcpu_unit_size;
return NULL;
}
+ if (!is_atomic)
+ mutex_lock(&pcpu_alloc_mutex);
+
spin_lock_irqsave(&pcpu_lock, flags);
/* serve reserved allocations from the reserved chunk if available */
if (is_atomic)
goto fail;
- mutex_lock(&pcpu_alloc_mutex);
-
if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
chunk = pcpu_create_chunk();
if (!chunk) {
- mutex_unlock(&pcpu_alloc_mutex);
err = "failed to allocate new chunk";
goto fail;
}
spin_lock_irqsave(&pcpu_lock, flags);
}
- mutex_unlock(&pcpu_alloc_mutex);
goto restart;
area_found:
if (!is_atomic) {
int page_start, page_end, rs, re;
- mutex_lock(&pcpu_alloc_mutex);
-
page_start = PFN_DOWN(off);
page_end = PFN_UP(off + size);
spin_lock_irqsave(&pcpu_lock, flags);
if (ret) {
- mutex_unlock(&pcpu_alloc_mutex);
pcpu_free_area(chunk, off, &occ_pages);
err = "failed to populate";
goto fail_unlock;
/* see the flag handling in pcpu_blance_workfn() */
pcpu_atomic_alloc_failed = true;
pcpu_schedule_balance_work();
+ } else {
+ mutex_unlock(&pcpu_alloc_mutex);
}
return NULL;
}
if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
continue;
+ list_del_init(&chunk->map_extend_list);
list_move(&chunk->list, &to_free);
}
pcpu_destroy_chunk(chunk);
}
+ /* service chunks which requested async area map extension */
+ do {
+ int new_alloc = 0;
+
+ spin_lock_irq(&pcpu_lock);
+
+ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+ struct pcpu_chunk, map_extend_list);
+ if (chunk) {
+ list_del_init(&chunk->map_extend_list);
+ new_alloc = pcpu_need_to_extend(chunk, false);
+ }
+
+ spin_unlock_irq(&pcpu_lock);
+
+ if (new_alloc)
+ pcpu_extend_area_map(chunk, new_alloc);
+ } while (chunk);
+
/*
* Ensure there are certain number of free populated pages for
* atomic allocs. Fill up from the most packed so that atomic
*/
schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&schunk->list);
- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+ INIT_LIST_HEAD(&schunk->map_extend_list);
schunk->base_addr = base_addr;
schunk->map = smap;
schunk->map_alloc = ARRAY_SIZE(smap);
if (dyn_size) {
dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&dchunk->list);
- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+ INIT_LIST_HEAD(&dchunk->map_extend_list);
dchunk->base_addr = base_addr;
dchunk->map = dmap;
dchunk->map_alloc = ARRAY_SIZE(dmap);
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
* @vma: the vma the page belongs to
- * @address: the user virtual address mapped
*
* When a page belongs exclusively to one process after a COW event,
* that page can be moved into the anon_vma that belongs to just that
* process, so the rmap code will not search the parent or sibling
* processes.
*/
-void page_move_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
+ page = compound_head(page);
+
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_VMA(!anon_vma, vma);
- if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
- address &= HPAGE_PMD_MASK;
- VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
/*
goto out;
}
- pte = page_check_address(page, mm, address, &ptl, 0);
+ pte = page_check_address(page, mm, address, &ptl,
+ PageTransCompound(page));
if (!pte)
goto out;
error = shmem_getpage(inode, index, &page, SGP_FALLOC);
if (error) {
/* Remove the !PageUptodate pages we added */
- shmem_undo_range(inode,
- (loff_t)start << PAGE_SHIFT,
- (loff_t)index << PAGE_SHIFT, true);
+ if (index > start) {
+ shmem_undo_range(inode,
+ (loff_t)start << PAGE_SHIFT,
+ ((loff_t)index << PAGE_SHIFT) - 1, true);
+ }
goto undone;
}
goto out_unlock;
cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
- cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
- css->id, memcg_name_buf);
+ cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+ css->serial_nr, memcg_name_buf);
if (!cache_name)
goto out_unlock;
get_page(page);
local_irq_save(flags);
pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_move_tail(pvec);
local_irq_restore(flags);
}
struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
get_page(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
put_cpu_var(activate_page_pvecs);
}
struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
get_page(page);
- if (!pagevec_space(pvec))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
__pagevec_lru_add(pvec);
- pagevec_add(pvec, page);
put_cpu_var(lru_add_pvec);
}
if (likely(get_page_unless_zero(page))) {
struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
put_cpu_var(lru_deactivate_file_pvecs);
}
struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
get_page(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
put_cpu_var(lru_deactivate_pvecs);
}
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+ lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+ if (WARN(!lru_add_drain_wq,
+ "Failed to create workqueue lru_add_drain_wq"))
+ return -ENOMEM;
+
+ return 0;
+}
+early_initcall(lru_init);
+
void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
- schedule_work_on(cpu, work);
+ queue_work_on(cpu, lru_add_drain_wq, work);
cpumask_set_cpu(cpu, &has_work);
}
}
void free_page_and_swap_cache(struct page *page)
{
free_swap_cache(page);
- put_page(page);
+ if (is_huge_zero_page(page))
+ put_huge_zero_page();
+ else
+ put_page(page);
}
/*
max_order = fls_long(totalram_pages - 1);
if (max_order > timestamp_bits)
bucket_order = max_order - timestamp_bits;
- printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+ pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
- /* TODO: gotta make sure the underlying layer can handle it,
- * maybe an IFF_VLAN_CAPABLE flag for devices?
- */
- if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ unsigned int max_mtu = real_dev->mtu;
+
+ if (netif_reduces_vlan_mtu(real_dev))
+ max_mtu -= VLAN_HLEN;
+ if (max_mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
+ unsigned int max_mtu;
__be16 proto;
int err;
if (err < 0)
return err;
+ max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+ real_dev->mtu;
if (!tb[IFLA_MTU])
- dev->mtu = real_dev->mtu;
- else if (dev->mtu > real_dev->mtu)
+ dev->mtu = max_mtu;
+ else if (dev->mtu > max_mtu)
return -EINVAL;
err = vlan_changelink(dev, tb, data);
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
- ax25_destroy_socket(ax25);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:
switch (ax25->state) {
case AX25_STATE_0:
+ case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
+ /* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
- ax25_disconnect(ax25, ETIMEDOUT);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
switch (ax25->state) {
case AX25_STATE_0:
+ case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
+ /* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
- ax25_disconnect(ax25, ETIMEDOUT);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
{
ax25_clear_queues(ax25);
- ax25_stop_heartbeat(ax25);
+ if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
static void batadv_claim_release(struct kref *ref)
{
struct batadv_bla_claim *claim;
+ struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
- batadv_backbone_gw_put(claim->backbone_gw);
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
+ claim->backbone_gw = NULL;
+ spin_unlock_bh(&claim->backbone_lock);
+
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
kfree_rcu(claim, rcu);
}
break;
}
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb)
+ goto out;
+ }
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
+ struct batadv_bla_backbone_gw *old_backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
+ bool remove_crc = false;
int hash_added;
ether_addr_copy(search_claim.addr, mac);
return;
ether_addr_copy(claim->addr, mac);
+ spin_lock_init(&claim->backbone_lock);
claim->vid = vid;
claim->lasttime = jiffies;
+ kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount);
"bla_add_claim(): changing ownership for %pM, vid %d\n",
mac, BATADV_PRINT_VID(vid));
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
- batadv_backbone_gw_put(claim->backbone_gw);
+ remove_crc = true;
}
- /* set (new) backbone gw */
+
+ /* replace backbone_gw atomically and adjust reference counters */
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
+ spin_unlock_bh(&claim->backbone_lock);
+ if (remove_crc) {
+ /* remove claim address from old backbone_gw */
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+ }
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
+ /* add claim address to new backbone_gw */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&backbone_gw->crc_lock);
batadv_claim_put(claim);
}
+/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ spin_lock_bh(&claim->backbone_lock);
+ backbone_gw = claim->backbone_gw;
+ kref_get(&backbone_gw->refcount);
+ spin_unlock_bh(&claim->backbone_lock);
+
+ return backbone_gw;
+}
+
/**
* batadv_bla_del_claim - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
batadv_choose_claim, claim);
batadv_claim_put(claim); /* reference from the hash is gone */
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
struct batadv_hard_iface *primary_if,
int now)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct hlist_head *head;
struct batadv_hashtable *hash;
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;
- if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+ if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
- continue;
+ goto skip;
+
if (!batadv_has_timed_out(claim->lasttime,
BATADV_BLA_CLAIM_TIMEOUT))
- continue;
+ goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_purge_claims(): %pM, vid %d, time out\n",
purge_now:
batadv_handle_unclaim(bat_priv, primary_if,
- claim->backbone_gw->orig,
+ backbone_gw->orig,
claim->addr, claim->vid);
+skip:
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
+ bool own_claim;
bool ret;
ethhdr = eth_hdr(skb);
}
/* if it is our own claim ... */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ own_claim = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (own_claim) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
{
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
+ bool client_roamed;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
goto allow;
/* check if we are responsible. */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ client_roamed = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (client_roamed) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+ is_own = batadv_compare_eth(backbone_gw->orig,
primary_addr);
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- backbone_crc = claim->backbone_gw->crc;
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
claim->addr, BATADV_PRINT_VID(claim->vid),
- claim->backbone_gw->orig,
+ backbone_gw->orig,
(is_own ? 'x' : ' '),
backbone_crc);
+
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
if (!skb_new)
goto out;
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb_new)
+ goto out;
+ }
skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
*/
skb_reset_mac_header(skb_new);
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb_new)
+ goto out;
+ }
/* To preserve backwards compatibility, the node has choose the outgoing
* format based on the incoming request packet type. The assumption is
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_orig_node_vlan *vlan;
+ struct batadv_orig_ifinfo *last_candidate;
orig_node = container_of(ref, struct batadv_orig_node, refcount);
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_put(orig_ifinfo);
}
+
+ last_candidate = orig_node->last_bonding_candidate;
+ orig_node->last_bonding_candidate = NULL;
spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (last_candidate)
+ batadv_orig_ifinfo_put(last_candidate);
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+ hlist_del_rcu(&vlan->list);
+ batadv_orig_node_vlan_put(vlan);
+ }
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
+ ethhdr = eth_hdr(skb);
icmph = (struct batadv_icmp_header *)skb->data;
icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
return 0;
}
+/**
+ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+ struct batadv_orig_ifinfo *new_candidate)
+{
+ struct batadv_orig_ifinfo *old_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ old_candidate = orig_node->last_bonding_candidate;
+
+ if (new_candidate)
+ kref_get(&new_candidate->refcount);
+ orig_node->last_bonding_candidate = new_candidate;
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ if (old_candidate)
+ batadv_orig_ifinfo_put(old_candidate);
+}
+
/**
* batadv_find_router - find a suitable router for this originator
* @bat_priv: the bat priv with all the soft interface information
}
rcu_read_unlock();
- /* last_bonding_candidate is reset below, remove the old reference. */
- if (orig_node->last_bonding_candidate)
- batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
-
/* After finding candidates, handle the three cases:
* 1) there is a next candidate, use that
* 2) there is no next candidate, use the first of the list
if (next_candidate) {
batadv_neigh_node_put(router);
- /* remove references to first candidate, we don't need it. */
- if (first_candidate) {
- batadv_neigh_node_put(first_candidate_router);
- batadv_orig_ifinfo_put(first_candidate);
- }
+ kref_get(&next_candidate_router->refcount);
router = next_candidate_router;
- orig_node->last_bonding_candidate = next_candidate;
+ batadv_last_bonding_replace(orig_node, next_candidate);
} else if (first_candidate) {
batadv_neigh_node_put(router);
- /* refcounting has already been done in the loop above. */
+ kref_get(&first_candidate_router->refcount);
router = first_candidate_router;
- orig_node->last_bonding_candidate = first_candidate;
+ batadv_last_bonding_replace(orig_node, first_candidate);
} else {
- orig_node->last_bonding_candidate = NULL;
+ batadv_last_bonding_replace(orig_node, NULL);
+ }
+
+ /* cleanup of candidates */
+ if (first_candidate) {
+ batadv_neigh_node_put(first_candidate_router);
+ batadv_orig_ifinfo_put(first_candidate);
+ }
+
+ if (next_candidate) {
+ batadv_neigh_node_put(next_candidate_router);
+ batadv_orig_ifinfo_put(next_candidate);
}
return router;
struct batadv_orig_node *orig_node;
orig_node = batadv_gw_get_selected_orig(bat_priv);
- return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
- orig_node, vid);
+ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+ BATADV_P_DATA, orig_node, vid);
}
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
struct list_head *head)
{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *hard_iface;
+ struct batadv_softif_vlan *vlan;
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface == soft_iface)
BATADV_IF_CLEANUP_KEEP);
}
+ /* destroy the "untagged" VLAN */
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (vlan) {
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+ batadv_softif_vlan_put(vlan);
+ }
+
batadv_sysfs_del_meshif(soft_iface);
unregister_netdevice_queue(soft_iface, head);
}
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
- if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
- addr, BATADV_PRINT_VID(vid))) {
+ if (!vlan) {
+ net_ratelimited_function(batadv_info, soft_iface,
+ "adding TT local entry %pM to non-existent VLAN %d\n",
+ addr, BATADV_PRINT_VID(vid));
kfree(tt_local);
tt_local = NULL;
goto out;
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_put(tt_local);
- batadv_softif_vlan_put(vlan);
goto out;
}
return crc;
}
+/**
+ * batadv_tt_req_node_release - free tt_req node entry
+ * @ref: kref pointer of the tt req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+ struct batadv_tt_req_node *tt_req_node;
+
+ tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+ kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * possibly release it
+ * @tt_req_node: tt_req_node to be free'd
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+ kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node;
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
if (!tt_req_node)
goto unlock;
+ kref_init(&tt_req_node->refcount);
ether_addr_copy(tt_req_node->addr, orig_node->orig);
tt_req_node->issued_at = jiffies;
+ kref_get(&tt_req_node->refcount);
hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
spin_unlock_bh(&bat_priv->tt.req_list_lock);
out:
if (primary_if)
batadv_hardif_put(primary_if);
+
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
- /* hlist_del_init() verifies tt_req_node still is in the list */
- hlist_del_init(&tt_req_node->list);
+ if (!hlist_unhashed(&tt_req_node->list)) {
+ hlist_del_init(&tt_req_node->list);
+ batadv_tt_req_node_put(tt_req_node);
+ }
spin_unlock_bh(&bat_priv->tt.req_list_lock);
- kfree(tt_req_node);
}
+
+ if (tt_req_node)
+ batadv_tt_req_node_put(tt_req_node);
+
kfree(tvlv_tt_data);
return ret;
}
if (!batadv_compare_eth(node->addr, resp_src))
continue;
hlist_del_init(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
u32 last_bcast_seqno;
struct hlist_head neigh_list;
- /* neigh_list_lock protects: neigh_list and router */
+ /* neigh_list_lock protects: neigh_list, ifinfo_list,
+ * last_bonding_candidate and router
+ */
spinlock_t neigh_list_lock;
struct hlist_node hash_entry;
struct batadv_priv *bat_priv;
* @addr: mac address of claimed non-mesh client
* @vid: vlan id this client was detected on
* @backbone_gw: pointer to backbone gw claiming this client
+ * @backbone_lock: lock protecting backbone_gw pointer
* @lasttime: last time we heard of claim (locals only)
* @hash_entry: hlist node for batadv_priv_bla::claim_hash
* @refcount: number of contexts the object is used
u8 addr[ETH_ALEN];
unsigned short vid;
struct batadv_bla_backbone_gw *backbone_gw;
+ spinlock_t backbone_lock; /* protects backbone_gw */
unsigned long lasttime;
struct hlist_node hash_entry;
struct rcu_head rcu;
* struct batadv_tt_req_node - data to keep track of the tt requests in flight
* @addr: mac address address of the originator this request was sent to
* @issued_at: timestamp used for purging stale tt requests
+ * @refcount: number of contexts the object is used by
* @list: list node for batadv_priv_tt::req_list
*/
struct batadv_tt_req_node {
u8 addr[ETH_ALEN];
unsigned long issued_at;
+ struct kref refcount;
struct hlist_node list;
};
* change from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst)
fdb_delete_local(br, NULL, f);
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
/* check if vlan is allowed, to avoid spoofing */
if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+ __br_handle_local_finish(skb);
BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
br_pass_frame_up(skb);
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
- break;
+ *pskb = skb;
+ __br_handle_local_finish(skb);
+ return RX_HANDLER_PASS;
case 0x01: /* IEEE MAC (Pause) */
goto drop;
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
+ br->has_ipv6_addr = 0;
return NULL;
}
+
+ br->has_ipv6_addr = 1;
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
+ br->has_ipv6_addr = 1;
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
- unsigned int mtu = ip_skb_dst_mtu(skb);
+ unsigned int mtu = ip_skb_dst_mtu(sk, skb);
struct iphdr *iph = ip_hdr(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
struct bridge_vlan_xstats vxi;
struct br_vlan_stats stats;
- if (vl_idx++ < *prividx)
+ if (++vl_idx < *prividx)
continue;
memset(&vxi, 0, sizeof(vxi));
vxi.vid = v->vid;
u8 multicast_disabled:1;
u8 multicast_querier:1;
u8 multicast_query_use_ifaddr:1;
+ u8 has_ipv6_addr:1;
u32 hash_elasticity;
u32 hash_max;
static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
- struct bridge_mcast_other_query *querier)
+ struct bridge_mcast_other_query *querier,
+ const bool is_ipv6)
{
+ bool own_querier_enabled;
+
+ if (br->multicast_querier) {
+ if (is_ipv6 && !br->has_ipv6_addr)
+ own_querier_enabled = false;
+ else
+ own_querier_enabled = true;
+ } else {
+ own_querier_enabled = false;
+ }
+
return time_is_before_jiffies(querier->delay_time) &&
- (br->multicast_querier || timer_pending(&querier->timer));
+ (own_querier_enabled || timer_pending(&querier->timer));
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
- return __br_multicast_querier_exists(br, &br->ip4_other_query);
+ return __br_multicast_querier_exists(br,
+ &br->ip4_other_query, false);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
- return __br_multicast_querier_exists(br, &br->ip6_other_query);
+ return __br_multicast_querier_exists(br,
+ &br->ip6_other_query, true);
#endif
default:
return false;
return map;
}
+/*
+ * Encoding order is (new_up_client, new_state, new_weight). Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ * new_up_client: { osd=6, addr=... } # set osd_state and addr
+ * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+ struct ceph_osdmap *map)
+{
+ void *new_up_client;
+ void *new_state;
+ void *new_weight_end;
+ u32 len;
+
+ new_up_client = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ new_state = *p;
+ ceph_decode_32_safe(p, end, len, e_inval);
+ len *= sizeof(u32) + sizeof(u8);
+ ceph_decode_need(p, end, len, e_inval);
+ *p += len;
+
+ /* new_weight */
+ ceph_decode_32_safe(p, end, len, e_inval);
+ while (len--) {
+ s32 osd;
+ u32 w;
+
+ ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+ osd = ceph_decode_32(p);
+ w = ceph_decode_32(p);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d weight 0x%x %s\n", osd, w,
+ w == CEPH_OSD_IN ? "(in)" :
+ (w == CEPH_OSD_OUT ? "(out)" : ""));
+ map->osd_weight[osd] = w;
+
+ /*
+ * If we are marking in, set the EXISTS, and clear the
+ * AUTOOUT and NEW bits.
+ */
+ if (w) {
+ map->osd_state[osd] |= CEPH_OSD_EXISTS;
+ map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+ CEPH_OSD_NEW);
+ }
+ }
+ new_weight_end = *p;
+
+ /* new_state (up/down) */
+ *p = new_state;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ u8 xorstate;
+ int ret;
+
+ osd = ceph_decode_32(p);
+ xorstate = ceph_decode_8(p);
+ if (xorstate == 0)
+ xorstate = CEPH_OSD_UP;
+ BUG_ON(osd >= map->max_osd);
+ if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+ (xorstate & CEPH_OSD_UP))
+ pr_info("osd%d down\n", osd);
+ if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+ (xorstate & CEPH_OSD_EXISTS)) {
+ pr_info("osd%d does not exist\n", osd);
+ map->osd_weight[osd] = CEPH_OSD_IN;
+ ret = set_primary_affinity(map, osd,
+ CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+ if (ret)
+ return ret;
+ memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+ map->osd_state[osd] = 0;
+ } else {
+ map->osd_state[osd] ^= xorstate;
+ }
+ }
+
+ /* new_up_client */
+ *p = new_up_client;
+ len = ceph_decode_32(p);
+ while (len--) {
+ s32 osd;
+ struct ceph_entity_addr addr;
+
+ osd = ceph_decode_32(p);
+ ceph_decode_copy(p, &addr, sizeof(addr));
+ ceph_decode_addr(&addr);
+ BUG_ON(osd >= map->max_osd);
+ pr_info("osd%d up\n", osd);
+ map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+ map->osd_addr[osd] = addr;
+ }
+
+ *p = new_weight_end;
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
/*
* decode and apply an incremental map update.
*/
__remove_pg_pool(&map->pg_pools, pi);
}
- /* new_up */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- struct ceph_entity_addr addr;
- ceph_decode_32_safe(p, end, osd, e_inval);
- ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
- ceph_decode_addr(&addr);
- pr_info("osd%d up\n", osd);
- BUG_ON(osd >= map->max_osd);
- map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
- map->osd_addr[osd] = addr;
- }
-
- /* new_state */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd;
- u8 xorstate;
- ceph_decode_32_safe(p, end, osd, e_inval);
- xorstate = **(u8 **)p;
- (*p)++; /* clean flag */
- if (xorstate == 0)
- xorstate = CEPH_OSD_UP;
- if (xorstate & CEPH_OSD_UP)
- pr_info("osd%d down\n", osd);
- if (osd < map->max_osd)
- map->osd_state[osd] ^= xorstate;
- }
-
- /* new_weight */
- ceph_decode_32_safe(p, end, len, e_inval);
- while (len--) {
- u32 osd, off;
- ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
- osd = ceph_decode_32(p);
- off = ceph_decode_32(p);
- pr_info("osd%d weight 0x%x %s\n", osd, off,
- off == CEPH_OSD_IN ? "(in)" :
- (off == CEPH_OSD_OUT ? "(out)" : ""));
- if (osd < map->max_osd)
- map->osd_weight[osd] = off;
- }
+ /* new_up_client, new_state, new_weight */
+ err = decode_new_up_state_weight(p, end, map);
+ if (err)
+ goto bad;
/* new_pg_temp */
err = decode_new_pg_temp(p, end, map);
__scm_destroy(scm);
}
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
{
struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
__get_user(ptr, &fprog32->filter) ||
__put_user(len, &kfprog->len) ||
__put_user(compat_ptr(ptr), &kfprog->filter))
+ return NULL;
+
+ return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock_fprog __user *kfprog;
+
+ kfprog = get_compat_bpf_fprog(optval);
+ if (!kfprog)
return -EFAULT;
return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
- if (optname == SO_ATTACH_FILTER)
+ if (optname == SO_ATTACH_FILTER ||
+ optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
#include <net/sock_reuseport.h>
/**
- * sk_filter - run a packet through a socket filter
+ * sk_filter_trim_cap - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
+ * @cap: limit on how short the eBPF program may trim the packet
*
* Run the eBPF program and then cut skb->data to correct size returned by
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
* be accepted or -EPERM if the packet should be tossed.
*
*/
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-
- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();
return err;
}
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
}
static bool sk_filter_is_valid_access(int off, int size,
- enum bpf_access_type type)
+ enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
switch (off) {
case offsetof(struct __sk_buff, tc_classid):
}
static bool tc_cls_act_is_valid_access(int off, int size,
- enum bpf_access_type type)
+ enum bpf_access_type type,
+ enum bpf_reg_type *reg_type)
{
if (type == BPF_WRITE) {
switch (off) {
return false;
}
}
+
+ switch (off) {
+ case offsetof(struct __sk_buff, data):
+ *reg_type = PTR_TO_PACKET;
+ break;
+ case offsetof(struct __sk_buff, data_end):
+ *reg_type = PTR_TO_PACKET_END;
+ break;
+ }
+
return __is_valid_access(off, size, type);
}
}
EXPORT_SYMBOL(make_flow_keys_digest);
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+
+ __flow_hash_secret_init();
+
+ memset(&keys, 0, sizeof(keys));
+ __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+ NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+ return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
/**
* __skb_get_hash: calculate a flow hash
* @skb: sk_buff to calculate flow hash from
},
};
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+};
+
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
skb_flow_dissector_init(&flow_keys_dissector,
flow_keys_dissector_keys,
ARRAY_SIZE(flow_keys_dissector_keys));
+ skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+ flow_keys_dissector_symmetric_keys,
+ ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
skb_flow_dissector_init(&flow_keys_buf_dissector,
flow_keys_buf_dissector_keys,
ARRAY_SIZE(flow_keys_buf_dissector_keys));
* @xstats_type: TLV type for backward compatibility xstats TLV
* @lock: statistics lock
* @d: dumping handle
+ * @padattr: padding attribute
*
* Initializes the dumping handle, grabs the statistic lock and appends
* an empty TLV header to the socket buffer for use a container for all
* @type: TLV type for top level statistic TLV
* @lock: statistics lock
* @d: dumping handle
+ * @padattr: padding attribute
*
* Initializes the dumping handle, grabs the statistic lock and appends
* an empty TLV header to the socket buffer for use a container for all
tbl = neigh_tables[index];
if (!tbl)
goto out;
+ rcu_read_lock_bh();
neigh = __neigh_lookup_noref(tbl, addr, dev);
if (!neigh)
neigh = __neigh_create(tbl, addr, dev, false);
err = PTR_ERR(neigh);
- if (IS_ERR(neigh))
+ if (IS_ERR(neigh)) {
+ rcu_read_unlock_bh();
goto out_kfree_skb;
+ }
err = neigh->output(neigh, skb);
+ rcu_read_unlock_bh();
}
else if (index == NEIGH_LINK_TABLE) {
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include "net-sysfs.h"
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
-/**
- * skb_push_rcsum - push skb and update receive checksum
- * @skb: buffer to update
- * @len: length of data pulled
- *
- * This function performs an skb_push on the packet and updates
- * the CHECKSUM_COMPLETE checksum. It should be used on
- * receive path processing instead of skb_push unless you know
- * that the checksum difference is zero (e.g., a valid IP header)
- * or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
- skb_push(skb, len);
- skb_postpush_rcsum(skb, skb->data, len);
- return skb->data;
-}
-
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested, unsigned int trim_cap)
{
int rc = NET_RX_SUCCESS;
- if (sk_filter(sk, skb))
+ if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;
skb->dev = NULL;
kfree_skb(skb);
goto out;
}
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
sockc->tsflags |= tsflags;
break;
+ /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
+ case SCM_RIGHTS:
+ case SCM_CREDENTIALS:
+ break;
default:
return -EINVAL;
}
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
- __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));
+ local_bh_disable();
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
- DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
- DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+ __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
}
+ local_bh_enable();
out:
- dst_release(dst);
+ dst_release(dst);
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
goto discard_and_relse;
nf_reset(skb);
- return sk_receive_skb(sk, skb, 1);
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
- return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
+#include <net/nexthop.h>
#define RT_MIN_TABLE 1
struct rtnexthop *nhp = nla_data(attr);
int nhs = 0, nhlen = nla_len(attr);
- while(nhlen >= (int)sizeof(struct rtnexthop)) {
- if ((nhlen -= nhp->rtnh_len) < 0)
- return 0;
+ while (rtnh_ok(nhp, nhlen)) {
nhs++;
- nhp = RTNH_NEXT(nhp);
+ nhp = rtnh_next(nhp, &nhlen);
}
- return nhs;
+ /* leftover implies invalid nexthop configuration, discard it */
+ return nhlen > 0 ? 0 : nhs;
}
static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
int nhlen = nla_len(attr);
change_nexthops(fi) {
- int attrlen = nhlen - sizeof(struct rtnexthop);
- if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+ int attrlen;
+
+ if (!rtnh_ok(nhp, nhlen))
return -EINVAL;
nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
nh->nh_oif = nhp->rtnh_ifindex;
nh->nh_weight = nhp->rtnh_hops + 1;
- if (attrlen) {
+ attrlen = rtnh_attrlen(nhp);
+ if (attrlen > 0) {
struct nlattr *gw_attr;
gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
}
- nhp = RTNH_NEXT(nhp);
+
+ nhp = rtnh_next(nhp, &nhlen);
} endfor_nexthops(fi);
return 0;
void *tmp;
};
+struct esp_output_extra {
+ __be32 seqhi;
+ u32 esphoff;
+};
+
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
*
* TODO: Use spare space in skb for this where possible.
*/
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
unsigned int len;
- len = seqhilen;
+ len = extralen;
len += crypto_aead_ivsize(aead);
return kmalloc(len, GFP_ATOMIC);
}
-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
{
- return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+ return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
return crypto_aead_ivsize(aead) ?
- PTR_ALIGN((u8 *)tmp + seqhilen,
- crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+ PTR_ALIGN((u8 *)tmp + extralen,
+ crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
struct ip_esp_hdr *esph = (void *)(skb->data + offset);
void *tmp = ESP_SKB_CB(skb)->tmp;
- __be32 *seqhi = esp_tmp_seqhi(tmp);
+ __be32 *seqhi = esp_tmp_extra(tmp);
esph->seq_no = esph->spi;
esph->spi = *seqhi;
static void esp_output_restore_header(struct sk_buff *skb)
{
- esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+ void *tmp = ESP_SKB_CB(skb)->tmp;
+ struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+ esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+ sizeof(__be32));
}
static void esp_output_done_esn(struct crypto_async_request *base, int err)
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
+ struct esp_output_extra *extra;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
int tfclen;
int nfrags;
int assoclen;
- int seqhilen;
- __be32 *seqhi;
+ int extralen;
__be64 seqno;
/* skb is pure payload to encrypt */
nfrags = err;
assoclen = sizeof(*esph);
- seqhilen = 0;
+ extralen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
- seqhilen += sizeof(__be32);
- assoclen += seqhilen;
+ extralen += sizeof(*extra);
+ assoclen += sizeof(__be32);
}
- tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+ tmp = esp_alloc_tmp(aead, nfrags, extralen);
if (!tmp) {
err = -ENOMEM;
goto error;
}
- seqhi = esp_tmp_seqhi(tmp);
- iv = esp_tmp_iv(aead, tmp, seqhilen);
+ extra = esp_tmp_extra(tmp);
+ iv = esp_tmp_iv(aead, tmp, extralen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
* encryption.
*/
if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
- *seqhi = esph->spi;
+ extra->esphoff = (unsigned char *)esph -
+ skb_transport_header(skb);
+ esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+ extra->seqhi = esph->spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
aead_request_set_callback(req, 0, esp_output_done_esn, skb);
}
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
- seqhi = esp_tmp_seqhi(tmp);
+ seqhi = esp_tmp_extra(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
+ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ return -EINVAL;
+
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
goto err_inval;
+ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ goto err_inval;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
/* Fills in tpi and returns header length to be pulled. */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err, __be16 proto)
+ bool *csum_err, __be16 proto, int nhs)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+ if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ greh = (struct gre_base_hdr *)(skb->data + nhs);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
- if (!pskb_may_pull(skb, hdr_len))
+ if (!pskb_may_pull(skb, nhs + hdr_len))
return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ greh = (struct gre_base_hdr *)(skb->data + nhs);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
#include <net/gre.h>
#include <net/dst_metadata.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
/*
Problems & solutions
--------------------
* by themselves???
*/
+ const struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+ if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+ iph->ihl * 4) < 0) {
if (!csum_err) /* ignore csum errors. */
return;
}
}
#endif
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+ hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
if (hdr_len < 0)
goto drop;
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
struct ip_tunnel *t;
int err;
t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL);
- if (err < 0)
- goto out;
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
if (err)
goto out;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto out;
+
return dev;
out:
- free_netdev(dev);
+ ip_tunnel_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
return dst_output(net, sk, skb);
}
#endif
- mtu = ip_skb_dst_mtu(skb);
+ mtu = ip_skb_dst_mtu(sk, skb);
if (skb_is_gso(skb))
return ip_finish_output_gso(net, sk, skb, mtu);
iph = ip_hdr(skb);
- mtu = ip_skb_dst_mtu(skb);
+ mtu = ip_skb_dst_mtu(sk, skb);
if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
mtu = IPCB(skb)->frag_max_size;
static __be32 ic_netmask = NONE; /* Netmask for local subnet */
__be32 ic_gateway = NONE; /* Gateway IP address */
-__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
+#endif
__be32 ic_servaddr = NONE; /* Boot server IP address */
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c)
+ if (c) {
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXVIFS;
+ }
return c;
}
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
return flag;
}
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ u32 *last_oow_ack_time)
+{
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+ if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+ NET_INC_STATS(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+ }
+
+ *last_oow_ack_time = tcp_time_stamp;
+
+ return false; /* not rate-limited: go ahead, send dupack now! */
+}
+
/* Return true if we're currently rate-limiting out-of-window ACKs and
* thus shouldn't send a dupack right now. We rate-limit dupacks in
* response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
/* Data packets without SYNs are not likely part of an ACK loop. */
if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
!tcp_hdr(skb)->syn)
- goto not_rate_limited;
-
- if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
- if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
- NET_INC_STATS(net, mib_idx);
- return true; /* rate-limited: don't send yet! */
- }
- }
-
- *last_oow_ack_time = tcp_time_stamp;
+ return false;
-not_rate_limited:
- return false; /* not rate-limited: go ahead, send dupack now! */
+ return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}
/* RFC 5961 7 [ACK Throttling] */
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
- if (tcp_oow_rate_limited(sock_net(sk), skb,
- LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
- &tp->last_oow_ack_time))
+ if (__tcp_oow_rate_limited(sock_net(sk),
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+ &tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
struct sk_buff *hole = NULL;
- u32 last_lost;
+ u32 max_segs, last_lost;
int mib_idx;
int fwd_rexmitting = 0;
last_lost = tp->snd_una;
}
+ max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
tcp_for_write_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
int segs;
segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
if (segs <= 0)
return;
+ /* In case tcp_shift_skb_data() have aggregated large skbs,
+ * we need to make sure not sending too bigs TSO packets
+ */
+ segs = min_t(int, segs, max_segs);
if (fwd_rexmitting) {
begin_fwd:
return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
-static inline int compute_score(struct sock *sk, struct net *net,
- __be32 saddr, unsigned short hnum, __be16 sport,
- __be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
return score;
}
-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
- __be32 saddr, __be16 sport,
- __be32 daddr, unsigned int hnum, int dif)
-{
- int score;
- struct inet_sock *inet;
-
- if (!net_eq(sock_net(sk), net) ||
- ipv6_only_sock(sk))
- return -1;
-
- inet = inet_sk(sk);
-
- if (inet->inet_rcv_saddr != daddr ||
- inet->inet_num != hnum)
- return -1;
-
- score = (sk->sk_family == PF_INET) ? 2 : 1;
-
- if (inet->inet_daddr) {
- if (inet->inet_daddr != saddr)
- return -1;
- score += 4;
- }
-
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score += 4;
- }
-
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score += 4;
- }
-
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
- score++;
-
- return score;
-}
-
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
udp_ehash_secret + net_hash_mix(net));
}
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2, unsigned int slot2,
+ struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score2(sk, net, saddr, sport,
+ score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, slot2, skb);
+ hslot2, skb);
if (!result) {
+ unsigned int old_slot2 = slot2;
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
+ /* avoid searching the same slot again. */
+ if (unlikely(slot2 == old_slot2))
+ return result;
+
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
- htonl(INADDR_ANY), hnum, dif,
- hslot2, slot2, skb);
+ daddr, hnum, dif,
+ hslot2, skb);
}
return result;
}
result = NULL;
badness = 0;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, hnum, sport,
- daddr, dport, dif);
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
}
}
- if (rcu_access_pointer(sk->sk_filter)) {
- if (udp_lib_checksum_complete(skb))
+ if (rcu_access_pointer(sk->sk_filter) &&
+ udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter(sk, skb))
- goto drop;
- }
+
+ if (sk_filter(sk, skb))
+ goto drop;
+ if (unlikely(skb->len < sizeof(struct udphdr)))
+ goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
return err;
}
- return skb_checksum_init_zero_check(skb, proto, uh->check,
- inet_compute_pseudo);
+ /* Note, we are only interested in != 0 or == 0, thus the
+ * force to int.
+ */
+ return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+ inet_compute_pseudo);
}
/*
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
- ping_err(skb, offset, info);
+ ping_err(skb, offset, ntohl(info));
}
static int icmpv6_rcv(struct sk_buff *skb);
* we accept a checksum of zero here. When we find the socket
* for the UDP packet we'll check if that socket allows zero checksum
* for IPv6 (set by socket option).
+ *
+ * Note, we are only interested in != 0 or == 0, thus the
+ * force to int.
*/
- return skb_checksum_init_zero_check(skb, proto, uh->check,
- ip6_compute_pseudo);
+ return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+ ip6_compute_pseudo);
}
EXPORT_SYMBOL(udp6_csum_init);
}
}
+ free_percpu(non_pcpu_rt->rt6i_pcpu);
non_pcpu_rt->rt6i_pcpu = NULL;
}
bool csum_err = false;
int hdr_len;
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+ hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
if (hdr_len < 0)
goto drop;
if (ret)
return ret;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
tunnel = netdev_priv(dev);
ip6gre_tnl_link_config(tunnel, 1);
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
const struct in6_addr *final_dst)
{
struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
- int err;
dst = ip6_sk_dst_check(sk, dst, fl6);
+ if (!dst)
+ dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
- err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
- if (err)
- return ERR_PTR(err);
- if (final_dst)
- fl6->daddr = *final_dst;
-
- return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (!c)
return NULL;
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXMIFS;
return c;
}
fl6.daddr = *gw;
fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+ fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = 0;
+ int flags = RT6_LOOKUP_F_IFACE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, IPPROTO_IPV6, 0);
+ t->parms.link, 0, iph->protocol, 0);
err = 0;
goto out;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- IPPROTO_IPV6, 0);
+ iph->protocol, 0);
err = 0;
goto out;
}
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr,
int oif, struct tcp_md5sig_key *key, int rst,
- u8 tclass, u32 label)
+ u8 tclass, __be32 label)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key, u8 tclass,
- u32 label)
+ __be32 label)
{
tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
tclass, label);
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
udp_lib_rehash(sk, new_hash);
}
-static inline int compute_score(struct sock *sk, struct net *net,
- unsigned short hnum,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif)
+static int compute_score(struct sock *sk, struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, unsigned short hnum,
+ int dif)
{
int score;
struct inet_sock *inet;
return score;
}
-static inline int compute_score2(struct sock *sk, struct net *net,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr,
- unsigned short hnum, int dif)
-{
- int score;
- struct inet_sock *inet;
-
- if (!net_eq(sock_net(sk), net) ||
- udp_sk(sk)->udp_port_hash != hnum ||
- sk->sk_family != PF_INET6)
- return -1;
-
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
- return -1;
-
- score = 0;
- inet = inet_sk(sk);
-
- if (inet->inet_dport) {
- if (inet->inet_dport != sport)
- return -1;
- score++;
- }
-
- if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
- return -1;
- score++;
- }
-
- if (sk->sk_bound_dev_if) {
- if (sk->sk_bound_dev_if != dif)
- return -1;
- score++;
- }
-
- if (sk->sk_incoming_cpu == raw_smp_processor_id())
- score++;
-
- return score;
-}
-
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2, unsigned int slot2,
+ struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score2(sk, net, saddr, sport,
+ score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, slot2, skb);
+ hslot2, skb);
if (!result) {
+ unsigned int old_slot2 = slot2;
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
+ /* avoid searching the same slot again. */
+ if (unlikely(slot2 == old_slot2))
+ return result;
+
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
- &in6addr_any, hnum, dif,
- hslot2, slot2, skb);
+ daddr, hnum, dif,
+ hslot2, skb);
}
return result;
}
result = NULL;
badness = -1;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
+ score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
}
}
- if (rcu_access_pointer(sk->sk_filter)) {
- if (udp_lib_checksum_complete(skb))
- goto csum_error;
- if (sk_filter(sk, skb))
- goto drop;
- }
+ if (rcu_access_pointer(sk->sk_filter) &&
+ udp_lib_checksum_complete(skb))
+ goto csum_error;
+
+ if (sk_filter(sk, skb))
+ goto drop;
+ if (unlikely(skb->len < sizeof(struct udphdr)))
+ goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
.open = kcm_seq_open,
.read = seq_read,
.llseek = seq_lseek,
+ .release = seq_release_net,
};
static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
- struct udp_tunnel_sock_cfg udp_cfg;
+ struct udp_tunnel_sock_cfg udp_cfg = { };
udp_cfg.sk_user_data = tunnel;
udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
void mesh_sta_cleanup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed;
+ u32 changed = 0;
/*
* maybe userspace handles peer allocation and peering, but in either
* case the beacon is still generated by the kernel and we might need
* an update.
*/
- changed = mesh_accept_plinks_update(sdata);
+ if (sdata->u.mesh.user_mpm &&
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed |= mesh_plink_dec_estab_count(sdata);
+ changed |= mesh_accept_plinks_update(sdata);
if (!sdata->u.mesh.user_mpm) {
changed |= mesh_plink_deactivate(sta);
del_timer_sync(&sta->mesh->plink_timer);
}
+ /* make sure no readers can access nexthop sta from here on */
+ mesh_path_flush_by_nexthop(sta);
+ synchronize_net();
+
if (changed)
ieee80211_mbss_info_change_notify(sdata, changed);
}
u8 sa_offs, da_offs, pn_offs;
u8 band;
u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
- sizeof(rfc1042_header)];
+ sizeof(rfc1042_header)] __aligned(2);
struct rcu_head rcu_head;
};
* If available, return 1, otherwise invalidate this connection
* template and return 0.
*/
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = ct->ipvs;
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- expire_quiescent_template(ipvs, dest)) {
+ expire_quiescent_template(ipvs, dest) ||
+ (cdest && (dest != cdest))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
/* Check if a template already exists */
ct = ip_vs_ct_in_get(¶m);
- if (!ct || !ip_vs_check_template(ct)) {
+ if (!ct || !ip_vs_check_template(ct, NULL)) {
struct ip_vs_scheduler *sched;
/*
vport, ¶m) < 0)
return NULL;
ct = ip_vs_ct_in_get(¶m);
- if (!ct) {
+ /* check if template exists and points to the same dest */
+ if (!ct || !ip_vs_check_template(ct, dest)) {
ct = ip_vs_conn_new(¶m, dest->af, daddr, dport,
IP_VS_CONN_F_TEMPLATE, dest, 0);
if (!ct) {
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ int ifindex)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+ sock->sk->sk_bound_dev_if = ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
if (state == IP_VS_STATE_MASTER)
sock = make_send_sock(ipvs, id);
else
- sock = make_receive_sock(ipvs, id);
+ sock = make_receive_sock(ipvs, id, dev->ifindex);
if (IS_ERR(sock)) {
result = PTR_ERR(sock);
goto outtinfo;
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->allow_clash &&
+ !nfct_nat(ct) &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
nf_conntrack_tstamp_fini();
nf_conntrack_acct_fini();
nf_conntrack_expect_fini();
+
+ kmem_cache_destroy(nf_conntrack_cachep);
}
/*
unsigned int nr_slots, i;
size_t sz;
+ if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+ if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
if (ret) {
pr_err("failed to register helper for pf: %d port: %d\n",
ftp[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_ftp_fini();
return ret;
}
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
- int ret = 0;
- struct nf_conntrack_helper *cur;
+ struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
unsigned int h = helper_hash(&me->tuple);
+ struct nf_conntrack_helper *cur;
+ int ret = 0;
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
mutex_lock(&nf_ct_helper_mutex);
hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
- if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
- cur->tuple.src.l3num == me->tuple.src.l3num &&
- cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+ if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
ret = -EEXIST;
goto out;
}
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
irc[i].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_irc_fini();
return ret;
}
if (ret) {
pr_err("failed to register helper for pf: %d port: %d\n",
sane[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_sane_fini();
return ret;
}
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
sip[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_sip_fini();
return ret;
}
{ }
};
-#define NET_NF_CONNTRACK_MAX 2089
-
static struct ctl_table nf_ct_netfilter_table[] = {
{
.procname = "nf_conntrack_max",
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
tftp[i][j].tuple.src.l3num, ports[i]);
+ ports_c = i;
nf_conntrack_tftp_fini();
return ret;
}
* Once the queue is registered it must reinject all packets it
* receives, no matter what.
*/
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
/* return EBUSY when somebody else is registered, return EEXIST if the
* same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
/* should never happen, we only have one queueing backend in kernel */
- WARN_ON(rcu_access_pointer(queue_handler));
- rcu_assign_pointer(queue_handler, qh);
+ WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+ rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
{
- RCU_INIT_POINTER(queue_handler, NULL);
- synchronize_rcu();
+ RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
const struct nf_queue_handler *qh;
rcu_read_lock();
- qh = rcu_dereference(queue_handler);
+ qh = rcu_dereference(net->nf.queue_handler);
if (qh)
qh->nf_hook_drop(net, ops);
rcu_read_unlock();
struct nf_queue_entry *entry = NULL;
const struct nf_afinfo *afinfo;
const struct nf_queue_handler *qh;
+ struct net *net = state->net;
/* QUEUE == DROP if no one is waiting, to be safe. */
- qh = rcu_dereference(queue_handler);
+ qh = rcu_dereference(net->nf.queue_handler);
if (!qh) {
status = -ESRCH;
goto err;
err = nf_tables_newexpr(ctx, &info, expr);
if (err < 0)
- goto err2;
+ goto err3;
return expr;
+err3:
+ kfree(expr);
err2:
module_put(info.ops->type->owner);
err1:
/* Only accept unspec with dump */
if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
+ if (!nla[NFTA_SET_TABLE])
+ return -EINVAL;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
* jumps are already validated for that chain.
*/
list_for_each_entry(i, &set->bindings, list) {
- if (binding->flags & NFT_SET_MAP &&
+ if (i->flags & NFT_SET_MAP &&
i->chain == binding->chain)
goto bind;
}
+ iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_bind_check_setelem;
set->ops->walk(ctx, set, &iter);
- if (iter.err < 0) {
- /* Destroy anonymous sets if binding fails */
- if (set->flags & NFT_SET_ANONYMOUS)
- nf_tables_set_destroy(ctx, set);
-
+ if (iter.err < 0)
return iter.err;
- }
}
bind:
binding->chain = ctx->chain;
if (nest == NULL)
goto nla_put_failure;
- args.cb = cb;
- args.skb = skb;
- args.iter.skip = cb->args[0];
- args.iter.count = 0;
- args.iter.err = 0;
- args.iter.fn = nf_tables_dump_setelem;
+ args.cb = cb;
+ args.skb = skb;
+ args.iter.genmask = nft_genmask_cur(ctx.net);
+ args.iter.skip = cb->args[0];
+ args.iter.count = 0;
+ args.iter.err = 0;
+ args.iter.fn = nf_tables_dump_setelem;
set->ops->walk(&ctx, set, &args.iter);
nla_nest_end(skb, nest);
binding->chain != chain)
continue;
+ iter.genmask = nft_genmask_next(ctx->net);
iter.skip = 0;
iter.count = 0;
iter.err = 0;
list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
/* This rule is not active, skip. */
- if (unlikely(rule->genmask & (1 << gencursor)))
+ if (unlikely(rule->genmask & gencursor))
continue;
rulenum++;
if (entskb->tstamp.tv64) {
struct nfqnl_msg_packet_timestamp ts;
- struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+ struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
net->nf.proc_netfilter, &nfqnl_file_ops))
return -ENOMEM;
#endif
+ nf_register_queue_handler(net, &nfqh);
return 0;
}
static void __net_exit nfnl_queue_net_exit(struct net *net)
{
+ nf_unregister_queue_handler(net);
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+ synchronize_rcu();
+}
+
static struct pernet_operations nfnl_queue_net_ops = {
- .init = nfnl_queue_net_init,
- .exit = nfnl_queue_net_exit,
- .id = &nfnl_queue_net_id,
- .size = sizeof(struct nfnl_queue_net),
+ .init = nfnl_queue_net_init,
+ .exit = nfnl_queue_net_exit,
+ .exit_batch = nfnl_queue_net_exit_batch,
+ .id = &nfnl_queue_net_id,
+ .size = sizeof(struct nfnl_queue_net),
};
static int __init nfnetlink_queue_init(void)
}
register_netdevice_notifier(&nfqnl_dev_notifier);
- nf_register_queue_handler(&nfqh);
return status;
cleanup_netlink_notifier:
static void __exit nfnetlink_queue_fini(void)
{
- nf_unregister_queue_handler();
unregister_netdevice_notifier(&nfqnl_dev_notifier);
nfnetlink_subsys_unregister(&nfqnl_subsys);
netlink_unregister_notifier(&nfqnl_rtnl_notifier);
const struct nf_conn_help *help;
const struct nf_conntrack_tuple *tuple;
const struct nf_conntrack_helper *helper;
- long diff;
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
return;
#endif
case NFT_CT_EXPIRATION:
- diff = (long)jiffies - (long)ct->timeout.expires;
- if (diff < 0)
- diff = 0;
- *dest = jiffies_to_msecs(diff);
+ *dest = jiffies_to_msecs(nf_ct_expires(ct));
return;
case NFT_CT_HELPER:
if (ct->master == NULL)
struct nft_hash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
- u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
int err;
err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
goto cont;
if (nft_set_elem_expired(&he->ext))
goto cont;
- if (!nft_set_elem_active(&he->ext, genmask))
+ if (!nft_set_elem_active(&he->ext, iter->genmask))
goto cont;
elem.priv = he;
skb->pkt_type = value;
break;
case NFT_META_NFTRACE:
- skb->nf_trace = 1;
+ skb->nf_trace = !!value;
break;
default:
WARN_ON(1);
struct nft_rbtree_elem *rbe;
struct nft_set_elem elem;
struct rb_node *node;
- u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
spin_lock_bh(&nft_rbtree_lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
if (iter->count < iter->skip)
goto cont;
- if (!nft_set_elem_active(&rbe->ext, genmask))
+ if (!nft_set_elem_active(&rbe->ext, iter->genmask))
goto cont;
elem.priv = rbe;
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
- target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
return -EINVAL;
/* compat_xt_entry match has less strict aligment requirements,
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
- target_offset + sizeof(struct xt_standard_target) != next_offset)
+ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
return -EINVAL;
return xt_check_entry_match(elems, base + target_offset,
*/
state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
__ovs_ct_update_key(key, state, &info->zone, exp->master);
- } else
- return __ovs_ct_lookup(net, key, info, skb);
+ } else {
+ struct nf_conn *ct;
+ int err;
+
+ err = __ovs_ct_lookup(net, key, info, skb);
+ if (err)
+ return err;
+
+ ct = (struct nf_conn *)skb->nfct;
+ if (ct)
+ nf_ct_deliver_cached_events(ct);
+ }
return 0;
}
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
+#include <net/compat.h>
#include "internal.h"
struct sk_buff *skb,
unsigned int num)
{
- return reciprocal_scale(skb_get_hash(skb), num);
+ return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
goto out_unlock;
}
- sockc.tsflags = 0;
+ sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
- if (unlikely(err)) {
- err = -EINVAL;
+ if (unlikely(err))
goto out_unlock;
- }
}
skb->protocol = proto;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
}
- sockc.tsflags = 0;
+ sockc.tsflags = po->sk.sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(&po->sk, msg, &sockc);
if (unlikely(err))
if (unlikely(!(dev->flags & IFF_UP)))
goto out_unlock;
- sockc.tsflags = 0;
+ sockc.tsflags = sk->sk_tsflags;
sockc.mark = sk->sk_mark;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
}
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct packet_sock *po = pkt_sk(sock->sk);
+
+ if (level != SOL_PACKET)
+ return -ENOPROTOOPT;
+
+ if (optname == PACKET_FANOUT_DATA &&
+ po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+ optval = (char __user *)get_compat_bpf_fprog(optval);
+ if (!optval)
+ return -EFAULT;
+ optlen = sizeof(struct sock_fprog);
+ }
+
+ return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
.shutdown = sock_no_shutdown,
.setsockopt = packet_setsockopt,
.getsockopt = packet_getsockopt,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = compat_packet_setsockopt,
+#endif
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,
}
}
- if (conn->c_version < RDS_PROTOCOL(3,1)) {
+ if (conn->c_version < RDS_PROTOCOL(3, 1)) {
printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
" no longer supported\n",
&conn->c_faddr,
*/
static void rds_loop_inc_free(struct rds_incoming *inc)
{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_put(rm);
+ struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+
+ rds_message_put(rm);
}
/* we need to at least give the thread something to succeed */
RDS_CONN_CONNECTING,
RDS_CONN_DISCONNECTING,
RDS_CONN_UP,
+ RDS_CONN_RESETTING,
RDS_CONN_ERROR,
};
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
/* transport.c */
minfo.fport = inc->i_hdr.h_dport;
}
+ minfo.flags = 0;
+
rds_info_copy(iter, &minfo, sizeof(minfo));
}
list_splice_init(&conn->c_retrans, &conn->c_send_queue);
spin_unlock_irqrestore(&conn->c_lock, flags);
}
+EXPORT_SYMBOL_GPL(rds_send_reset);
static int acquire_in_xmit(struct rds_connection *conn)
{
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
- rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
+ rds_sysctl_reg_table =
+ register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
}
/*
- * This is the only path that sets tc->t_sock. Send and receive trust that
- * it is set. The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches the to the new sock and
+ * returns the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks. Send and receive trust that
+ * it is set. The absence of RDS_CONN_UP bit protects those paths
+ * from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+ struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ struct socket *osock = tc->t_sock;
+
+ if (!osock)
+ goto newsock;
+
+ /* Need to resolve a duelling SYN between peers.
+ * We have an outstanding SYN to this peer, which may
+ * potentially have transitioned to the RDS_CONN_UP state,
+ * so we must quiesce any send threads before resetting
+ * c_transport_data. We quiesce these threads by setting
+ * c_state to something other than RDS_CONN_UP, and then
+ * waiting for any existing threads in rds_send_xmit to
+ * complete release_in_xmit(). (Subsequent threads entering
+ * rds_send_xmit() will bail on !rds_conn_up().
+ *
+ * However an incoming syn-ack at this point would end up
+ * marking the conn as RDS_CONN_UP, and would again permit
+ * rds_send_xmi() threads through, so ideally we would
+ * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+ * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+ * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+ * would not get set. As a result, we set c_state to
+ * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change
+ * cannot mark rds_conn_path_up() in the window before lock_sock()
+ */
+ atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+ wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+ lock_sock(osock->sk);
+ /* reset receive side state for rds_tcp_data_recv() for osock */
+ if (tc->t_tinc) {
+ rds_inc_put(&tc->t_tinc->ti_inc);
+ tc->t_tinc = NULL;
+ }
+ tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+ tc->t_tinc_data_rem = 0;
+ tc->t_sock = NULL;
+
+ write_lock_bh(&osock->sk->sk_callback_lock);
+
+ osock->sk->sk_user_data = NULL;
+ osock->sk->sk_data_ready = tc->t_orig_data_ready;
+ osock->sk->sk_write_space = tc->t_orig_write_space;
+ osock->sk->sk_state_change = tc->t_orig_state_change;
+ write_unlock_bh(&osock->sk->sk_callback_lock);
+ release_sock(osock->sk);
+ sock_release(osock);
+newsock:
+ rds_send_reset(conn);
+ lock_sock(sock->sk);
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ tc->t_sock = sock;
+ sock->sk->sk_user_data = conn;
+ sock->sk->sk_data_ready = rds_tcp_data_ready;
+ sock->sk->sk_write_space = rds_tcp_write_space;
+ sock->sk->sk_state_change = rds_tcp_state_change;
+
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+ release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with data path
*/
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
ret = rds_tcp_recv_init();
if (ret)
- goto out_slab;
+ goto out_pernet;
ret = rds_trans_register(&rds_tcp_transport);
if (ret)
out_recv:
rds_tcp_recv_exit();
-out_slab:
+out_pernet:
unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
void rds_tcp_restore_callbacks(struct socket *sock,
struct rds_tcp_connection *tc);
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
void rds_tcp_xmit_prepare(struct rds_connection *conn);
void rds_tcp_xmit_complete(struct rds_connection *conn);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off);
+ unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
/* tcp_stats.c */
rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
- switch(sk->sk_state) {
- /* ignore connecting sockets as they make progress */
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- break;
- case TCP_ESTABLISHED:
- rds_connect_complete(conn);
- break;
- case TCP_CLOSE_WAIT:
- case TCP_CLOSE:
- rds_conn_drop(conn);
- default:
- break;
+ switch (sk->sk_state) {
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+ break;
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSE:
+ rds_conn_drop(conn);
+ default:
+ break;
}
out:
read_unlock_bh(&sk->sk_callback_lock);
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp = NULL;
int conn_state;
- struct sock *nsk;
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
!conn->c_outgoing) {
goto rst_nsk;
} else {
- atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
- wait_event(conn->c_waitq,
- !test_bit(RDS_IN_XMIT, &conn->c_flags));
- rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+ rds_tcp_reset_callbacks(new_sock, conn);
conn->c_outgoing = 0;
+ /* rds_connect_path_complete() marks RDS_CONN_UP */
+ rds_connect_path_complete(conn, RDS_CONN_RESETTING);
}
+ } else {
+ rds_tcp_set_callbacks(new_sock, conn);
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
}
- rds_tcp_set_callbacks(new_sock, conn);
- rds_connect_complete(conn); /* marks RDS_CONN_UP */
new_sock = NULL;
ret = 0;
goto out;
rst_nsk:
/* reset the newly returned accept sock and bail */
- nsk = new_sock->sk;
- rds_tcp_stats_inc(s_tcp_listen_closed_stale);
- nsk->sk_user_data = NULL;
- nsk->sk_prot->disconnect(nsk, 0);
- tcp_done(nsk);
- new_sock = NULL;
+ kernel_sock_shutdown(new_sock, SHUT_RDWR);
ret = 0;
out:
if (rs_tcp)
while (left) {
if (!tinc) {
tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
- arg->gfp);
+ arg->gfp);
if (!tinc) {
desc->error = -ENOMEM;
goto out;
static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
struct kvec vec = {
- .iov_base = data,
- .iov_len = len,
+ .iov_base = data,
+ .iov_len = len,
+ };
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
};
- struct msghdr msg = {
- .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
- };
return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}
/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
- unsigned int hdr_off, unsigned int sg, unsigned int off)
+ unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_tcp_connection *tc = conn->c_transport_data;
int done = 0;
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
{
- if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+ if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
printk(KERN_WARNING "%s: Cannot transition to state UP, "
"current state is %d\n",
__func__,
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+ rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
EXPORT_SYMBOL_GPL(rds_connect_complete);
/*
rds_info_iter_unmap(iter);
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++)
- {
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
if (!trans || !trans->stats_info_copy)
continue;
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (sock_queue_rcv_skb(sk, skb) == 0) {
+ if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ __sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
/* pin the cipher we need so that the crypto layer doesn't invoke
* keventd to go get it */
rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(rxkad_ci))
- return PTR_ERR(rxkad_ci);
- return 0;
+ return PTR_ERR_OR_ZERO(rxkad_ci);
}
/*
nla_nest_end(skb, nest);
ret = skb->len;
} else
- nla_nest_cancel(skb, nest);
+ nlmsg_trim(skb, b);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
if (NETLINK_CB(cb->skb).portid && ret)
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
- mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+ mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
if (!mi->metaval)
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
- mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+ mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
if (!mi->metaval)
return -ENOMEM;
}
/* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for existing action
*/
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
- void *val, int len)
+ void *val, int len, bool exists)
{
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
if (!ops) {
ret = -ENOENT;
#ifdef CONFIG_MODULES
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
rtnl_unlock();
request_module("ifemeta%u", metaid);
rtnl_lock();
- spin_lock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ops = find_ife_oplist(metaid);
#endif
}
}
/* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for existing action
*/
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
- int len)
+ int len, bool atomic)
{
struct tcf_meta_info *mi = NULL;
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
if (!ops)
return -ENOENT;
- mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+ mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
if (!mi) {
/*put back what find_ife_oplist took */
module_put(ops->owner);
mi->metaid = metaid;
mi->ops = ops;
if (len > 0) {
- ret = ops->alloc(mi, metaval);
+ ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
if (ret != 0) {
kfree(mi);
module_put(ops->owner);
int rc = 0;
int installed = 0;
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
- rc = add_metainfo(ife, o->metaid, NULL, 0);
+ rc = add_metainfo(ife, o->metaid, NULL, 0, true);
if (rc == 0)
installed += 1;
}
+ read_unlock(&ife_mod_lock);
if (installed)
return 0;
spin_unlock_bh(&ife->tcf_lock);
}
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ bool exists)
{
int len = 0;
int rc = 0;
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len);
+ rc = load_metaops_and_vet(ife, i, val, len, exists);
if (rc != 0)
return rc;
- rc = add_metainfo(ife, i, val, len);
+ rc = add_metainfo(ife, i, val, len, exists);
if (rc)
return rc;
}
saddr = nla_data(tb[TCA_IFE_SMAC]);
}
- spin_lock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ife->tcf_action = parm->action;
if (parm->flags & IFE_ENCODE) {
if (ret == ACT_P_CREATED)
_tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
return err;
}
- err = populate_metalist(ife, tb2);
+ err = populate_metalist(ife, tb2, exists);
if (err)
goto metadata_parse_err;
if (ret == ACT_P_CREATED)
_tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
return err;
}
}
- spin_unlock_bh(&ife->tcf_lock);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(tn, a);
}
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
- if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+ if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+ if (exists)
+ tcf_hash_release(a, bind);
return -EINVAL;
+ }
- if (!tcf_hash_check(tn, index, a, bind)) {
+ if (!exists) {
ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
false);
if (ret)
if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
- skb_push(skb2, skb->mac_len);
+ skb_push_rcsum(skb2, skb->mac_len);
}
/* mirror is always swallowed */
bool peak_present;
};
#define to_police(pc) \
- container_of(pc, struct tcf_police, common)
+ container_of(pc->priv, struct tcf_police, common)
#define POL_TAB_MASK 15
struct nlattr *est, struct tc_action *a,
int ovr, int bind)
{
- unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
- struct tcf_hashinfo *hinfo = tn->hinfo;
int size;
if (nla == NULL)
if (parm->index) {
if (tcf_hash_search(tn, a, parm->index)) {
- police = to_police(a->priv);
+ police = to_police(a);
if (bind) {
police->tcf_bindcnt += 1;
police->tcf_refcnt += 1;
/* not replacing */
return -EEXIST;
}
+ } else {
+ ret = tcf_hash_create(tn, parm->index, NULL, a,
+ sizeof(*police), bind, false);
+ if (ret)
+ return ret;
+ ret = ACT_P_CREATED;
}
- police = kzalloc(sizeof(*police), GFP_KERNEL);
- if (police == NULL)
- return -ENOMEM;
- ret = ACT_P_CREATED;
- police->tcf_refcnt = 1;
- spin_lock_init(&police->tcf_lock);
- if (bind)
- police->tcf_bindcnt = 1;
+ police = to_police(a);
override:
if (parm->rate.rate) {
err = -ENOMEM;
return ret;
police->tcfp_t_c = ktime_get_ns();
- police->tcf_index = parm->index ? parm->index :
- tcf_hash_new_index(tn);
- police->tcf_tm.install = jiffies;
- police->tcf_tm.lastuse = jiffies;
- h = tcf_hash(police->tcf_index, POL_TAB_MASK);
- spin_lock_bh(&hinfo->lock);
- hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
- spin_unlock_bh(&hinfo->lock);
+ tcf_hash_insert(tn, a);
- a->priv = police;
return ret;
failure_unlock:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
if (ret == ACT_P_CREATED)
- kfree(police);
+ tcf_hash_cleanup(a, est);
return err;
}
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
- if (!tc_should_offload(dev, 0))
+ if (!tc_should_offload(dev, tp, 0))
return;
offload.command = TC_CLSFLOWER_DESTROY;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
- if (!tc_should_offload(dev, flags))
+ if (!tc_should_offload(dev, tp, flags))
return;
offload.command = TC_CLSFLOWER_REPLACE;
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
- if (!tc_should_offload(dev, 0))
+ if (!tc_should_offload(dev, tp, 0))
return;
offload.command = TC_CLSFLOWER_STATS;
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, 0)) {
+ if (tc_should_offload(dev, tp, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
offload.cls_u32->knode.handle = handle;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
struct tc_to_netdev offload;
int err;
+ if (!tc_should_offload(dev, tp, flags))
+ return tc_skip_sw(flags) ? -EINVAL : 0;
+
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, flags)) {
- offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
- offload.cls_u32->hnode.divisor = h->divisor;
- offload.cls_u32->hnode.handle = h->handle;
- offload.cls_u32->hnode.prio = h->prio;
+ offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+ offload.cls_u32->hnode.divisor = h->divisor;
+ offload.cls_u32->hnode.handle = h->handle;
+ offload.cls_u32->hnode.prio = h->prio;
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
- if (tc_skip_sw(flags))
- return err;
- }
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->protocol, &offload);
+ if (tc_skip_sw(flags))
+ return err;
return 0;
}
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, 0)) {
+ if (tc_should_offload(dev, tp, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
- if (tc_should_offload(dev, flags)) {
- offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
- offload.cls_u32->knode.handle = n->handle;
- offload.cls_u32->knode.fshift = n->fshift;
+ if (!tc_should_offload(dev, tp, flags))
+ return tc_skip_sw(flags) ? -EINVAL : 0;
+
+ offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+ offload.cls_u32->knode.handle = n->handle;
+ offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
- offload.cls_u32->knode.val = n->val;
- offload.cls_u32->knode.mask = n->mask;
+ offload.cls_u32->knode.val = n->val;
+ offload.cls_u32->knode.mask = n->mask;
#else
- offload.cls_u32->knode.val = 0;
- offload.cls_u32->knode.mask = 0;
+ offload.cls_u32->knode.val = 0;
+ offload.cls_u32->knode.mask = 0;
#endif
- offload.cls_u32->knode.sel = &n->sel;
- offload.cls_u32->knode.exts = &n->exts;
- if (n->ht_down)
- offload.cls_u32->knode.link_handle = n->ht_down->handle;
-
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
- tp->protocol, &offload);
- if (tc_skip_sw(flags))
- return err;
- }
+ offload.cls_u32->knode.sel = &n->sel;
+ offload.cls_u32->knode.exts = &n->exts;
+ if (n->ht_down)
+ offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->protocol, &offload);
+ if (tc_skip_sw(flags))
+ return err;
return 0;
}
if (tb[TCA_U32_FLAGS]) {
flags = nla_get_u32(tb[TCA_U32_FLAGS]);
if (!tc_flags_valid(flags))
- return err;
+ return -EINVAL;
}
n = (struct tc_u_knode *)*arg;
ht->divisor = divisor;
ht->handle = handle;
ht->prio = tp->prio;
+
+ err = u32_replace_hw_hnode(tp, ht, flags);
+ if (err) {
+ kfree(ht);
+ return err;
+ }
+
RCU_INIT_POINTER(ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, ht);
*arg = (unsigned long)ht;
- u32_replace_hw_hnode(tp, ht, flags);
return 0;
}
cl->deficit = cl->quantum;
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return err;
}
bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
if (cl->qdisc->ops->drop) {
len = cl->qdisc->ops->drop(cl->qdisc);
if (len > 0) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
qdisc_reset(cl->qdisc);
}
}
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
+ unsigned int prev_backlog;
+
if (likely(skb_queue_len(&sch->q) < sch->limit))
return qdisc_enqueue_tail(skb, sch);
+ prev_backlog = sch->qstats.backlog;
/* queue full, remove one skb to fulfill the limit */
__qdisc_queue_drop_head(sch, &sch->q);
qdisc_qstats_drop(sch);
qdisc_enqueue_tail(skb, sch);
+ qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
return NET_XMIT_CN;
}
unsigned int idx, prev_backlog, prev_qlen;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
+ unsigned int pkt_len;
bool memory_limited;
idx = fq_codel_classify(skb, sch, &ret);
prev_backlog = sch->qstats.backlog;
prev_qlen = sch->q.qlen;
+ /* save this packet length as it might be dropped by fq_codel_drop() */
+ pkt_len = qdisc_pkt_len(skb);
/* fq_codel_drop() is quite expensive, as it performs a linear search
* in q->backlogs[] to find a fat flow.
* So instead of dropping a single packet, drop half of its backlog
*/
ret = fq_codel_drop(sch, q->drop_batch_size);
- q->drop_overlimit += prev_qlen - sch->q.qlen;
+ prev_qlen -= sch->q.qlen;
+ prev_backlog -= sch->qstats.backlog;
+ q->drop_overlimit += prev_qlen;
if (memory_limited)
- q->drop_overmemory += prev_qlen - sch->q.qlen;
- /* As we dropped packet(s), better let upper stack know this */
- qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ q->drop_overmemory += prev_qlen;
- return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ /* As we dropped packet(s), better let upper stack know this.
+ * If we dropped a packet for this flow, return NET_XMIT_CN,
+ * but in this case, our parents wont increase their backlogs.
+ */
+ if (ret == idx) {
+ qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+ prev_backlog - pkt_len);
+ return NET_XMIT_CN;
+ }
+ qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+ return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped;
}
- if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+ if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1;
if (idx < q->flows_cnt)
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
{
q->gso_skb = skb;
q->qstats.requeues++;
+ qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++; /* it's still part of the queue */
__netif_schedule(q);
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
+ qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
} else
skb = NULL;
q->eligible = RB_ROOT;
INIT_LIST_HEAD(&q->droplist);
qdisc_watchdog_cancel(&q->watchdog);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
struct hfsc_sched *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_hfsc_qopt qopt;
- struct hfsc_class *cl;
- unsigned int i;
-
- sch->qstats.backlog = 0;
- for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
- sch->qstats.backlog += cl->qdisc->qstats.backlog;
- }
qopt.defcls = q->defcls;
if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
cl->qstats.drops++;
qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
struct htb_sched *q = container_of(work, struct htb_sched, work);
struct Qdisc *sch = q->watchdog.qdisc;
+ rcu_read_lock();
__netif_schedule(qdisc_root(sch));
+ rcu_read_unlock();
}
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
if (!cl->level && cl->un.leaf.q)
qlen = cl->un.leaf.q->q.qlen;
- cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
- cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+ cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+ INT_MIN, INT_MAX);
+ cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+ INT_MIN, INT_MAX);
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
return TC_H_MIN(classid) + 1;
}
+static bool ingress_cl_offload(u32 classid)
+{
+ return true;
+}
+
static unsigned long ingress_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = ingress_find_tcf,
+ .tcf_cl_offload = ingress_cl_offload,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
};
}
}
+static bool clsact_cl_offload(u32 classid)
+{
+ return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
static unsigned long clsact_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = clsact_find_tcf,
+ .tcf_cl_offload = clsact_cl_offload,
.bind_tcf = clsact_bind_filter,
.unbind_tcf = ingress_put,
};
#endif
if (q->qdisc) {
+ unsigned int pkt_len = qdisc_pkt_len(skb);
int err = qdisc_enqueue(skb, q->qdisc);
- if (unlikely(err != NET_XMIT_SUCCESS)) {
- if (net_xmit_drop_count(err)) {
- qdisc_qstats_drop(sch);
- qdisc_tree_reduce_backlog(sch, 1,
- qdisc_pkt_len(skb));
- }
+ if (err != NET_XMIT_SUCCESS &&
+ net_xmit_drop_count(err)) {
+ qdisc_qstats_drop(sch);
+ qdisc_tree_reduce_backlog(sch, 1,
+ pkt_len);
}
goto tfifo_dequeue;
}
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
if (skb) {
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
for (prio = q->bands-1; prio >= 0; prio--) {
qdisc = q->queues[prio];
if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
struct prio_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *queues[TCQ_PRIO_BANDS];
+ int oldbands = q->bands, i;
struct tc_prio_qopt *qopt;
- int i;
if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
return -EINVAL;
}
+ /* Before commit, make sure we can allocate all new qdiscs */
+ for (i = oldbands; i < qopt->bands; i++) {
+ queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, i + 1));
+ if (!queues[i]) {
+ while (i > oldbands)
+ qdisc_destroy(queues[--i]);
+ return -ENOMEM;
+ }
+ }
+
sch_tree_lock(sch);
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+ for (i = q->bands; i < oldbands; i++) {
struct Qdisc *child = q->queues[i];
- q->queues[i] = &noop_qdisc;
- if (child != &noop_qdisc) {
- qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
- qdisc_destroy(child);
- }
- }
- sch_tree_unlock(sch);
- for (i = 0; i < q->bands; i++) {
- if (q->queues[i] == &noop_qdisc) {
- struct Qdisc *child, *old;
-
- child = qdisc_create_dflt(sch->dev_queue,
- &pfifo_qdisc_ops,
- TC_H_MAKE(sch->handle, i + 1));
- if (child) {
- sch_tree_lock(sch);
- old = q->queues[i];
- q->queues[i] = child;
-
- if (old != &noop_qdisc) {
- qdisc_tree_reduce_backlog(old,
- old->q.qlen,
- old->qstats.backlog);
- qdisc_destroy(old);
- }
- sch_tree_unlock(sch);
- }
- }
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
+ qdisc_destroy(child);
}
+
+ for (i = oldbands; i < q->bands; i++)
+ q->queues[i] = queues[i];
+
+ sch_tree_unlock(sch);
return 0;
}
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
- struct prio_sched_data *q = qdisc_priv(sch);
- int i;
-
- for (i = 0; i < TCQ_PRIO_BANDS; i++)
- q->queues[i] = &noop_qdisc;
-
- if (opt == NULL) {
+ if (!opt)
return -EINVAL;
- } else {
- int err;
- if ((err = prio_tune(sch, opt)) != 0)
- return err;
- }
- return 0;
+ return prio_tune(sch, opt);
}
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
err = qfq_change_agg(sch, cl, cl->agg->class_weight,
qdisc_pkt_len(skb));
- if (err)
- return err;
+ if (err) {
+ cl->qstats.drops++;
+ return qdisc_drop(skb, sch);
+ }
}
err = qdisc_enqueue(skb, cl->qdisc);
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
skb = child->dequeue(child);
if (skb) {
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
if (!red_is_idling(&q->vars))
if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++;
qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
struct red_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
red_restart(&q->vars);
}
return ret;
}
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
unsigned int len = 0;
if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
qdisc_qstats_drop(sch);
}
q->t_c = now;
q->tokens = toks;
q->ptokens = ptoks;
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
q->t_c = ktime_get_ns();
q->tokens = q->buffer;
struct sctp_ep_common *rcvr;
struct sctp_transport *transport = NULL;
struct sctp_chunk *chunk;
- struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
if (skb_linearize(skb))
goto discard_it;
- sh = sctp_hdr(skb);
-
/* Pull up the IP and SCTP headers. */
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
chunk->rcvr = rcvr;
/* Remember the SCTP header. */
- chunk->sctp_hdr = sh;
+ chunk->sctp_hdr = sctp_hdr(skb);
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk, &src, &dest);
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
- struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
- struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
-
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *info);
return ERR_PTR(err);
}
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
struct rpc_xprt *xprt)
{
struct rpc_clnt *clnt = NULL;
struct rpc_xprt_switch *xps;
- xps = xprt_switch_alloc(xprt, GFP_KERNEL);
- if (xps == NULL)
- return ERR_PTR(-ENOMEM);
-
+ if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
+ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ xps = args->bc_xprt->xpt_bc_xps;
+ xprt_switch_get(xps);
+ } else {
+ xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+ if (xps == NULL) {
+ xprt_put(xprt);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (xprt->bc_xprt) {
+ xprt_switch_get(xps);
+ xprt->bc_xprt->xpt_bc_xps = xps;
+ }
+ }
clnt = rpc_new_client(args, xps, xprt, NULL);
if (IS_ERR(clnt))
return clnt;
return clnt;
}
-EXPORT_SYMBOL_GPL(rpc_create_xprt);
/**
* rpc_create - create an RPC client and transport with one call
};
char servername[48];
+ if (args->bc_xprt) {
+ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ xprt = args->bc_xprt->xpt_bc_xprt;
+ if (xprt) {
+ xprt_get(xprt);
+ return rpc_create_xprt(args, xprt);
+ }
+ }
+
if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
/* See comment on corresponding get in xs_setup_bc_tcp(): */
if (xprt->xpt_bc_xprt)
xprt_put(xprt->xpt_bc_xprt);
+ if (xprt->xpt_bc_xps)
+ xprt_switch_put(xprt->xpt_bc_xps);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
return xprt;
args->bc_xprt->xpt_bc_xprt = NULL;
+ args->bc_xprt->xpt_bc_xps = NULL;
xprt_put(xprt);
ret = ERR_PTR(-EINVAL);
out_err:
return 0;
}
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = rcu_dereference_rtnl(tn->bearer_list[i]);
+ if (b)
+ tipc_reset_bearer(net, b);
+ }
+}
+
/**
* bearer_disable
*
return 0;
/* Send RESET message even if bearer is detached from device */
- tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+ tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
goto drop;
void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
u16 ack = snd_l->snd_nxt - 1;
snd_l->ackers--;
+ rcv_l->bc_peer_is_up = true;
+ rcv_l->state = LINK_ESTABLISHED;
tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- int mtyp, rc = 0;
+ int mtyp = 0;
+ int rc = 0;
bool state = false;
bool probe = false;
bool setup = false;
if (!msg_peer_node_is_up(hdr))
return;
- l->bc_peer_is_up = true;
+ /* Open when peer ackowledges our bcast init msg (pkt #1) */
+ if (msg_ack(hdr))
+ l->bc_peer_is_up = true;
+
+ if (!l->bc_peer_is_up)
+ return;
/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
#include "name_table.h"
#define MAX_FORWARD_SIZE 1024
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_TAILROOM 16
static unsigned int align(unsigned int i)
{
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
+ if (skb_cloned(_skb) &&
+ pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+ goto exit;
+
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
msg_set_origport(hdr, msg_destport(&ohdr));
#define TIPC_MEDIA_INFO_OFFSET 5
-/**
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- * are word aligned for quicker access
- */
-#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-
struct tipc_skb_cb {
void *handle;
struct sk_buff *tail;
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
- strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+ nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));
rc = tipc_bcast_rcv(net, be->link, skb);
- /* Broadcast link reset may happen at reassembly failure */
- if (rc & TIPC_LINK_DOWN_EVT)
- tipc_node_reset_links(n);
-
/* Broadcast ACKs are sent on a unicast link */
if (rc & TIPC_LINK_SND_BC_ACK) {
tipc_node_read_lock(n);
spin_unlock_bh(&be->inputq2.lock);
tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
}
+
+ if (rc & TIPC_LINK_DOWN_EVT) {
+ /* Reception reassembly failure => reset all links to peer */
+ if (!tipc_link_is_up(be->link))
+ tipc_node_reset_links(n);
+
+ /* Retransmission failure => reset all links to all peers */
+ if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+ tipc_bearer_reset_all(net);
+ }
+
tipc_node_put(n);
}
* @tsk: receiving socket
* @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
+ u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
int mtyp = msg_type(hdr);
bool conn_cong;
if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
- tipc_sk_respond(sk, skb, TIPC_OK);
+ if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+ __skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
*
* Returns true if message was added to socket receive queue, otherwise false
*/
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
int usr = msg_user(hdr);
if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
- tipc_sk_proto_rcv(tsk, skb);
+ tipc_sk_proto_rcv(tsk, skb, xmitq);
return false;
}
return true;
reject:
- tipc_sk_respond(sk, skb, err);
+ if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+ __skb_queue_tail(xmitq, skb);
return false;
}
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int truesize = skb->truesize;
+ struct sk_buff_head xmitq;
+ u32 dnode, selector;
- if (likely(filter_rcv(sk, skb)))
+ __skb_queue_head_init(&xmitq);
+
+ if (likely(filter_rcv(sk, skb, &xmitq))) {
atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+ return 0;
+ }
+
+ if (skb_queue_empty(&xmitq))
+ return 0;
+
+ /* Send response/rejected message */
+ skb = __skb_dequeue(&xmitq);
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
return 0;
}
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport)
+ u32 dport, struct sk_buff_head *xmitq)
{
+ unsigned long time_limit = jiffies + 2;
+ struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
- struct sk_buff *skb;
- unsigned long time_limit = jiffies + 2;
+ u32 onode;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- filter_rcv(sk, skb);
+ filter_rcv(sk, skb, xmitq);
continue;
}
continue;
/* Overload => reject message back to sender */
- tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ onode = tipc_own_addr(sock_net(sk));
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ __skb_queue_tail(xmitq, skb);
break;
}
}
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
+ struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
+ __skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- tipc_sk_enqueue(inputq, sk, dport);
+ tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
+ /* Send pending response/rejected messages, if any */
+ while ((skb = __skb_dequeue(&xmitq))) {
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
+ }
sock_put(sk);
continue;
}
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
- if (dentry && d_backing_inode(dentry) == i) {
+ if (dentry && d_real_inode(dentry) == i) {
sock_hold(s);
goto found;
}
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
- inode = d_backing_inode(path.dentry);
+ inode = d_real_inode(path.dentry);
err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
- hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
u->path = u_path;
list = &unix_socket_table[hash];
* function will also cleanup rejected sockets, those that reach the connected
* state but leave it before they have been accepted.
*
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ * lock_sock(listener);
+ * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
* - Sockets created by user action will be cleaned up when the user process
* calls close(2), causing our release implementation to be called. Our release
* implementation will perform some cleanup then drop the last reference so our
cleanup = true;
lock_sock(listener);
- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (vsock_is_pending(sk)) {
vsock_remove_pending(listener, sk);
if (connected) {
listener->sk_ack_backlog--;
- lock_sock(connected);
+ lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
vconnected = vsock_sk(connected);
/* If the listener socket has received an error, then we should
WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
- WARN_ON(ops->set_tx_power && !ops->get_tx_power);
- WARN_ON(ops->set_antenna && !ops->get_antenna);
alloc_size = sizeof(*rdev) + sizeof_priv;
params.smps_mode = NL80211_SMPS_OFF;
}
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+ return -EOPNOTSUPP;
+
if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
params.acl = parse_acl_data(&rdev->wiphy, info);
if (IS_ERR(params.acl))
return PTR_ERR(params.acl);
}
- params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
- if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
- return -EOPNOTSUPP;
-
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, ¶ms);
if (!err) {
* replace EtherType */
hdrlen += ETH_ALEN + 2;
else
- tmp.h_proto = htons(skb->len);
+ tmp.h_proto = htons(skb->len - hdrlen);
pskb_pull(skb, hdrlen);
* alignment since sizeof(struct ethhdr) is 14.
*/
frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+ if (!frame)
+ return NULL;
skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
- if (dev->netdev_ops->ndo_do_ioctl)
- return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+ if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+ int ret = 0;
+ struct iwreq iwr_lcl;
+ struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
+ memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+ iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+ iwr_lcl.u.data.length = iwp_compat->length;
+ iwr_lcl.u.data.flags = iwp_compat->flags;
+
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+ iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+ iwp_compat->length = iwr_lcl.u.data.length;
+ iwp_compat->flags = iwr_lcl.u.data.flags;
+
+ return ret;
+ } else
+#endif
+ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ }
return -EOPNOTSUPP;
}
*.pyc
*.pyo
+constants.py
$(CPP) -E -x c -P $(c_flags) $< > $@ ;\
sed -i '1,/<!-- end-c-headers -->/d;' $@
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
- $(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+ $(call if_changed_dep,gen_constants_py)
build_constants_py: $(obj)/constants.py
+ @:
clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
#include <linux/fs.h>
#include <linux/mount.h>
-#include <linux/radix-tree.h>
/* We need to stringify expanded macros so that they can be parsed */
LX_VALUE(MNT_NOATIME)
LX_VALUE(MNT_NODIRATIME)
LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
+++ /dev/null
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-# Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-# Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
- long_type = utils.get_long_type()
- return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
- long_type = utils.get_long_type()
- node_type = node.type
- indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
- return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
- height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
- return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
- if root.type == radix_tree_root_type.get_type().pointer():
- root = root.dereference()
- elif root.type != radix_tree_root_type.get_type():
- raise gdb.GdbError("Must be struct radix_tree_root not {}"
- .format(root.type))
-
- node = root['rnode']
- if node is 0:
- return None
-
- if not (is_indirect_ptr(node)):
- if (index > 0):
- return None
- return node
-
- node = indirect_to_ptr(node)
-
- height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
- if (index > maxindex(height)):
- return None
-
- shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
- while True:
- new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
- slot = node['slots'][new_index]
-
- node = slot.cast(node.type.pointer()).dereference()
- if node is 0:
- return None
-
- shift -= constants.LX_RADIX_TREE_MAP_SHIFT
- height -= 1
-
- if (height <= 0):
- break
-
- return node
-
-
-class LxRadixTree(gdb.Function):
- """ Lookup and return a node from a RadixTree.
-
-$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-If index is omitted, the root node is dereferenced and returned."""
-
- def __init__(self):
- super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
- def invoke(self, root, index=0):
- result = lookup(root, index)
- if result is None:
- raise gdb.GdbError("No entry in tree at index {}".format(index))
-
- return result
-
-LxRadixTree()
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
- self.module_paths = arg.split()
+ self.module_paths = [os.path.expanduser(p) for p in arg.split()]
self.module_paths.append(os.getcwd())
# enforce update
import linux.lists
import linux.proc
import linux.constants
- import linux.radixtree
len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
(*type)[0] ? *type : "*");
- if (compatible[0])
+ if ((*compatible)[0])
sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
*compatible);
{
struct common_audit_data sa;
struct apparmor_audit_data aad = {0,};
- char *command, *args = value;
+ char *command, *largs = NULL, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
- /* args points to a PAGE_SIZE buffer, AppArmor requires that
- * the buffer must be null terminated or have size <= PAGE_SIZE -1
- * so that AppArmor can null terminate them
- */
- if (args[size - 1] != '\0') {
- if (size == PAGE_SIZE)
- return -EINVAL;
- args[size] = '\0';
- }
-
/* task can only write its own attributes */
if (current != task)
return -EACCES;
- args = value;
+ /* AppArmor requires that the buffer must be null terminated atm */
+ if (args[size - 1] != '\0') {
+ /* null terminate */
+ largs = args = kmalloc(size + 1, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ memcpy(args, value, size);
+ args[size] = '\0';
+ }
+
+ error = -EINVAL;
args = strim(args);
command = strsep(&args, " ");
if (!args)
- return -EINVAL;
+ goto out;
args = skip_spaces(args);
if (!*args)
- return -EINVAL;
+ goto out;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
goto fail;
} else
/* only support the "current" and "exec" process attributes */
- return -EINVAL;
+ goto fail;
if (!error)
error = size;
+out:
+ kfree(largs);
return error;
fail:
aad.profile = aa_current_profile();
aad.op = OP_SETPROCATTR;
aad.info = name;
- aad.error = -EINVAL;
+ aad.error = error = -EINVAL;
aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
- return -EINVAL;
+ goto out;
}
static int apparmor_task_setrlimit(struct task_struct *task,
mutex_unlock(&key_construction_mutex);
- if (keyring)
+ if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (snd_BUG_ON(!card || !id))
return;
+ if (card->shutdown)
+ return;
read_lock(&card->ctl_files_rwlock);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
card->mixer_oss_change_count++;
}
EXPORT_SYMBOL(snd_pcm_new_internal);
+static void free_chmap(struct snd_pcm_str *pstr)
+{
+ if (pstr->chmap_kctl) {
+ snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
+ pstr->chmap_kctl = NULL;
+ }
+}
+
static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
{
struct snd_pcm_substream *substream, *substream_next;
kfree(setup);
}
#endif
+ free_chmap(pstr);
if (pstr->substream_count)
put_device(&pstr->dev);
}
for (cidx = 0; cidx < 2; cidx++) {
if (!pcm->internal)
snd_unregister_device(&pcm->streams[cidx].dev);
- if (pcm->streams[cidx].chmap_kctl) {
- snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
- pcm->streams[cidx].chmap_kctl = NULL;
- }
+ free_chmap(&pcm->streams[cidx]);
}
mutex_unlock(&pcm->open_mutex);
mutex_unlock(®ister_mutex);
qhead = tu->qhead++;
tu->qhead %= tu->queue_size;
+ tu->qused--;
spin_unlock_irq(&tu->qlock);
if (tu->tread) {
}
spin_lock_irq(&tu->qlock);
- tu->qused--;
if (err < 0)
goto _error;
result += unit;
static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{
+ hrtimer_cancel(&dpcm->timer);
tasklet_kill(&dpcm->tasklet);
}
err = reg_raw_write(codec, reg, val);
if (err == -EAGAIN) {
err = snd_hdac_power_up_pm(codec);
- if (!err)
+ if (err >= 0)
err = reg_raw_write(codec, reg, val);
snd_hdac_power_down_pm(codec);
}
err = reg_raw_read(codec, reg, val, uncached);
if (err == -EAGAIN) {
err = snd_hdac_power_up_pm(codec);
- if (!err)
+ if (err >= 0)
err = reg_raw_read(codec, reg, val, uncached);
snd_hdac_power_down_pm(codec);
}
int page, p, pp, delta, i;
page =
- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
- WT_SUBBUF_MASK)
- >> WT_SUBBUF_SHIFT;
+ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
if (dma->nr_periods >= 4)
delta = (page - dma->period_real) & 3;
else {
u32 pipe_alloc_mask;
int err;
- commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+ commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
if (commpage_bak == NULL)
return -ENOMEM;
commpage = chip->comm_page;
- memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+ memcpy(commpage_bak, commpage, sizeof(*commpage));
err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
if (err < 0) {
for (n = 0; n < spec->paths.used; n++) {
path = snd_array_elem(&spec->paths, n);
+ if (!path->depth)
+ continue;
if (path->path[0] == nid ||
path->path[path->depth - 1] == nid) {
bool pin_old = path->pin_enabled;
#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
+ IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
if (use_vga_switcheroo(hda)) {
if (chip->disabled && hda->probe_continued)
snd_hda_unlock_devices(&chip->bus);
- if (hda->vga_switcheroo_registered)
+ if (hda->vga_switcheroo_registered) {
vga_switcheroo_unregister_client(chip->pci);
+ vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
+ }
}
if (bus->chip_init) {
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ /* Kabylake */
+ { PCI_DEVICE(0x8086, 0xa171),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ /* Kabylake-LP */
+ { PCI_DEVICE(0x8086, 0x9d71),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ /* Kabylake-H */
+ { PCI_DEVICE(0x8086, 0xa2f0),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x157a),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ { PCI_DEVICE(0x1002, 0x15b3),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x793b),
.driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
{ PCI_DEVICE(0x1002, 0x7919),
/*
* Register access ops. Tegra HDA register access is DWORD only.
*/
-static void hda_tegra_writel(u32 value, u32 *addr)
+static void hda_tegra_writel(u32 value, u32 __iomem *addr)
{
writel(value, addr);
}
-static u32 hda_tegra_readl(u32 *addr)
+static u32 hda_tegra_readl(u32 __iomem *addr)
{
return readl(addr);
}
-static void hda_tegra_writew(u16 value, u16 *addr)
+static void hda_tegra_writew(u16 value, u16 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
writel(v, dword_addr);
}
-static u16 hda_tegra_readw(u16 *addr)
+static u16 hda_tegra_readw(u16 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
return (v >> shift) & 0xffff;
}
-static void hda_tegra_writeb(u8 value, u8 *addr)
+static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
writel(v, dword_addr);
}
-static u8 hda_tegra_readb(u8 *addr)
+static u8 hda_tegra_readb(u8 __iomem *addr)
{
unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
- void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+ void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
u32 v;
v = readl(dword_addr);
case 0x10ec0234:
case 0x10ec0274:
case 0x10ec0294:
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
alc_update_coef_idx(codec, 0x10, 1<<15, 0);
break;
case 0x10ec0662:
ALC269_TYPE_ALC256,
ALC269_TYPE_ALC225,
ALC269_TYPE_ALC294,
+ ALC269_TYPE_ALC700,
};
/*
case ALC269_TYPE_ALC256:
case ALC269_TYPE_ALC225:
case ALC269_TYPE_ALC294:
+ case ALC269_TYPE_ALC700:
ssids = alc269_ssids;
break;
default:
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
static struct coef_fw coef0255[] = {
- WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
{}
};
+ static struct coef_fw coef0255_1[] = {
+ WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+ {}
+ };
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x1b, 0x0c0b),
WRITE_COEF(0x45, 0xc429),
switch (codec->core.vendor_id) {
case 0x10ec0255:
+ alc_process_coef_fw(codec, coef0255_1);
+ alc_process_coef_fw(codec, coef0255);
+ break;
case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
WRITE_COEFEX(0x57, 0x03, 0x8ea6),
{}
};
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+ WRITE_COEF(0x1b, 0x0c6b),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x45, 0xd429),
WRITE_COEF(0x1b, 0x0c2b),
switch (codec->core.vendor_id) {
case 0x10ec0255:
- case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0256);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
WRITE_COEFEX(0x57, 0x03, 0x8ea6),
{}
};
+ static struct coef_fw coef0256[] = {
+ WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+ WRITE_COEF(0x1b, 0x0c6b),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ {}
+ };
static struct coef_fw coef0233[] = {
WRITE_COEF(0x45, 0xe429),
WRITE_COEF(0x1b, 0x0c2b),
switch (codec->core.vendor_id) {
case 0x10ec0255:
- case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0256);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
static void alc255_set_default_jack_type(struct hda_codec *codec)
{
/* Set to iphone type */
- static struct coef_fw fw[] = {
+ static struct coef_fw alc255fw[] = {
WRITE_COEF(0x1b, 0x880b),
WRITE_COEF(0x45, 0xd089),
WRITE_COEF(0x1b, 0x080b),
WRITE_COEF(0x1b, 0x0c0b),
{}
};
- alc_process_coef_fw(codec, fw);
+ static struct coef_fw alc256fw[] = {
+ WRITE_COEF(0x1b, 0x884b),
+ WRITE_COEF(0x45, 0xd089),
+ WRITE_COEF(0x1b, 0x084b),
+ WRITE_COEF(0x46, 0x0004),
+ WRITE_COEF(0x1b, 0x0c4b),
+ {}
+ };
+ switch (codec->core.vendor_id) {
+ case 0x10ec0255:
+ alc_process_coef_fw(codec, alc255fw);
+ break;
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, alc256fw);
+ break;
+ }
msleep(30);
}
SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
{}
};
#define ALC225_STANDARD_PINS \
- {0x12, 0xb7a60130}, \
{0x21, 0x04211020}
#define ALC256_STANDARD_PINS \
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x14, 0x901701a0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x14, 0x901701b0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60150},
{0x14, 0x901701a0}),
SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60150},
{0x14, 0x901701b0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x1b, 0x90170110}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
{0x12, 0x90a60180},
{0x14, 0x90170130},
{0x21, 0x02211040}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60180},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
{0x14, 0x90170120},
{0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60170},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
case 0x10ec0294:
spec->codec_variant = ALC269_TYPE_ALC294;
break;
+ case 0x10ec0700:
+ case 0x10ec0701:
+ case 0x10ec0703:
+ spec->codec_variant = ALC269_TYPE_ALC700;
+ spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+ break;
+
}
if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
+ HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
tristate
config SND_SOC_HDMI_CODEC
- tristate
- select SND_PCM_ELD
- select SND_PCM_IEC958
+ tristate
+ select SND_PCM_ELD
+ select SND_PCM_IEC958
+ select HDMI
config SND_SOC_ES8328
tristate "Everest Semi ES8328 CODEC"
.max_register = 0x16,
.reg_defaults = ak4613_reg,
.num_reg_defaults = ARRAY_SIZE(ak4613_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct of_device_id ak4613_of_match[] = {
static struct i2c_driver ak4613_i2c_driver = {
.driver = {
.name = "ak4613-codec",
- .owner = THIS_MODULE,
.of_match_table = ak4613_of_match,
},
.probe = ak4613_i2c_probe,
if (!tty->disc_data)
return -ENODEV;
+ tty->receive_room = 16;
if (tty->ops->write(tty, v253_init, len) != len) {
ret = -EIO;
goto err;
* exit, we call pm_runtime_suspend() so that will do for us
*/
hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ if (!hlink) {
+ dev_err(&edev->hdac.dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_get(edev->ebus, hlink);
ret = create_fill_widget_route_map(dapm);
/* hold the ref while we probe */
hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ if (!hlink) {
+ dev_err(&edev->hdac.dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_get(edev->ebus, hlink);
hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
}
hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+ if (!hlink) {
+ dev_err(dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_put(ebus, hlink);
return 0;
return 0;
hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+ if (!hlink) {
+ dev_err(dev, "hdac link not found\n");
+ return -EIO;
+ }
+
snd_hdac_ext_bus_link_get(ebus, hlink);
err = snd_hdac_display_power(bus, true);
return ret;
if (hcp->hcd.ops->audio_startup) {
- ret = hcp->hcd.ops->audio_startup(dai->dev->parent);
+ ret = hcp->hcd.ops->audio_startup(dai->dev->parent, hcp->hcd.data);
if (ret) {
mutex_lock(&hcp->current_stream_lock);
hcp->current_stream = NULL;
}
if (hcp->hcd.ops->get_eld) {
- ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
- sizeof(hcp->eld));
+ ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->hcd.data,
+ hcp->eld, sizeof(hcp->eld));
if (!ret) {
ret = snd_pcm_hw_constraint_eld(substream->runtime,
WARN_ON(hcp->current_stream != substream);
- hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+ hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
mutex_lock(&hcp->current_stream_lock);
hcp->current_stream = NULL;
hp.sample_rate = params_rate(params);
hp.channels = params_channels(params);
- return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
- &hp);
+ return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
+ &hcp->daifmt[dai->id], &hp);
}
static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
dev_dbg(dai->dev, "%s()\n", __func__);
if (hcp->hcd.ops->digital_mute)
- return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+ return hcp->hcd.ops->digital_mute(dai->dev->parent,
+ hcp->hcd.data, mute);
return 0;
}
{ 0x2b, 0x5454 },
{ 0x2c, 0xaaa0 },
{ 0x2d, 0x0000 },
- { 0x2f, 0x1002 },
+ { 0x2f, 0x5002 },
{ 0x31, 0x5000 },
{ 0x32, 0x0000 },
{ 0x33, 0x0000 },
RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
- 39, 0, out_vol_tlv),
+ 39, 1, out_vol_tlv),
/* OUTPUT Control */
SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
.capture = {
.stream_name = "Audio Trace CPU",
.channels_min = 1,
- .channels_max = 6,
+ .channels_max = 4,
.rates = WM5102_RATES,
.formats = WM5102_FORMATS,
},
{ "OUT2L", NULL, "SYSCLK" },
{ "OUT2R", NULL, "SYSCLK" },
{ "OUT3L", NULL, "SYSCLK" },
+ { "OUT3R", NULL, "SYSCLK" },
{ "OUT4L", NULL, "SYSCLK" },
{ "OUT4R", NULL, "SYSCLK" },
{ "OUT5L", NULL, "SYSCLK" },
.max_register = WM8940_MONOMIX,
.reg_defaults = wm8940_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
.readable_reg = wm8940_readable_register,
.volatile_reg = wm8940_volatile_register,
};
static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
- .tx_dma_offset = 0x200,
- .rx_dma_offset = 0x284,
+ /* The CFG port offset will be calculated if it is needed */
+ .tx_dma_offset = 0,
+ .rx_dma_offset = 0,
.version = MCASP_VERSION_4,
};
return PCM_EDMA;
}
+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+ int i;
+ u32 offset = 0;
+
+ if (pdata->version != MCASP_VERSION_4)
+ return pdata->tx_dma_offset;
+
+ for (i = 0; i < pdata->num_serializer; i++) {
+ if (pdata->serial_dir[i] == TX_MODE) {
+ if (!offset) {
+ offset = DAVINCI_MCASP_TXBUF_REG(i);
+ } else {
+ pr_err("%s: Only one serializer allowed!\n",
+ __func__);
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+ int i;
+ u32 offset = 0;
+
+ if (pdata->version != MCASP_VERSION_4)
+ return pdata->rx_dma_offset;
+
+ for (i = 0; i < pdata->num_serializer; i++) {
+ if (pdata->serial_dir[i] == RX_MODE) {
+ if (!offset) {
+ offset = DAVINCI_MCASP_RXBUF_REG(i);
+ } else {
+ pr_err("%s: Only one serializer allowed!\n",
+ __func__);
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
static int davinci_mcasp_probe(struct platform_device *pdev)
{
struct snd_dmaengine_dai_dma_data *dma_data;
if (dat)
dma_data->addr = dat->start;
else
- dma_data->addr = mem->start + pdata->tx_dma_offset;
+ dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (dat)
dma_data->addr = dat->start;
else
- dma_data->addr = mem->start + pdata->rx_dma_offset;
+ dma_data->addr =
+ mem->start + davinci_mcasp_rxdma_offset(pdata);
dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
(n << 2))
/* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG 0x200
+#define DAVINCI_MCASP_TXBUF_REG(n) (0x200 + (n << 2))
/* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG 0x280
+#define DAVINCI_MCASP_RXBUF_REG(n) (0x280 + (n << 2))
/* McASP FIFO Registers */
#define DAVINCI_MCASP_V2_AFIFO_BASE (0x1010)
ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
+ regmap_update_bits(regs, CCSR_SSI_STCCR,
+ CCSR_SSI_SxCCR_DC_MASK,
+ CCSR_SSI_SxCCR_DC(2));
+ regmap_update_bits(regs, CCSR_SSI_SRCCR,
+ CCSR_SSI_SxCCR_DC_MASK,
+ CCSR_SSI_SxCCR_DC(2));
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFS:
case SND_SOC_DAIFMT_CBS_CFS:
ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
- regmap_update_bits(regs, CCSR_SSI_STCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
- regmap_update_bits(regs, CCSR_SSI_SRCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
break;
case SND_SOC_DAIFMT_CBM_CFM:
ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
case SNDRV_PCM_TRIGGER_START:
if (stream->compr_ops->stream_start)
return stream->compr_ops->stream_start(sst->dev, stream->id);
+ break;
case SNDRV_PCM_TRIGGER_STOP:
if (stream->compr_ops->stream_drop)
return stream->compr_ops->stream_drop(sst->dev, stream->id);
+ break;
case SND_COMPR_TRIGGER_DRAIN:
if (stream->compr_ops->stream_drain)
return stream->compr_ops->stream_drain(sst->dev, stream->id);
+ break;
case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
if (stream->compr_ops->stream_partial_drain)
return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+ break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (stream->compr_ops->stream_pause)
return stream->compr_ops->stream_pause(sst->dev, stream->id);
+ break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (stream->compr_ops->stream_pause_release)
return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
- default:
- return -EINVAL;
+ break;
}
+ return -EINVAL;
}
static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+ INIT_LIST_HEAD(&sst->module_list);
ret = skl_ipc_init(dev, skl);
if (ret)
return ret;
}
}
- rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+ rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
rsnd_mod_write(adg_mod, BRRA, rbga);
rsnd_mod_write(adg_mod, BRRB, rbgb);
goto __error;
}
chip = usb_chip[i];
- dev_set_drvdata(&dev->dev, chip);
atomic_inc(&chip->active); /* avoid autopm */
break;
}
goto __error;
}
}
+ dev_set_drvdata(&dev->dev, chip);
/*
* For devices with more than one control interface, we assume the
/* check for STACK_FRAME_NON_STANDARD */
if (file->whitelist && file->whitelist->rela)
- list_for_each_entry(rela, &file->whitelist->rela->rela_list, list)
- if (rela->sym->sec == func->sec &&
+ list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+ if (rela->sym->type == STT_SECTION &&
+ rela->sym->sec == func->sec &&
rela->addend == func->offset)
return true;
+ if (rela->sym->type == STT_FUNC && rela->sym == func)
+ return true;
+ }
/* check if it has a context switching instruction */
func_for_each_insn(file, func, insn)
return (value_int & value_mask) | ~value_mask;
}
+static int string_set_value(struct bt_ctf_field *field, const char *string)
+{
+ char *buffer = NULL;
+ size_t len = strlen(string), i, p;
+ int err;
+
+ for (i = p = 0; i < len; i++, p++) {
+ if (isprint(string[i])) {
+ if (!buffer)
+ continue;
+ buffer[p] = string[i];
+ } else {
+ char numstr[5];
+
+ snprintf(numstr, sizeof(numstr), "\\x%02x",
+ (unsigned int)(string[i]) & 0xff);
+
+ if (!buffer) {
+ buffer = zalloc(i + (len - i) * 4 + 2);
+ if (!buffer) {
+ pr_err("failed to set unprintable string '%s'\n", string);
+ return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
+ }
+ if (i > 0)
+ strncpy(buffer, string, i);
+ }
+ strncat(buffer + p, numstr, 4);
+ p += 3;
+ }
+ }
+
+ if (!buffer)
+ return bt_ctf_field_string_set_value(field, string);
+ err = bt_ctf_field_string_set_value(field, buffer);
+ free(buffer);
+ return err;
+}
+
static int add_tracepoint_field_value(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
}
if (flags & FIELD_IS_STRING)
- ret = bt_ctf_field_string_set_value(field,
- data + offset + i * len);
+ ret = string_set_value(field, data + offset + i * len);
else {
unsigned long long value_int;
int err;
union perf_event *event;
+ if (symbol_conf.kptr_restrict)
+ return -1;
if (map == NULL)
return -1;
static bool symbol__read_kptr_restrict(void)
{
bool value = false;
+ FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
- if (geteuid() != 0) {
- FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
- if (fp != NULL) {
- char line[8];
+ if (fp != NULL) {
+ char line[8];
- if (fgets(line, sizeof(line), fp) != NULL)
- value = atoi(line) != 0;
+ if (fgets(line, sizeof(line), fp) != NULL)
+ value = (geteuid() != 0) ?
+ (atoi(line) != 0) :
+ (atoi(line) == 2);
- fclose(fp);
- }
+ fclose(fp);
}
return value;
NODE_TAGGED = 2,
};
-#define THRASH_SIZE 1000 * 1000
+#define THRASH_SIZE (1000 * 1000)
#define N 127
#define BATCH 33
exit_unsupported
fi
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
echo "hist trigger is not supported"
exit_unsupported
fi
+reset_tracer
+do_reset
+
echo "Test histogram with execname modifier"
echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
exit_unsupported
fi
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
echo "hist trigger is not supported"
exit_unsupported
fi
+reset_tracer
+do_reset
+
echo "Test histogram basic tigger"
echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
exit_unsupported
fi
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
echo "hist trigger is not supported"
exit_unsupported
fi
+reset_tracer
+do_reset
+
reset_trigger
echo "Test histogram multiple tiggers"
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insn_cnt = ARRAY_SIZE(prog);
- attr.insns = (uint64_t)prog;
- attr.license = (uint64_t)bpf_license;
- attr.log_buf = (uint64_t)bpf_log_buf;
+ attr.insns = (unsigned long) &prog;
+ attr.license = (unsigned long) &bpf_license;
+ attr.log_buf = (unsigned long) &bpf_log_buf;
attr.log_size = sizeof(bpf_log_buf);
attr.log_level = 1;
attr.kern_version = 0;
memset(&eprog, 0, sizeof(eprog));
eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
eprog.insn_cnt = ARRAY_SIZE(ecode);
- eprog.insns = (uint64_t)ecode;
- eprog.license = (uint64_t)bpf_license;
+ eprog.insns = (unsigned long) &ecode;
+ eprog.license = (unsigned long) &bpf_license;
eprog.kern_version = 0;
memset(&cprog, 0, sizeof(cprog));
printf("No of huge pages allocated = %d\n",
(atoi(nr_hugepages)));
- if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages))
+ if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
!= strlen(initial_nr_hugepages)) {
perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
goto close_fd;
all:
-all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring
CFLAGS += -Wall
CFLAGS += -pthread -O2 -ggdb
virtio_ring_0_9: virtio_ring_0_9.o main.o
virtio_ring_poll: virtio_ring_poll.o main.o
virtio_ring_inorder: virtio_ring_inorder.o main.o
+noring: noring.o main.o
clean:
-rm main.o
-rm ring.o ring
-rm virtio_ring_0_9.o virtio_ring_0_9
-rm virtio_ring_poll.o virtio_ring_poll
-rm virtio_ring_inorder.o virtio_ring_inorder
+ -rm noring.o noring
.PHONY: all clean
Partial implementation of various ring layouts, useful to tune virtio design.
Uses shared memory heavily.
+
+Typical use:
+
+# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
--- /dev/null
+#define _GNU_SOURCE
+#include "main.h"
+#include <assert.h>
+
+/* stub implementation: useful for measuring overhead */
+void alloc_ring(void)
+{
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+ return 0;
+}
+
+/*
+ * skb_array API provides no way for producer to find out whether a given
+ * buffer was consumed. Our tests merely require that a successful get_buf
+ * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
+ * fake it accordingly.
+ */
+void *get_buf(unsigned *lenp, void **bufp)
+{
+ return "Buffer";
+}
+
+void poll_used(void)
+{
+}
+
+void disable_call()
+{
+ assert(0);
+}
+
+bool enable_call()
+{
+ assert(0);
+}
+
+void kick_available(void)
+{
+ assert(0);
+}
+
+/* host side */
+void disable_kick()
+{
+ assert(0);
+}
+
+bool enable_kick()
+{
+ assert(0);
+}
+
+void poll_avail(void)
+{
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+ return true;
+}
+
+void call_used(void)
+{
+ assert(0);
+}
#use last CPU for host. Why not the first?
#many devices tend to use cpu0 by default so
#it tends to be busier
-HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
#run command on all cpus
-for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+for cpu in $(seq 0 $HOST_AFFINITY)
do
#Don't run guest and host on same CPU
#It actually works ok if using signalling
s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
if (total) {
- printf("\nSlab Deactivation Ocurrences %%\n");
+ printf("\nSlab Deactivation Occurrences %%\n");
printf("-------------------------------------------------\n");
printf("Slab full %7lu %3lu%%\n",
s->deactivate_full, (s->deactivate_full * 100) / total);
s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
}
- if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail)
+ if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
printf("\nCmpxchg_double Looping\n------------------------\n");
printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n",
s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
+ }
}
static void report(struct slabinfo *s)
if (copy_from_user(&routing, argp, sizeof(routing)))
goto out;
r = -EINVAL;
- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+ if (routing.nr > KVM_MAX_IRQ_ROUTES)
goto out;
if (routing.flags)
goto out;