- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
+ - 'dma_fence_chain_for_each'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
+ - 'drm_client_for_each_connector_iter'
+ - 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
+ - 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_available_child_of_node'
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
+ - 'for_each_dev_scope'
+ - 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
+ - 'for_each_free_mem_pfn_range_in_zone'
+ - 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
+ - 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
+ - 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
+ - 'in_dev_for_each_ifa_rcu'
+ - 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- - 'mp_bvec_for_each_page'
- - 'mp_bvec_for_each_segment'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe'
+ - 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- - 'rht_for_each_from'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
+ - 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
:numbered:
pci
- picebus-howto
+ pciebus-howto
pci-iov-howto
msi-howto
acpi-info
--- /dev/null
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===========================================
+The PCI Express Port Bus Driver Guide HOWTO
+===========================================
+
+:Author: Tom L Nguyen tom.l.nguyen@intel.com 11/03/2004
+:Copyright: |copy| 2004 Intel Corporation
+
+About this guide
+================
+
+This guide describes the basics of the PCI Express Port Bus driver
+and provides information on how to enable the service drivers to
+register/unregister with the PCI Express Port Bus Driver.
+
+
+What is the PCI Express Port Bus Driver
+=======================================
+
+A PCI Express Port is a logical PCI-PCI Bridge structure. There
+are two types of PCI Express Port: the Root Port and the Switch
+Port. The Root Port originates a PCI Express link from a PCI Express
+Root Complex and the Switch Port connects PCI Express links to
+internal logical PCI buses. The Switch Port, which has its secondary
+bus representing the switch's internal routing logic, is called the
+switch's Upstream Port. The switch's Downstream Port bridges from the
+switch's internal routing bus to a bus representing the downstream
+PCI Express link from the PCI Express Switch.
+
+A PCI Express Port can provide up to four distinct functions,
+referred to in this document as services, depending on its port type.
+PCI Express Port's services include native hotplug support (HP),
+power management event support (PME), advanced error reporting
+support (AER), and virtual channel support (VC). These services may
+be handled by a single complex driver or be individually distributed
+and handled by corresponding service drivers.
+
+Why use the PCI Express Port Bus Driver?
+========================================
+
+In existing Linux kernels, the Linux Device Driver Model allows a
+physical device to be handled by only a single driver. The PCI
+Express Port is a PCI-PCI Bridge device with multiple distinct
+services. To maintain a clean and simple solution each service
+may have its own software service driver. In this case several
+service drivers will compete for a single PCI-PCI Bridge device.
+For example, if the PCI Express Root Port native hotplug service
+driver is loaded first, it claims a PCI-PCI Bridge Root Port. The
+kernel therefore does not load other service drivers for that Root
+Port. In other words, it is impossible to have multiple service
+drivers load and run on a PCI-PCI Bridge device simultaneously
+using the current driver model.
+
+Enabling multiple service drivers to run simultaneously requires
+a PCI Express Port Bus driver, which manages all populated
+PCI Express Ports and distributes all provided service requests
+to the corresponding service drivers as required. Some key
+advantages of using the PCI Express Port Bus driver are listed below:
+
+ - Allow multiple service drivers to run simultaneously on
+ a PCI-PCI Bridge Port device.
+
+ - Allow service drivers to be implemented in an independent
+ staged approach.
+
+ - Allow one service driver to run on multiple PCI-PCI Bridge
+ Port devices.
+
+ - Manage and distribute resources of a PCI-PCI Bridge Port
+ device to requested service drivers.
+
+Configuring the PCI Express Port Bus Driver vs. Service Drivers
+===============================================================
+
+Including the PCI Express Port Bus Driver Support into the Kernel
+-----------------------------------------------------------------
+
+Including the PCI Express Port Bus driver depends on whether the PCI
+Express support is included in the kernel config. The kernel will
+automatically include the PCI Express Port Bus driver as a kernel
+driver when the PCI Express support is enabled in the kernel.
+
+Enabling Service Driver Support
+-------------------------------
+
+PCI device drivers are implemented based on the Linux Device Driver
+Model. All service drivers are PCI device drivers. As discussed above,
+it is impossible to load any service driver once the kernel has loaded
+the PCI Express Port Bus Driver. Meeting the PCI Express Port Bus
+Driver Model requires some minimal changes to existing service drivers,
+and these changes have no impact on their functionality.
+
+A service driver is required to use the two APIs shown below to
+register its service with the PCI Express Port Bus driver (see the
+two sections that follow). It is important that a service driver
+initializes the pcie_port_service_driver data structure, included in
+header file include/linux/pcieport_if.h, before calling these APIs.
+Failure to do so will result in an identity mismatch, which prevents
+the PCI Express Port Bus driver from loading a service driver.
+
+pcie_port_service_register
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+::
+
+ int pcie_port_service_register(struct pcie_port_service_driver *new)
+
+This API replaces the Linux Driver Model's pci_register_driver API. A
+service driver should always call pcie_port_service_register at
+module init. Note that after a service driver is loaded, calls
+such as pci_enable_device(dev) and pci_set_master(dev) are no longer
+necessary since these calls are executed by the PCI Port Bus driver.
+
+pcie_port_service_unregister
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+::
+
+ void pcie_port_service_unregister(struct pcie_port_service_driver *new)
+
+pcie_port_service_unregister replaces the Linux Driver Model's
+pci_unregister_driver. It should always be called by a service driver
+when its module exits.
+
+Sample Code
+~~~~~~~~~~~
+
+Below is sample service driver code to initialize the port service
+driver data structure.
+::
+
+ static struct pcie_port_service_id service_id[] = { {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .port_type = PCIE_RC_PORT,
+ .service_type = PCIE_PORT_SERVICE_AER,
+ }, { /* end: all zeroes */ }
+ };
+
+ static struct pcie_port_service_driver root_aerdrv = {
+ .name = (char *)device_name,
+ .id_table = &service_id[0],
+
+ .probe = aerdrv_load,
+ .remove = aerdrv_unload,
+
+ .suspend = aerdrv_suspend,
+ .resume = aerdrv_resume,
+ };
+
+Below is sample code for registering/unregistering a service
+driver.
+::
+
+ static int __init aerdrv_service_init(void)
+ {
+ int retval = 0;
+
+ retval = pcie_port_service_register(&root_aerdrv);
+ if (!retval) {
+ /*
+ * FIX ME
+ */
+ }
+ return retval;
+ }
+
+ static void __exit aerdrv_service_exit(void)
+ {
+ pcie_port_service_unregister(&root_aerdrv);
+ }
+
+ module_init(aerdrv_service_init);
+ module_exit(aerdrv_service_exit);
+
+Possible Resource Conflicts
+===========================
+
+Since all service drivers of a PCI-PCI Bridge Port device are
+allowed to run simultaneously, the sections below list a few possible
+resource conflicts along with proposed solutions.
+
+MSI and MSI-X Vector Resource
+-----------------------------
+
+Once MSI or MSI-X interrupts are enabled on a device, the device stays
+in that mode until they are disabled again. Since service drivers of
+the same PCI-PCI Bridge port share the same physical device, if an
+individual service driver enables or disables MSI/MSI-X mode, the
+result may be unpredictable behavior.
+
+To avoid this situation, no service driver is permitted to switch
+the interrupt mode on its device. The PCI Express Port Bus driver
+is responsible for determining the interrupt mode and this should be
+transparent to service drivers. Service drivers need to know only
+the vector IRQ assigned to the field irq of struct pcie_device, which
+is passed in when the PCI Express Port Bus driver probes each service
+driver. Service drivers should use (struct pcie_device*)dev->irq to
+call request_irq/free_irq. In addition, the interrupt mode is stored
+in the field interrupt_mode of struct pcie_device.
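+
+As an illustration only (the handler name and the use of IRQF_SHARED
+below are hypothetical, not mandated by the port bus driver), a service
+driver's probe routine might request its assigned vector like this::
+
+	static int aerdrv_load(struct pcie_device *dev)
+	{
+		/*
+		 * Use the vector assigned by the PCI Express Port Bus
+		 * driver; a service driver must never reconfigure
+		 * MSI/MSI-X on the device itself.
+		 */
+		return request_irq(dev->irq, aer_irq_handler, IRQF_SHARED,
+				   "aerdrv", dev);
+	}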
+
+PCI Memory/IO Mapped Regions
+----------------------------
+
+Service drivers for PCI Express Power Management (PME), Advanced
+Error Reporting (AER), Hot-Plug (HP) and Virtual Channel (VC) access
+PCI configuration space on the PCI Express port. In all cases the
+registers accessed are independent of each other. This patch assumes
+that all service drivers will be well behaved and not overwrite
+other service driver's configuration settings.
+
+PCI Config Registers
+--------------------
+
+Each service driver runs its PCI config operations on its own
+capability structure except the PCI Express capability structure, in
+which Root Control register and Device Control register are shared
+between PME and AER. This patch assumes that all service drivers
+will be well behaved and not overwrite other service driver's
+configuration settings.
+++ /dev/null
-.. SPDX-License-Identifier: GPL-2.0
-.. include:: <isonum.txt>
-
-===========================================
-The PCI Express Port Bus Driver Guide HOWTO
-===========================================
-
-:Author: Tom L Nguyen tom.l.nguyen@intel.com 11/03/2004
-:Copyright: |copy| 2004 Intel Corporation
-
-About this guide
-================
-
-This guide describes the basics of the PCI Express Port Bus driver
-and provides information on how to enable the service drivers to
-register/unregister with the PCI Express Port Bus Driver.
-
-
-What is the PCI Express Port Bus Driver
-=======================================
-
-A PCI Express Port is a logical PCI-PCI Bridge structure. There
-are two types of PCI Express Port: the Root Port and the Switch
-Port. The Root Port originates a PCI Express link from a PCI Express
-Root Complex and the Switch Port connects PCI Express links to
-internal logical PCI buses. The Switch Port, which has its secondary
-bus representing the switch's internal routing logic, is called the
-switch's Upstream Port. The switch's Downstream Port is bridging from
-switch's internal routing bus to a bus representing the downstream
-PCI Express link from the PCI Express Switch.
-
-A PCI Express Port can provide up to four distinct functions,
-referred to in this document as services, depending on its port type.
-PCI Express Port's services include native hotplug support (HP),
-power management event support (PME), advanced error reporting
-support (AER), and virtual channel support (VC). These services may
-be handled by a single complex driver or be individually distributed
-and handled by corresponding service drivers.
-
-Why use the PCI Express Port Bus Driver?
-========================================
-
-In existing Linux kernels, the Linux Device Driver Model allows a
-physical device to be handled by only a single driver. The PCI
-Express Port is a PCI-PCI Bridge device with multiple distinct
-services. To maintain a clean and simple solution each service
-may have its own software service driver. In this case several
-service drivers will compete for a single PCI-PCI Bridge device.
-For example, if the PCI Express Root Port native hotplug service
-driver is loaded first, it claims a PCI-PCI Bridge Root Port. The
-kernel therefore does not load other service drivers for that Root
-Port. In other words, it is impossible to have multiple service
-drivers load and run on a PCI-PCI Bridge device simultaneously
-using the current driver model.
-
-To enable multiple service drivers running simultaneously requires
-having a PCI Express Port Bus driver, which manages all populated
-PCI Express Ports and distributes all provided service requests
-to the corresponding service drivers as required. Some key
-advantages of using the PCI Express Port Bus driver are listed below:
-
- - Allow multiple service drivers to run simultaneously on
- a PCI-PCI Bridge Port device.
-
- - Allow service drivers implemented in an independent
- staged approach.
-
- - Allow one service driver to run on multiple PCI-PCI Bridge
- Port devices.
-
- - Manage and distribute resources of a PCI-PCI Bridge Port
- device to requested service drivers.
-
-Configuring the PCI Express Port Bus Driver vs. Service Drivers
-===============================================================
-
-Including the PCI Express Port Bus Driver Support into the Kernel
------------------------------------------------------------------
-
-Including the PCI Express Port Bus driver depends on whether the PCI
-Express support is included in the kernel config. The kernel will
-automatically include the PCI Express Port Bus driver as a kernel
-driver when the PCI Express support is enabled in the kernel.
-
-Enabling Service Driver Support
--------------------------------
-
-PCI device drivers are implemented based on Linux Device Driver Model.
-All service drivers are PCI device drivers. As discussed above, it is
-impossible to load any service driver once the kernel has loaded the
-PCI Express Port Bus Driver. To meet the PCI Express Port Bus Driver
-Model requires some minimal changes on existing service drivers that
-imposes no impact on the functionality of existing service drivers.
-
-A service driver is required to use the two APIs shown below to
-register its service with the PCI Express Port Bus driver (see
-section 5.2.1 & 5.2.2). It is important that a service driver
-initializes the pcie_port_service_driver data structure, included in
-header file /include/linux/pcieport_if.h, before calling these APIs.
-Failure to do so will result an identity mismatch, which prevents
-the PCI Express Port Bus driver from loading a service driver.
-
-pcie_port_service_register
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-::
-
- int pcie_port_service_register(struct pcie_port_service_driver *new)
-
-This API replaces the Linux Driver Model's pci_register_driver API. A
-service driver should always calls pcie_port_service_register at
-module init. Note that after service driver being loaded, calls
-such as pci_enable_device(dev) and pci_set_master(dev) are no longer
-necessary since these calls are executed by the PCI Port Bus driver.
-
-pcie_port_service_unregister
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-::
-
- void pcie_port_service_unregister(struct pcie_port_service_driver *new)
-
-pcie_port_service_unregister replaces the Linux Driver Model's
-pci_unregister_driver. It's always called by service driver when a
-module exits.
-
-Sample Code
-~~~~~~~~~~~
-
-Below is sample service driver code to initialize the port service
-driver data structure.
-::
-
- static struct pcie_port_service_id service_id[] = { {
- .vendor = PCI_ANY_ID,
- .device = PCI_ANY_ID,
- .port_type = PCIE_RC_PORT,
- .service_type = PCIE_PORT_SERVICE_AER,
- }, { /* end: all zeroes */ }
- };
-
- static struct pcie_port_service_driver root_aerdrv = {
- .name = (char *)device_name,
- .id_table = &service_id[0],
-
- .probe = aerdrv_load,
- .remove = aerdrv_unload,
-
- .suspend = aerdrv_suspend,
- .resume = aerdrv_resume,
- };
-
-Below is a sample code for registering/unregistering a service
-driver.
-::
-
- static int __init aerdrv_service_init(void)
- {
- int retval = 0;
-
- retval = pcie_port_service_register(&root_aerdrv);
- if (!retval) {
- /*
- * FIX ME
- */
- }
- return retval;
- }
-
- static void __exit aerdrv_service_exit(void)
- {
- pcie_port_service_unregister(&root_aerdrv);
- }
-
- module_init(aerdrv_service_init);
- module_exit(aerdrv_service_exit);
-
-Possible Resource Conflicts
-===========================
-
-Since all service drivers of a PCI-PCI Bridge Port device are
-allowed to run simultaneously, below lists a few of possible resource
-conflicts with proposed solutions.
-
-MSI and MSI-X Vector Resource
------------------------------
-
-Once MSI or MSI-X interrupts are enabled on a device, it stays in this
-mode until they are disabled again. Since service drivers of the same
-PCI-PCI Bridge port share the same physical device, if an individual
-service driver enables or disables MSI/MSI-X mode it may result
-unpredictable behavior.
-
-To avoid this situation all service drivers are not permitted to
-switch interrupt mode on its device. The PCI Express Port Bus driver
-is responsible for determining the interrupt mode and this should be
-transparent to service drivers. Service drivers need to know only
-the vector IRQ assigned to the field irq of struct pcie_device, which
-is passed in when the PCI Express Port Bus driver probes each service
-driver. Service drivers should use (struct pcie_device*)dev->irq to
-call request_irq/free_irq. In addition, the interrupt mode is stored
-in the field interrupt_mode of struct pcie_device.
-
-PCI Memory/IO Mapped Regions
-----------------------------
-
-Service drivers for PCI Express Power Management (PME), Advanced
-Error Reporting (AER), Hot-Plug (HP) and Virtual Channel (VC) access
-PCI configuration space on the PCI Express port. In all cases the
-registers accessed are independent of each other. This patch assumes
-that all service drivers will be well behaved and not overwrite
-other service driver's configuration settings.
-
-PCI Config Registers
---------------------
-
-Each service driver runs its PCI config operations on its own
-capability structure except the PCI Express capability structure, in
-which Root Control register and Device Control register are shared
-between PME and AER. This patch assumes that all service drivers
-will be well behaved and not overwrite other service driver's
-configuration settings.
Run specified binary instead of /init from the ramdisk,
used for early userspace startup. See initrd.
+ rdrand= [X86]
+ force - Override the decision by the kernel to hide the
+ advertisement of RDRAND support (this affects
+ certain AMD processors because of buggy BIOS
+ support, specifically around the suspend/resume
+ path).
+
rdt= [HW,X86,RDT]
Turn on/off individual RDT features. List is:
cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
802 E802 protocol ax25 AX25
ethernet Ethernet protocol rose X.25 PLP layer
ipv4 IP version 4 x25 X.25 protocol
- ipx IPX token-ring IBM token ring
bridge Bridging decnet DEC net
ipv6 IP version 6 tipc TIPC
========= =================== = ========== ==================
(network) that the route leads to, the router (may be directly connected), the
route flags, and the device the route is using.
-
-5. IPX
-------
-
-The IPX protocol has no tunable values in proc/sys/net.
-
-The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
-socket giving the local and remote addresses in Novell format (that is
-network:node:port). In accordance with the strange Novell tradition,
-everything but the port is in hex. Not_Connected is displayed for sockets that
-are not tied to a specific remote address. The Tx and Rx queue sizes indicate
-the number of bytes pending for transmission and reception. The state
-indicates the state the socket is in and the uid is the owning uid of the
-socket.
-
-The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
-it gives the network number, the node number, and indicates if the network is
-the primary network. It also indicates which device it is bound to (or
-Internal for internal networks) and the Frame Type if appropriate. Linux
-supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
-IPX.
-
-The /proc/net/ipx_route table holds a list of IPX routes. For each route it
-gives the destination network, the router node (or Directly) and the network
-address of the router (or Connected) for internal networks.
-
-6. TIPC
+5. TIPC
-------
tipc_rmem
DT_DOCS = $(shell \
cd $(srctree)/$(src) && \
- find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+ find * \( -name '*.yaml' ! \
+ -name $(DT_TMP_SCHEMA) ! \
+ -name '*.example.dt.yaml' \) \
)
DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
+++ /dev/null
-ADS1015 (I2C)
-
-This device is a 12-bit A-D converter with 4 inputs.
-
-The inputs can be used single ended or in certain differential combinations.
-
-For configuration all possible combinations are mapped to 8 channels:
- 0: Voltage over AIN0 and AIN1.
- 1: Voltage over AIN0 and AIN3.
- 2: Voltage over AIN1 and AIN3.
- 3: Voltage over AIN2 and AIN3.
- 4: Voltage over AIN0 and GND.
- 5: Voltage over AIN1 and GND.
- 6: Voltage over AIN2 and GND.
- 7: Voltage over AIN3 and GND.
-
-Each channel can be configured individually:
- - pga is the programmable gain amplifier (values are full scale)
- 0: +/- 6.144 V
- 1: +/- 4.096 V
- 2: +/- 2.048 V (default)
- 3: +/- 1.024 V
- 4: +/- 0.512 V
- 5: +/- 0.256 V
- - data_rate in samples per second
- 0: 128
- 1: 250
- 2: 490
- 3: 920
- 4: 1600 (default)
- 5: 2400
- 6: 3300
-
-1) The /ads1015 node
-
- Required properties:
-
- - compatible : must be "ti,ads1015"
- - reg : I2C bus address of the device
- - #address-cells : must be <1>
- - #size-cells : must be <0>
-
- The node contains child nodes for each channel that the platform uses.
-
- Example ADS1015 node:
-
- ads1015@49 {
- compatible = "ti,ads1015";
- reg = <0x49>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- [ child node definitions... ]
- }
-
-2) channel nodes
-
- Required properties:
-
- - reg : the channel number
-
- Optional properties:
-
- - ti,gain : the programmable gain amplifier setting
- - ti,datarate : the converter data rate
-
- Example ADS1015 channel node:
-
- channel@4 {
- reg = <4>;
- ti,gain = <3>;
- ti,datarate = <5>;
- };
--- /dev/null
+Bindings for Synaptics AS370 PVT sensors
+
+Required properties:
+- compatible : "syna,as370-hwmon"
+- reg : address and length of the register set.
+
+Example:
+ hwmon@ea0810 {
+ compatible = "syna,as370-hwmon";
+ reg = <0xea0810 0xc>;
+ };
-Device-tree bindings for IBM Common Form Factor Power Supply Version 1
-----------------------------------------------------------------------
+Device-tree bindings for IBM Common Form Factor Power Supply Versions 1 and 2
+-----------------------------------------------------------------------------
Required properties:
- - compatible = "ibm,cffps1";
+ - compatible : Must be one of the following:
+ "ibm,cffps1"
+ "ibm,cffps2"
- reg = < I2C bus address >; : Address of the power supply on the
I2C bus.
"maxim,max31725",
"maxim,max31726",
"maxim,mcp980x",
+ "nxp,pct2075",
"st,stds75",
"st,stlm75",
"microchip,tcn75",
--- /dev/null
+ADS1015 (I2C)
+
+This device is a 12-bit A-D converter with 4 inputs.
+
+The inputs can be used single ended or in certain differential combinations.
+
+For configuration all possible combinations are mapped to 8 channels:
+ 0: Voltage over AIN0 and AIN1.
+ 1: Voltage over AIN0 and AIN3.
+ 2: Voltage over AIN1 and AIN3.
+ 3: Voltage over AIN2 and AIN3.
+ 4: Voltage over AIN0 and GND.
+ 5: Voltage over AIN1 and GND.
+ 6: Voltage over AIN2 and GND.
+ 7: Voltage over AIN3 and GND.
+
+Each channel can be configured individually:
+ - pga is the programmable gain amplifier (values are full scale)
+ 0: +/- 6.144 V
+ 1: +/- 4.096 V
+ 2: +/- 2.048 V (default)
+ 3: +/- 1.024 V
+ 4: +/- 0.512 V
+ 5: +/- 0.256 V
+ - data_rate in samples per second
+ 0: 128
+ 1: 250
+ 2: 490
+ 3: 920
+ 4: 1600 (default)
+ 5: 2400
+ 6: 3300
+
+1) The /ads1015 node
+
+ Required properties:
+
+ - compatible : must be "ti,ads1015"
+ - reg : I2C bus address of the device
+ - #address-cells : must be <1>
+ - #size-cells : must be <0>
+
+ The node contains child nodes for each channel that the platform uses.
+
+ Example ADS1015 node:
+
+ ads1015@49 {
+ compatible = "ti,ads1015";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ [ child node definitions... ]
+ }
+
+2) channel nodes
+
+ Required properties:
+
+ - reg : the channel number
+
+ Optional properties:
+
+ - ti,gain : the programmable gain amplifier setting
+ - ti,datarate : the converter data rate
+
+ Example ADS1015 channel node:
+
+ channel@4 {
+ reg = <4>;
+ ti,gain = <3>;
+ ti,datarate = <5>;
+ };
* ARC-HS Interrupt Distribution Unit
- This optional 2nd level interrupt controller can be used in SMP configurations for
- dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+ This optional 2nd level interrupt controller can be used in SMP configurations
+ for dynamic IRQ routing, load balancing of common/external IRQs towards core
+ intc.
Properties:
- compatible: "snps,archs-idu-intc"
- interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
-
- Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
- of the particular interrupt line of IDU corresponds to the line N+24 of the
- core interrupt controller.
-
- intc accessed via the special ARC AUX register interface, hence "reg" property
- is not specified.
+- #interrupt-cells: Must be <1> or <2>.
+
+ Value of the first cell specifies the "common" IRQ from peripheral to IDU.
+ Number N of the particular interrupt line of IDU corresponds to the line N+24
+ of the core interrupt controller.
+
+ The (optional) second cell specifies any of the following flags:
+ - bits[3:0] trigger type and level flags
+ 1 = low-to-high edge triggered
+ 2 = NOT SUPPORTED (high-to-low edge triggered)
+ 4 = active high level-sensitive <<< DEFAULT
+ 8 = NOT SUPPORTED (active low level-sensitive)
+ When no second cell is specified, the interrupt is assumed to be level
+ sensitive.
+
+ The interrupt controller is accessed via the special ARC AUX register
+ interface, hence "reg" property is not specified.
Example:
core_intc: core-interrupt-controller {
- "microchip,ksz8565"
- "microchip,ksz9893"
- "microchip,ksz9563"
+ - "microchip,ksz8563"
Optional properties:
- phy-mode : See ethernet.txt file in the same directory
Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds. Should present
- only if property "phy-reset-gpios" is available. Missing the property
- will have the duration be 1 millisecond. Numbers greater than 1000 are
- invalid and 1 millisecond will be used instead.
-- phy-reset-active-high : If present then the reset sequence using the GPIO
- specified in the "phy-reset-gpios" property is reversed (H=reset state,
- L=operation state).
-- phy-reset-post-delay : Post reset delay in milliseconds. If present then
- a delay of phy-reset-post-delay milliseconds will be observed after the
- phy-reset-gpios has been toggled. Can be omitted thus no delay is
- observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
- phy-supply : regulator that powers the Ethernet PHY.
- phy-handle : phandle to the PHY device connected to this device.
- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
per second interrupt associated with 1588 precision time protocol(PTP).
-
Optional subnodes:
- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
according to phy.txt in the same directory
+Deprecated optional properties:
+ To avoid these, create a phy node according to phy.txt in the same
+ directory, and point the fec's "phy-handle" property to it. Then use
+ the phy's reset binding, again described by phy.txt.
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds. Should be present
+ only if property "phy-reset-gpios" is available. If the property is
+ missing, the duration defaults to 1 millisecond. Numbers greater than
+ 1000 are invalid and 1 millisecond will be used instead.
+- phy-reset-active-high : If present then the reset sequence using the GPIO
+ specified in the "phy-reset-gpios" property is reversed (H=reset state,
+ L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+ a delay of phy-reset-post-delay milliseconds will be observed after the
+ phy-reset-gpios has been toggled. Can be omitted thus no delay is
+ observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
+
Example:
ethernet@83fec000 {
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
- Use "sifive,fu540-macb" for SiFive FU540-C000 SoC.
+ Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
Or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
- For "sifive,fu540-macb", second range is required to specify the
+ For "sifive,fu540-c000-gem", second range is required to specify the
address and length of the registers for GEMGXL Management block.
- interrupts: Should contain macb interrupt
- phy-mode: See ethernet.txt file in the same directory.
hwlocks: true
st,syscfg:
- $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: Should be phandle/offset/mask
items:
- description: Phandle to the syscon node which includes IRQ mux selection.
- infineon,slb9645tt
# Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
- infineon,tlv493d-a1b6
+ # Inspur Power System power supply unit version 1
+ - inspur,ipsps1
# Intersil ISL29028 Ambient Light and Proximity Sensor
- isil,isl29028
# Intersil ISL29030 Ambient Light and Proximity Sensor
description: Micro Crystal AG
"^micron,.*":
description: Micron Technology Inc.
+ "^microsoft,.*":
+ description: Microsoft Corporation
"^mikroe,.*":
description: MikroElektronika d.o.o.
"^miniand,.*":
+++ /dev/null
-Kernel driver ads1015
-=====================
-
-Supported chips:
-
- * Texas Instruments ADS1015
-
- Prefix: 'ads1015'
-
- Datasheet: Publicly available at the Texas Instruments website:
-
- http://focus.ti.com/lit/ds/symlink/ads1015.pdf
-
- * Texas Instruments ADS1115
-
- Prefix: 'ads1115'
-
- Datasheet: Publicly available at the Texas Instruments website:
-
- http://focus.ti.com/lit/ds/symlink/ads1115.pdf
-
-Authors:
- Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
-
-Description
------------
-
-This driver implements support for the Texas Instruments ADS1015/ADS1115.
-
-This device is a 12/16-bit A-D converter with 4 inputs.
-
-The inputs can be used single ended or in certain differential combinations.
-
-The inputs can be made available by 8 sysfs input files in0_input - in7_input:
-
- - in0: Voltage over AIN0 and AIN1.
- - in1: Voltage over AIN0 and AIN3.
- - in2: Voltage over AIN1 and AIN3.
- - in3: Voltage over AIN2 and AIN3.
- - in4: Voltage over AIN0 and GND.
- - in5: Voltage over AIN1 and GND.
- - in6: Voltage over AIN2 and GND.
- - in7: Voltage over AIN3 and GND.
-
-Which inputs are available can be configured using platform data or devicetree.
-
-By default all inputs are exported.
-
-Platform Data
--------------
-
-In linux/platform_data/ads1015.h platform data is defined, channel_data contains
-configuration data for the used input combinations:
-
-- pga is the programmable gain amplifier (values are full scale)
-
- - 0: +/- 6.144 V
- - 1: +/- 4.096 V
- - 2: +/- 2.048 V
- - 3: +/- 1.024 V
- - 4: +/- 0.512 V
- - 5: +/- 0.256 V
-
-- data_rate in samples per second
-
- - 0: 128
- - 1: 250
- - 2: 490
- - 3: 920
- - 4: 1600
- - 5: 2400
- - 6: 3300
-
-Example::
-
- struct ads1015_platform_data data = {
- .channel_data = {
- [2] = { .enabled = true, .pga = 1, .data_rate = 0 },
- [4] = { .enabled = true, .pga = 4, .data_rate = 5 },
- }
- };
-
-In this case only in2_input (FS +/- 4.096 V, 128 SPS) and in4_input
-(FS +/- 0.512 V, 2400 SPS) would be created.
-
-Devicetree
-----------
-
-Configuration is also possible via devicetree:
-Documentation/devicetree/bindings/hwmon/ads1015.txt
adm1031
adm1275
adm9240
- ads1015
ads7828
adt7410
adt7411
pcf8591
pmbus
powr1220
+ pxe1610
pwm-fan
raspberrypi-hwmon
sch5627
--- /dev/null
+Kernel driver inspur-ipsps1
+===========================
+
+Supported chips:
+
+ * Inspur Power System power supply unit
+
+Author: John Wang <wangzqbj@inspur.com>
+
+Description
+-----------
+
+This driver supports Inspur Power System power supplies. This driver
+is a client to the core PMBus driver.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
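+
+For example, assuming the power supply answers at address 0x58 on I2C
+bus 1 (both hypothetical values), it can be instantiated from user
+space with::
+
+	# echo ipsps1 0x58 > /sys/bus/i2c/devices/i2c-1/new_device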
+
+Sysfs entries
+-------------
+
+The following attributes are supported:
+
+======================= ======================================================
+curr1_input Measured input current
+curr1_label "iin"
+curr1_max Maximum current
+curr1_max_alarm Current high alarm
+curr2_input Measured output current in mA.
+curr2_label "iout1"
+curr2_crit Critical maximum current
+curr2_crit_alarm Current critical high alarm
+curr2_max Maximum current
+curr2_max_alarm Current high alarm
+
+fan1_alarm Fan 1 warning.
+fan1_fault Fan 1 fault.
+fan1_input Fan 1 speed in RPM.
+
+in1_alarm Input voltage under-voltage alarm.
+in1_input Measured input voltage in mV.
+in1_label "vin"
+in2_input Measured output voltage in mV.
+in2_label "vout1"
+in2_lcrit Critical minimum output voltage
+in2_lcrit_alarm Output voltage critical low alarm
+in2_max Maximum output voltage
+in2_max_alarm Output voltage high alarm
+in2_min Minimum output voltage
+in2_min_alarm Output voltage low alarm
+
+power1_alarm Input fault or alarm.
+power1_input Measured input power in uW.
+power1_label "pin"
+power1_max Input power limit
+power2_max_alarm Output power high alarm
+power2_max Output power limit
+power2_input Measured output power in uW.
+power2_label "pout"
+
+temp[1-3]_input Measured temperature
+temp[1-2]_max Maximum temperature
+temp[1-3]_max_alarm Temperature high alarm
+
+vendor Manufacturer name
+model Product model
+part_number Product part number
+serial_number Product serial number
+fw_version Firmware version
+hw_version Hardware version
+mode Work mode. Can be set to active or
+ standby. When set to standby, the PSU
+ will automatically switch between standby
+ and redundancy mode.
+======================= ======================================================
http://www.ti.com/product/tmp275
- * NXP LM75B
+ * NXP LM75B, PCT2075
- Prefix: 'lm75b'
+ Prefix: 'lm75b', 'pct2075'
Addresses scanned: none
http://www.nxp.com/documents/data_sheet/LM75B.pdf
+ http://www.nxp.com/docs/en/data-sheet/PCT2075.pdf
+
Author: Frodo Looijaard <frodol@dds.nl>
Description
+++ /dev/null
-Kernel driver pxe1610
-=====================
-
-Supported chips:
- * Infineon PXE1610
- Prefix: 'pxe1610'
- Addresses scanned: -
- Datasheet: Datasheet is not publicly available.
-
- * Infineon PXE1110
- Prefix: 'pxe1110'
- Addresses scanned: -
- Datasheet: Datasheet is not publicly available.
-
- * Infineon PXM1310
- Prefix: 'pxm1310'
- Addresses scanned: -
- Datasheet: Datasheet is not publicly available.
-
-Author: Vijay Khemka <vijaykhemka@fb.com>
-
-
-Description
------------
-
-PXE1610/PXE1110 are Multi-rail/Multiphase Digital Controllers
-and compliant to
- -- Intel VR13 DC-DC converter specifications.
- -- Intel SVID protocol.
-Used for Vcore power regulation for Intel VR13 based microprocessors
- -- Servers, Workstations, and High-end desktops
-
-PXM1310 is a Multi-rail Controller and it is compliant to
- -- Intel VR13 DC-DC converter specifications.
- -- Intel SVID protocol.
-Used for DDR3/DDR4 Memory power regulation for Intel VR13 and
-IMVP8 based systems
-
-
-Usage Notes
------------
-
-This driver does not probe for PMBus devices. You will have
-to instantiate devices explicitly.
-
-Example: the following commands will load the driver for an PXE1610
-at address 0x70 on I2C bus #4:
-
-# modprobe pxe1610
-# echo pxe1610 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
-
-It can also be instantiated by declaring in device tree
-
-
-Sysfs attributes
-----------------
-
-curr1_label "iin"
-curr1_input Measured input current
-curr1_alarm Current high alarm
-
-curr[2-4]_label "iout[1-3]"
-curr[2-4]_input Measured output current
-curr[2-4]_crit Critical maximum current
-curr[2-4]_crit_alarm Current critical high alarm
-
-in1_label "vin"
-in1_input Measured input voltage
-in1_crit Critical maximum input voltage
-in1_crit_alarm Input voltage critical high alarm
-
-in[2-4]_label "vout[1-3]"
-in[2-4]_input Measured output voltage
-in[2-4]_lcrit Critical minimum output voltage
-in[2-4]_lcrit_alarm Output voltage critical low alarm
-in[2-4]_crit Critical maximum output voltage
-in[2-4]_crit_alarm Output voltage critical high alarm
-
-power1_label "pin"
-power1_input Measured input power
-power1_alarm Input power high alarm
-
-power[2-4]_label "pout[1-3]"
-power[2-4]_input Measured output power
-
-temp[1-3]_input Measured temperature
-temp[1-3]_crit Critical high temperature
-temp[1-3]_crit_alarm Chip temperature critical high alarm
-temp[1-3]_max Maximum temperature
-temp[1-3]_max_alarm Chip temperature high alarm
--- /dev/null
+Kernel driver pxe1610
+=====================
+
+Supported chips:
+
+ * Infineon PXE1610
+
+ Prefix: 'pxe1610'
+
+ Addresses scanned: -
+
+ Datasheet: Datasheet is not publicly available.
+
+ * Infineon PXE1110
+
+ Prefix: 'pxe1110'
+
+ Addresses scanned: -
+
+ Datasheet: Datasheet is not publicly available.
+
+ * Infineon PXM1310
+
+ Prefix: 'pxm1310'
+
+ Addresses scanned: -
+
+ Datasheet: Datasheet is not publicly available.
+
+Author: Vijay Khemka <vijaykhemka@fb.com>
+
+
+Description
+-----------
+
+PXE1610/PXE1110 are Multi-rail/Multiphase Digital Controllers
+and compliant to
+
+ - Intel VR13 DC-DC converter specifications.
+ - Intel SVID protocol.
+
+Used for Vcore power regulation for Intel VR13 based microprocessors
+
+ - Servers, Workstations, and High-end desktops
+
+PXM1310 is a Multi-rail Controller and it is compliant to
+
+ - Intel VR13 DC-DC converter specifications.
+ - Intel SVID protocol.
+
+Used for DDR3/DDR4 Memory power regulation for Intel VR13 and
+IMVP8 based systems
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have
+to instantiate devices explicitly.
+
+Example: the following commands will load the driver for a PXE1610
+at address 0x70 on I2C bus #4::
+
+ # modprobe pxe1610
+ # echo pxe1610 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
+
+It can also be instantiated by declaring it in the device tree.
+
+
+Sysfs attributes
+----------------
+
+====================== ====================================
+curr1_label "iin"
+curr1_input Measured input current
+curr1_alarm Current high alarm
+
+curr[2-4]_label "iout[1-3]"
+curr[2-4]_input Measured output current
+curr[2-4]_crit Critical maximum current
+curr[2-4]_crit_alarm Current critical high alarm
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_crit Critical maximum input voltage
+in1_crit_alarm Input voltage critical high alarm
+
+in[2-4]_label "vout[1-3]"
+in[2-4]_input Measured output voltage
+in[2-4]_lcrit Critical minimum output voltage
+in[2-4]_lcrit_alarm Output voltage critical low alarm
+in[2-4]_crit Critical maximum output voltage
+in[2-4]_crit_alarm Output voltage critical high alarm
+
+power1_label "pin"
+power1_input Measured input power
+power1_alarm Input power high alarm
+
+power[2-4]_label "pout[1-3]"
+power[2-4]_input Measured output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_crit Critical high temperature
+temp[1-3]_crit_alarm Chip temperature critical high alarm
+temp[1-3]_max Maximum temperature
+temp[1-3]_max_alarm Chip temperature high alarm
+====================== ====================================
Addresses scanned: none
- Datasheet: Not publicly available
+ Datasheet: http://www.sensirion.com/file/datasheet_shtw1
+
+
+
+ * Sensirion SHTC3
+
+ Prefix: 'shtc3'
+
+ Addresses scanned: none
+
+ Datasheet: http://www.sensirion.com/file/datasheet_shtc3
Description
-----------
-This driver implements support for the Sensirion SHTC1 chip, a humidity and
-temperature sensor. Temperature is measured in degrees celsius, relative
-humidity is expressed as a percentage. Driver can be used as well for SHTW1
-chip, which has the same electrical interface.
+This driver implements support for the Sensirion SHTC1, SHTW1, and SHTC3
+chips, which are humidity and temperature sensors. Temperature is measured
+in degrees Celsius; relative humidity is expressed as a percentage.
The device communicates with the I2C protocol. All sensors are set to I2C
address 0x70. See Documentation/i2c/instantiating-devices for methods to
errors, no warnings, and few if any check messages. If there are any
messages, please be prepared to explain.
+* Please use the standard multi-line comment style. Do not mix C and C++
+ style comments in a single driver (with the exception of the SPDX license
+ identifier).
+
* If your patch generates checkpatch errors, warnings, or check messages,
please refrain from explanations such as "I prefer that coding style".
Keep in mind that each unnecessary message helps hiding a real problem,
completely initialize your chip and your driver first, then register with
the hwmon subsystem.
-* Use devm_hwmon_device_register_with_groups() or, if your driver needs a remove
- function, hwmon_device_register_with_groups() to register your driver with the
+* Use devm_hwmon_device_register_with_info() or, if your driver needs a remove
+ function, hwmon_device_register_with_info() to register your driver with the
hwmon subsystem. Try using devm_add_action() instead of a remove function if
possible. Do not use hwmon_device_register().
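+
+  As a brief sketch (all ``foo`` names and the fixed values are
+  hypothetical), registration with the _info API might look like::
+
+	static umode_t foo_is_visible(const void *data,
+				      enum hwmon_sensor_types type,
+				      u32 attr, int channel)
+	{
+		return 0444;	/* everything read-only in this sketch */
+	}
+
+	static int foo_read(struct device *dev, enum hwmon_sensor_types type,
+			    u32 attr, int channel, long *val)
+	{
+		*val = 42000;	/* a real driver reads the chip here */
+		return 0;
+	}
+
+	static const struct hwmon_ops foo_hwmon_ops = {
+		.is_visible = foo_is_visible,
+		.read = foo_read,
+	};
+
+	static const struct hwmon_channel_info *foo_info[] = {
+		HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+		NULL
+	};
+
+	static const struct hwmon_chip_info foo_chip_info = {
+		.ops = &foo_hwmon_ops,
+		.info = foo_info,
+	};
+
+	/* in probe: */
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", data,
+							 &foo_chip_info,
+							 NULL);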
These flags will be acted upon accordingly by the core ``ktls`` code.
TLS device feature flags only control adding of new TLS connection
offloads, old connections will remain active after flags are cleared.
-
-Known bugs
-==========
-
-skb_orphan() leaks clear text
------------------------------
-
-Currently drivers depend on the :c:member:`sk` member of
-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
-encryption. Any operation which removes or does not preserve the socket
-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
-will cause the driver to miss the packets and lead to clear text leaks.
-
-Redirects leak clear text
--------------------------
-
-In the RX direction, if segment has already been decrypted by the device
-and it gets redirected or mirrored - clear text will be transmitted out.
media, receives them from user space program and instead of sending
packets via physical media sends them to the user space program.
-Let's say that you configured IPX on the tap0, then whenever
-the kernel sends an IPX packet to tap0, it is passed to the application
+Let's say that you configured IPv6 on tap0; then whenever
+the kernel sends an IPv6 packet to tap0, it is passed to the application
(VTun for example). The application encrypts, compresses and sends it to
the other side over TCP or UDP. The application on the other side decompresses
and decrypts the data received and writes the packet to the TAP device,
--- /dev/null
+Embargoed hardware issues
+=========================
+
+Scope
+-----
+
+Hardware issues which result in security problems are a different category
+of security bugs than pure software bugs which only affect the Linux
+kernel.
+
+Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
+differently because they usually affect all Operating Systems ("OS") and
+therefore need coordination across different OS vendors, distributions,
+hardware vendors and other parties. For some of the issues, software
+mitigations can depend on microcode or firmware updates, which need further
+coordination.
+
+.. _Contact:
+
+Contact
+-------
+
+The Linux kernel hardware security team is separate from the regular Linux
+kernel security team.
+
+The team only handles the coordination of embargoed hardware security
+issues. Reports of pure software security bugs in the Linux kernel are not
+handled by this team and the reporter will be guided to contact the regular
+Linux kernel security team (:ref:`Documentation/admin-guide/
+<securitybugs>`) instead.
+
+The team can be contacted by email at <hardware-security@kernel.org>. This
+is a private list of security officers who will help you to coordinate an
+issue according to our documented process.
+
+The list is encrypted, and email to the list can be encrypted with either
+PGP or S/MIME; it must be signed with the reporter's PGP key or S/MIME
+certificate. The list's PGP key and S/MIME certificate are available from
+https://www.kernel.org/....
+
+While hardware security issues are often handled by the affected hardware
+vendor, we welcome contact from researchers or individuals who have
+identified a potential hardware flaw.
+
+Hardware security officers
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The current team of hardware security officers:
+
+ - Linus Torvalds (Linux Foundation Fellow)
+ - Greg Kroah-Hartman (Linux Foundation Fellow)
+ - Thomas Gleixner (Linux Foundation Fellow)
+
+Operation of mailing-lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The encrypted mailing-lists which are used in our process are hosted on
+Linux Foundation's IT infrastructure. By providing this service Linux
+Foundation's director of IT Infrastructure security technically has the
+ability to access the embargoed information, but is obliged to
+confidentiality by his employment contract. Linux Foundation's director of
+IT Infrastructure security is also responsible for the kernel.org
+infrastructure.
+
+The Linux Foundation's current director of IT Infrastructure security is
+Konstantin Ryabitsev.
+
+
+Non-disclosure agreements
+-------------------------
+
+The Linux kernel hardware security team is not a formal body and therefore
+unable to enter into any non-disclosure agreements. The kernel community
+is aware of the sensitive nature of such issues and offers a Memorandum of
+Understanding instead.
+
+
+Memorandum of Understanding
+---------------------------
+
+The Linux kernel community has a deep understanding of the requirement to
+keep hardware security issues under embargo for coordination between
+different OS vendors, distributors, hardware vendors and other parties.
+
+The Linux kernel community has successfully handled hardware security
+issues in the past and has the necessary mechanisms in place to allow
+community compliant development under embargo restrictions.
+
+The Linux kernel community has a dedicated hardware security team for
+initial contact, which oversees the process of handling such issues under
+embargo rules.
+
+The hardware security team identifies the developers (domain experts) who
+will form the initial response team for a particular issue. The initial
+response team can bring in further developers (domain experts) to address
+the issue in the best technical way.
+
+All involved developers pledge to adhere to the embargo rules and to keep
+the received information confidential. Violation of the pledge will lead to
+immediate exclusion from the current issue and removal from all related
+mailing-lists. In addition, the hardware security team will also exclude
+the offender from future issues. The impact of this consequence is a highly
+effective deterrent in our community. In case a violation happens the
+hardware security team will inform the involved parties immediately. If you
+or anyone becomes aware of a potential violation, please report it
+immediately to the Hardware security officers.
+
+
+Process
+^^^^^^^
+
+Due to the globally distributed nature of Linux kernel development,
+face-to-face meetings to address hardware security issues are almost
+impossible. Phone conferences are hard to coordinate due to time zones and
+other factors and should be only used when absolutely necessary. Encrypted
+email has been proven to be the most effective and secure communication
+method for these types of issues.
+
+Start of Disclosure
+"""""""""""""""""""
+
+Disclosure starts by contacting the Linux kernel hardware security team by
+email. This initial contact should contain a description of the problem and
+a list of any known affected hardware. If your organization builds or
+distributes the affected hardware, we encourage you to also consider what
+other hardware could be affected.
+
+The hardware security team will provide an incident-specific encrypted
+mailing-list which will be used for initial discussion with the reporter,
+further disclosure and coordination.
+
+The hardware security team will provide the disclosing party a list of
+developers (domain experts) who should be informed initially about the
+issue after confirming with the developers that they will adhere to this
+Memorandum of Understanding and the documented process. These developers
+form the initial response team and will be responsible for handling the
+issue after initial contact. The hardware security team is supporting the
+response team, but is not necessarily involved in the mitigation
+development process.
+
+While individual developers might be covered by a non-disclosure agreement
+via their employer, they cannot enter individual non-disclosure agreements
+in their role as Linux kernel developers. They will, however, agree to
+adhere to this documented process and the Memorandum of Understanding.
+
+
+Disclosure
+""""""""""
+
+The disclosing party provides detailed information to the initial response
+team via the specific encrypted mailing-list.
+
+From our experience the technical documentation of these issues is usually
+a sufficient starting point and further technical clarification is best
+done via email.
+
+Mitigation development
+""""""""""""""""""""""
+
+The initial response team sets up an encrypted mailing-list or repurposes
+an existing one if appropriate. The disclosing party should provide a list
+of contacts for all other parties who have already been, or should be,
+informed about the issue. The response team contacts these parties so they
+can name experts who should be subscribed to the mailing-list.
+
+Using a mailing-list is close to the normal Linux development process and
+has been successfully used in developing mitigations for various hardware
+security issues in the past.
+
+The mailing-list operates in the same way as normal Linux development.
+Patches are posted, discussed and reviewed and if agreed on applied to a
+non-public git repository which is only accessible to the participating
+developers via a secure connection. The repository contains the main
+development branch against the mainline kernel and backport branches for
+stable kernel versions as necessary.
+
+The initial response team will identify further experts from the Linux
+kernel developer community as needed and inform the disclosing party about
+their participation. Bringing in experts can happen at any time of the
+development process and often needs to be handled in a timely manner.
+
+Coordinated release
+"""""""""""""""""""
+
+The involved parties will negotiate the date and time where the embargo
+ends. At that point the prepared mitigations are integrated into the
+relevant kernel trees and published.
+
+While we understand that hardware security issues need coordinated embargo
+time, the embargo time should be constrained to the minimum time which is
+required for all involved parties to develop, test and prepare the
+mitigations. Extending embargo time artificially to meet conference talk
+dates or other non-technical reasons is creating more work and burden for
+the involved developers and response teams as the patches need to be kept
+up to date in order to follow the ongoing upstream kernel development,
+which might create conflicting changes.
+
+CVE assignment
+""""""""""""""
+
+Neither the hardware security team nor the initial response team assign
+CVEs, nor are CVEs required for the development process. If CVEs are
+provided by the disclosing party they can be used for documentation
+purposes.
+
+Process ambassadors
+-------------------
+
+For assistance with this process we have established ambassadors in various
+organizations, who can answer questions about or provide guidance on the
+reporting process and further handling. Ambassadors are not involved in the
+disclosure of a particular issue, unless requested by a response team or by
+an involved disclosed party. The current ambassadors list:
+
+ ============= ========================================================
+ ARM
+ AMD
+ IBM
+ Intel
+ Qualcomm Trilok Soni <tsoni@codeaurora.org>
+
+ Microsoft Sasha Levin <sashal@kernel.org>
+ VMware
+ Xen Andrew Cooper <andrew.cooper3@citrix.com>
+
+ Canonical Tyler Hicks <tyhicks@canonical.com>
+ Debian Ben Hutchings <ben@decadent.org.uk>
+ Oracle Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
+ SUSE Jiri Kosina <jkosina@suse.cz>
+
+ Amazon
+ Google Kees Cook <keescook@chromium.org>
+ ============= ========================================================
+
+If you want your organization to be added to the ambassadors list, please
+contact the hardware security team. The nominated ambassador has to
+understand and support our process fully and is ideally well connected in
+the Linux kernel community.
+
+Encrypted mailing-lists
+-----------------------
+
+We use encrypted mailing-lists for communication. The operating principle
+of these lists is that email sent to the list is encrypted either with the
+list's PGP key or with the list's S/MIME certificate. The mailing-list
+software decrypts the email and re-encrypts it individually for each
+subscriber with the subscriber's PGP key or S/MIME certificate. Details
+about the mailing-list software and the setup which is used to ensure the
+security of the lists and protection of the data can be found here:
+https://www.kernel.org/....
+
+List keys
+^^^^^^^^^
+
+For initial contact see :ref:`Contact`. For incident specific mailing-lists
+the key and S/MIME certificate are conveyed to the subscribers by email
+sent from the specific list.
+
+Subscription to incident specific lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Subscription is handled by the response teams. Disclosed parties who want
+to participate in the communication send a list of potential subscribers to
+the response team so the response team can validate subscription requests.
+
+Each subscriber needs to send a subscription request to the response team
+by email. The email must be signed with the subscriber's PGP key or S/MIME
+certificate. If a PGP key is used, it must be available from a public key
+server and is ideally connected to the Linux kernel's PGP web of trust. See
+also: https://www.kernel.org/signature.html.
+
+The response team verifies that the subscriber request is valid and adds
+the subscriber to the list. After subscription the subscriber will receive
+email from the mailing-list which is signed either with the list's PGP key
+or the list's S/MIME certificate. The subscriber's email client can extract
+the PGP key or the S/MIME certificate from the signature so the subscriber
+can send encrypted email to the list.
+
submit-checklist
kernel-docs
deprecated
+ embargoed-hardware-issues
These are some overall technical guides that have been put here for now for
lack of a better place.
u32 res1 = 0; /* Reserved */
u64 res2 = 0; /* Reserved */
u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
- u32 res3; /* Reserved for additional RISC-V specific header */
+	u32 magic2 = 0x05435352;	/* Magic number 2, little endian, "RSC\x05" */
u32 res4; /* Reserved for PE COFF offset */
This header format is compliant with PE/COFF header and largely inspired from
Bits 16:31 - Major version
This preserves compatibility across newer and older versions of the header.
- The current version is defined as 0.1.
+ The current version is defined as 0.2.
-- res3 is reserved for offset to any other additional fields. This makes the
- header extendible in future. One example would be to accommodate ISA
- extension for RISC-V in future. For current version, it is set to be zero.
+- The "magic" field is deprecated as of version 0.2. In a future
+ release, it may be removed. This originally should have matched up
+ with the ARM64 header "magic" field, but unfortunately does not.
+ The "magic2" field replaces it, matching up with the ARM64 header.
-- In current header, the flag field has only one field.
+- In the current header, the flags field defines only a single flag.
Bit 0: Kernel endianness. 1 if BE, 0 if LE.
- Image size is mandatory for boot loader to load kernel image. Booting will
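
For illustration, a boot loader might sanity-check this header roughly as
follows (a sketch only, mirroring the C structure and the version encoding
described above; real loaders will do more)::

    #include <stdint.h>

    /* Layout per the header description above; all fields little endian. */
    struct riscv_image_header {
            uint32_t code0;
            uint32_t code1;
            uint64_t text_offset;
            uint64_t image_size;
            uint64_t flags;
            uint32_t version;
            uint32_t res1;
            uint64_t res2;
            uint64_t magic;         /* deprecated as of version 0.2 */
            uint32_t magic2;
            uint32_t res4;
    };

    static int riscv_image_check(const struct riscv_image_header *h)
    {
            if (h->magic2 != 0x05435352)    /* "RSC\x05", little endian */
                    return -1;
            /* Bits 16:31 carry the major version, bits 0:15 the minor. */
            if ((h->version >> 16) != 0)    /* only major version 0 known */
                    return -1;
            return 0;
    }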
tpm_vtpm_proxy
xen-tpmfront
+ tpm_ftpm_tee
--- /dev/null
+=============================================
+Firmware TPM Driver
+=============================================
+
+This document describes the firmware Trusted Platform Module (fTPM)
+device driver.
+
+Introduction
+============
+
+This driver is a shim for firmware implemented in ARM's TrustZone
+environment. The driver allows programs to interact with the TPM in the same
+way they would interact with a hardware TPM.
+
+Design
+======
+
+The driver acts as a thin layer that passes commands to and from a TPM
+implemented in firmware. The driver itself doesn't contain much logic and is
+used more like a dumb pipe between firmware and kernel/userspace.
+
+The firmware itself is based on the following paper:
+https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/ftpm1.pdf
+
+When the driver is loaded it will expose ``/dev/tpmX`` character devices
+through which userspace programs can communicate with the firmware TPM.
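+
+As an illustration of this interface, the sketch below submits a raw
+TPM2_GetRandom command to a hypothetical ``/dev/tpm0`` node (command bytes
+follow the TPM 2.0 specification; error handling is abbreviated and the
+program is not part of the driver)::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+            /* TPM2_GetRandom asking for 8 bytes of randomness. */
+            unsigned char cmd[] = {
+                    0x80, 0x01,             /* tag: TPM_ST_NO_SESSIONS */
+                    0x00, 0x00, 0x00, 0x0c, /* commandSize: 12 */
+                    0x00, 0x00, 0x01, 0x7b, /* commandCode: TPM_CC_GetRandom */
+                    0x00, 0x08,             /* bytesRequested: 8 */
+            };
+            unsigned char resp[64];
+            ssize_t n;
+            int fd;
+
+            fd = open("/dev/tpm0", O_RDWR);
+            if (fd < 0)
+                    return 1;
+            /* Write the full command, then read back the response. */
+            if (write(fd, cmd, sizeof(cmd)) != sizeof(cmd))
+                    return 1;
+            n = read(fd, resp, sizeof(resp));
+            if (n > 0)
+                    printf("received %zd response bytes\n", n);
+            close(fd);
+            return 0;
+    }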
M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ethernet/realtek/r8169.c
+F: drivers/net/ethernet/realtek/r8169*
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Supported
F: drivers/video/backlight/adp8860_bl.c
-ADS1015 HARDWARE MONITOR DRIVER
-M: Dirk Eibach <eibach@gdsys.de>
-L: linux-hwmon@vger.kernel.org
-S: Maintained
-F: Documentation/hwmon/ads1015.rst
-F: drivers/hwmon/ads1015.c
-F: include/linux/platform_data/ads1015.h
-
ADT746X FAN DRIVER
M: Colin Leroy <colin@colino.net>
S: Maintained
F: drivers/crypto/sunxi-ss/
ALLWINNER VPU DRIVER
-M: Maxime Ripard <maxime.ripard@bootlin.com>
+M: Maxime Ripard <mripard@kernel.org>
M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/clk/sunxi/
ARM/Allwinner sunXi SoC support
-M: Maxime Ripard <maxime.ripard@bootlin.com>
+M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: fs/cachefiles/
CADENCE MIPI-CSI2 BRIDGES
-M: Maxime Ripard <maxime.ripard@bootlin.com>
+M: Maxime Ripard <mripard@kernel.org>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/cdns,*.txt
DRM DRIVERS AND MISC GPU PATCHES
M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
-M: Maxime Ripard <maxime.ripard@bootlin.com>
+M: Maxime Ripard <mripard@kernel.org>
M: Sean Paul <sean@poorly.run>
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
F: include/linux/vga*
DRM DRIVERS FOR ALLWINNER A10
-M: Maxime Ripard <maxime.ripard@bootlin.com>
+M: Maxime Ripard <mripard@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
F: drivers/edac/aspeed_edac.c
F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
+EDAC-BLUEFIELD
+M: Shravan Kumar Ramani <sramani@mellanox.com>
+S: Supported
+F: drivers/edac/bluefield_edac.c
+
EDAC-CALXEDA
M: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
EDAC-CORE
M: Borislav Petkov <bp@alien8.de>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
+M: Tony Luck <tony.luck@intel.com>
R: James Morse <james.morse@arm.com>
+R: Robert Richter <rrichter@marvell.com>
L: linux-edac@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
S: Supported
F: Documentation/admin-guide/ras.rst
F: Documentation/driver-api/edac.rst
M: Heiner Kallweit <hkallweit1@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/ABI/testing/sysfs-bus-mdio
+F: Documentation/ABI/testing/sysfs-class-net-phydev
F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
F: Documentation/devicetree/bindings/net/mdio*
F: Documentation/networking/phy.rst
F: drivers/perf/fsl_imx8_ddr_perf.c
F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
+FREESCALE IMX I2C DRIVER
+M: Oleksij Rempel <o.rempel@pengutronix.de>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-imx.c
+F: Documentation/devicetree/bindings/i2c/i2c-imx.txt
+
FREESCALE IMX LPI2C DRIVER
M: Dong Aisheng <aisheng.dong@nxp.com>
L: linux-i2c@vger.kernel.org
F: drivers/scsi/storvsc_drv.c
F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
-F: drivers/iommu/hyperv_iommu.c
+F: drivers/iommu/hyperv-iommu.c
F: net/vmw_vsock/hyperv_transport.c
F: include/clocksource/hyperv_timer.h
F: include/linux/hyperv.h
M: Gregory CLEMENT <gregory.clement@bootlin.com>
L: linux-i2c@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+F: Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
F: drivers/i2c/busses/i2c-mv64xxx.c
I2C OVER PARALLEL PORT
S: Supported
F: drivers/scsi/isci/
+INTEL CPU family model numbers
+M: Tony Luck <tony.luck@intel.com>
+M: x86@kernel.org
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: arch/x86/include/asm/intel-family.h
+
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Jani Nikula <jani.nikula@linux.intel.com>
M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
L: linux-fsdevel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
S: Supported
-F: fs/iomap.c
F: fs/iomap/
F: include/linux/iomap.h
F: fs/io_uring.c
F: include/uapi/linux/io_uring.h
-IP MASQUERADING
-M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
-S: Maintained
-F: net/ipv4/netfilter/ipt_MASQUERADE.c
-
IPMI SUBSYSTEM
M: Corey Minyard <minyard@acm.org>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
F: tools/kvm/
F: tools/testing/selftests/kvm/
-KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
-M: Joerg Roedel <joro@8bytes.org>
-L: kvm@vger.kernel.org
-W: http://www.linux-kvm.org/
-S: Maintained
-F: arch/x86/include/asm/svm.h
-F: arch/x86/kvm/svm.c
-
KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
M: Marc Zyngier <maz@kernel.org>
R: James Morse <james.morse@arm.com>
M: Janosch Frank <frankja@linux.ibm.com>
R: David Hildenbrand <david@redhat.com>
R: Cornelia Huck <cohuck@redhat.com>
-L: linux-s390@vger.kernel.org
+L: kvm@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
S: Supported
KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Paolo Bonzini <pbonzini@redhat.com>
M: Radim Krčmář <rkrcmar@redhat.com>
+R: Sean Christopherson <sean.j.christopherson@intel.com>
+R: Vitaly Kuznetsov <vkuznets@redhat.com>
+R: Wanpeng Li <wanpengli@tencent.com>
+R: Jim Mattson <jmattson@google.com>
+R: Joerg Roedel <joro@8bytes.org>
L: kvm@vger.kernel.org
W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
F: arch/x86/kvm/
F: arch/x86/kvm/*/
F: arch/x86/include/uapi/asm/kvm*
+F: arch/x86/include/uapi/asm/vmx.h
+F: arch/x86/include/uapi/asm/svm.h
F: arch/x86/include/asm/kvm*
F: arch/x86/include/asm/pvclock-abi.h
+F: arch/x86/include/asm/svm.h
+F: arch/x86/include/asm/vmx.h
F: arch/x86/kernel/kvm.c
F: arch/x86/kernel/kvmclock.c
KEYS-TRUSTED
M: James Bottomley <jejb@linux.ibm.com>
-M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
F: include/linux/libnvdimm.h
F: include/uapi/linux/ndctl.h
+LICENSES and SPDX stuff
+M: Thomas Gleixner <tglx@linutronix.de>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+L: linux-spdx@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
+F: COPYING
+F: Documentation/process/license-rules.rst
+F: LICENSES/
+F: scripts/spdxcheck-test.sh
+F: scripts/spdxcheck.py
+
LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <mb@lightnvm.io>
W: http://github/OpenChannelSSD
M: Sridhar Samudrala <sridhar.samudrala@intel.com>
L: netdev@vger.kernel.org
S: Supported
-F: driver/net/net_failover.c
+F: drivers/net/net_failover.c
F: include/net/net_failover.h
F: Documentation/networking/net_failover.rst
F: drivers/net/phy/sfp*
F: include/linux/phylink.h
F: include/linux/sfp.h
+K: phylink
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
F: include/uapi/linux/arm_sdei.h
SOFTWARE RAID (Multiple Disks) SUPPORT
-M: Shaohua Li <shli@kernel.org>
+M: Song Liu <song@kernel.org>
L: linux-raid@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
S: Supported
F: drivers/md/Makefile
F: drivers/md/Kconfig
F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER
-M: Anirudha Sarangi <anirudh@xilinx.com>
-M: John Linn <John.Linn@xilinx.com>
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
VERSION = 5
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
NAME = Bobtail Squid
# *DOCUMENTATION*
# for CONFIG_OF_ALL_DTBS test
dtstree := $(srctree)/$(src)
dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+
+# board-specific dtc flags
+DTC_FLAGS_hsdk += --pad 20
.macro FAKE_RET_FROM_EXCPN
lr r9, [status32]
- bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+ bic r9, r9, STATUS_AE_MASK
or r9, r9, STATUS_IE_MASK
kflag r9
.endm
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(.text.arcfp)
#else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(.text)
#endif
#ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(.data.arcfp)
#else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(.data)
#endif
#endif /* __ASSEMBLY__ */
*/
#define MACHINE_START(_type, _name) \
static const struct machine_desc __mach_desc_##_type \
-__used \
-__attribute__((__section__(".arch.info.init"))) = { \
+__used __section(.arch.info.init) = { \
.name = _name,
#define MACHINE_END \
__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}
-static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
- unsigned int distr)
+static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
+ bool set_distr, unsigned int distr)
{
union {
unsigned int word;
};
} data;
- data.distr = distr;
- data.lvl = lvl;
+ data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
+ if (set_distr)
+ data.distr = distr;
+ if (set_lvl)
+ data.lvl = lvl;
__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
+static void idu_irq_ack(struct irq_data *data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_mask_ack(struct irq_data *data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+ __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+ __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)
else
distribution_mode = IDU_M_DISTRI_RR;
- idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
+ idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
return IRQ_SET_MASK_OK;
}
+static int idu_irq_set_type(struct irq_data *data, u32 type)
+{
+ unsigned long flags;
+
+ /*
+ * ARCv2 IDU HW does not support inverse polarity, so these are the
+ * only interrupt types supported.
+ */
+ if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ idu_set_mode(data->hwirq, true,
+ type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
+ IDU_M_TRIG_LEVEL,
+ false, 0);
+
+ raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+ return 0;
+}
+
static void idu_irq_enable(struct irq_data *data)
{
/*
.name = "MCIP IDU Intc",
.irq_mask = idu_irq_mask,
.irq_unmask = idu_irq_unmask,
+ .irq_ack = idu_irq_ack,
+ .irq_mask_ack = idu_irq_mask_ack,
.irq_enable = idu_irq_enable,
+ .irq_set_type = idu_irq_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = idu_irq_set_affinity,
#endif
}
static const struct irq_domain_ops idu_irq_ops = {
- .xlate = irq_domain_xlate_onecell,
+ .xlate = irq_domain_xlate_onetwocell,
.map = idu_irq_map,
};
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
+ /* Fall through */
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
- /*nobreak*/
+ /* fall through */
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
- /*nobreak */
+ /* fall through */
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
if (is_isa_arcv2() && ioc_enable && coherent)
dev->dma_coherent = true;
- dev_info(dev, "use %sncoherent DMA ops\n",
+ dev_info(dev, "use %scoherent DMA ops\n",
dev->dma_coherent ? "" : "non");
}
*/
#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <linux/smp.h>
#include <asm/arcregs.h>
#include <asm/io.h>
#include <asm/mach_desc.h>
+int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
+
#define ARC_CCM_UNUSED_ADDR 0x60000000
static void __init hsdk_init_per_cpu(unsigned int cpu)
iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
}
+static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
+{
+ void *fdt = initial_boot_params;
+ const void *prop;
+ int node, ret;
+ bool dt_coh_set;
+
+ node = fdt_path_offset(fdt, path);
+ if (node < 0)
+ goto tweak_fail;
+
+ prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
+ if (!prop && ret != -FDT_ERR_NOTFOUND)
+ goto tweak_fail;
+
+ dt_coh_set = ret != -FDT_ERR_NOTFOUND;
+ ret = 0;
+
+ /* need to remove "dma-coherent" property */
+ if (dt_coh_set && !coherent)
+ ret = fdt_delprop(fdt, node, "dma-coherent");
+
+ /* need to set "dma-coherent" property */
+ if (!dt_coh_set && coherent)
+ ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
+
+ if (ret < 0)
+ goto tweak_fail;
+
+ return 0;
+
+tweak_fail:
+ pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
+ return -EFAULT;
+}
+
enum hsdk_axi_masters {
M_HS_CORE = 0,
M_HS_RTT,
#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
+static void __init hsdk_init_memory_bridge_axi_dmac(void)
+{
+ bool coherent = !!arc_hsdk_axi_dmac_coherent;
+ u32 axi_m_slv1, axi_m_oft1;
+
+ /*
+ * Don't tweak memory bridge configuration if we failed to tweak DTB
+	 * as we will end up in an inconsistent state.
+ */
+ if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
+ return;
+
+ if (coherent) {
+ axi_m_slv1 = 0x77999999;
+ axi_m_oft1 = 0x76DCBA98;
+ } else {
+ axi_m_slv1 = 0x77777777;
+ axi_m_oft1 = 0x76543210;
+ }
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
+ writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
+ writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
+
+ writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
+ writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
+ writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
+ writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
+ writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
+}
+
static void __init hsdk_init_memory_bridge(void)
{
u32 reg;
writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
- writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
- writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
- writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
- writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
- writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
-
- writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
- writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
- writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
- writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
- writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
-
writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
+ hsdk_init_memory_bridge_axi_dmac();
+
/*
* PAE remapping for DMA clients does not work due to an RTL bug, so
* CREG_PAE register must be programmed to all zeroes, otherwise it
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+ select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
uart0: serial@0 {
compatible = "ti,am3352-uart", "ti,omap3-uart";
clock-frequency = <48000000>;
- reg = <0x0 0x2000>;
+ reg = <0x0 0x1000>;
interrupts = <72>;
status = "disabled";
dmas = <&edma 26 0>, <&edma 27 0>;
uart1: serial@0 {
compatible = "ti,am3352-uart", "ti,omap3-uart";
clock-frequency = <48000000>;
- reg = <0x0 0x2000>;
+ reg = <0x0 0x1000>;
interrupts = <73>;
status = "disabled";
dmas = <&edma 28 0>, <&edma 29 0>;
uart2: serial@0 {
compatible = "ti,am3352-uart", "ti,omap3-uart";
clock-frequency = <48000000>;
- reg = <0x0 0x2000>;
+ reg = <0x0 0x1000>;
interrupts = <74>;
status = "disabled";
dmas = <&edma 30 0>, <&edma 31 0>;
uart3: serial@0 {
compatible = "ti,am3352-uart", "ti,omap3-uart";
clock-frequency = <48000000>;
- reg = <0x0 0x2000>;
+ reg = <0x0 0x1000>;
interrupts = <44>;
status = "disabled";
};
uart4: serial@0 {
compatible = "ti,am3352-uart", "ti,omap3-uart";
clock-frequency = <48000000>;
- reg = <0x0 0x2000>;
+ reg = <0x0 0x1000>;
interrupts = <45>;
status = "disabled";
};
uart5: serial@0 {
compatible = "ti,am3352-uart", "ti,omap3-uart";
clock-frequency = <48000000>;
- reg = <0x0 0x2000>;
+ reg = <0x0 0x1000>;
interrupts = <46>;
status = "disabled";
};
target-module@cc000 { /* 0x481cc000, ap 60 46.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0xcc020 0x4>;
+ reg-names = "rev";
ti,hwmods = "d_can0";
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
target-module@d0000 { /* 0x481d0000, ap 62 42.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0xd0020 0x4>;
+ reg-names = "rev";
ti,hwmods = "d_can1";
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
interrupt-names = "edma3_tcerrint";
};
- mmc3: mmc@47810000 {
- compatible = "ti,omap4-hsmmc";
+ target-module@47810000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
ti,hwmods = "mmc3";
- ti,needs-special-reset;
- interrupts = <29>;
- reg = <0x47810000 0x1000>;
- status = "disabled";
+ reg = <0x478102fc 0x4>,
+ <0x47810110 0x4>,
+ <0x47810114 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x47810000 0x1000>;
+
+ mmc3: mmc@0 {
+ compatible = "ti,omap4-hsmmc";
+ ti,needs-special-reset;
+ interrupts = <29>;
+ reg = <0x0 0x1000>;
+ };
};
usb: usb@47400000 {
interrupt-names = "edma3_tcerrint";
};
- mmc3: mmc@47810000 {
- compatible = "ti,omap4-hsmmc";
- reg = <0x47810000 0x1000>;
+ target-module@47810000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
ti,hwmods = "mmc3";
- ti,needs-special-reset;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
+ reg = <0x478102fc 0x4>,
+ <0x47810110 0x4>,
+ <0x47810114 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x47810000 0x1000>;
+
+ mmc3: mmc@0 {
+ compatible = "ti,omap4-hsmmc";
+ ti,needs-special-reset;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x1000>;
+ };
};
sham: sham@53100000 {
target-module@cc000 { /* 0x481cc000, ap 50 46.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0xcc020 0x4>;
+ reg-names = "rev";
ti,hwmods = "d_can0";
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0xd0020 0x4>;
+ reg-names = "rev";
ti,hwmods = "d_can1";
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
};
&mmc1 {
- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+ pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
pinctrl-1 = <&mmc1_pins_hs>;
- pinctrl-2 = <&mmc1_pins_sdr12>;
- pinctrl-3 = <&mmc1_pins_sdr25>;
- pinctrl-4 = <&mmc1_pins_sdr50>;
- pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
};
&mmc2 {
};
&mmc1 {
- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+ pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
pinctrl-1 = <&mmc1_pins_hs>;
- pinctrl-2 = <&mmc1_pins_sdr12>;
- pinctrl-3 = <&mmc1_pins_sdr25>;
- pinctrl-4 = <&mmc1_pins_sdr50>;
- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
};
&mmc2 {
};
&mmc1 {
- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+ pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
pinctrl-1 = <&mmc1_pins_hs>;
- pinctrl-2 = <&mmc1_pins_default>;
- pinctrl-3 = <&mmc1_pins_hs>;
- pinctrl-4 = <&mmc1_pins_sdr50>;
- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
- pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
};
&mmc2 {
};
};
-&gpio7 {
+&gpio7_target {
ti,no-reset-on-init;
ti,no-idle-on-init;
};
bus-width = <4>;
cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+ no-1-8-v;
};
&mmc2 {
};
&mmc1 {
- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+ pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default>;
pinctrl-1 = <&mmc1_pins_hs>;
- pinctrl-2 = <&mmc1_pins_sdr12>;
- pinctrl-3 = <&mmc1_pins_sdr25>;
- pinctrl-4 = <&mmc1_pins_sdr50>;
- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
vmmc-supply = <&vdd_3v3>;
vqmmc-supply = <&ldo1_reg>;
};
};
&mmc1 {
- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+ pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default>;
pinctrl-1 = <&mmc1_pins_hs>;
- pinctrl-2 = <&mmc1_pins_sdr12>;
- pinctrl-3 = <&mmc1_pins_sdr25>;
- pinctrl-4 = <&mmc1_pins_sdr50>;
- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
vmmc-supply = <&vdd_3v3>;
vqmmc-supply = <&ldo1_reg>;
};
phy-supply = <&ldousb_reg>;
};
-&gpio7 {
+&gpio7_target {
ti,no-reset-on-init;
ti,no-idle-on-init;
};
};
};
- target-module@51000 { /* 0x48051000, ap 45 2e.0 */
+ gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
ti,hwmods = "gpio7";
reg = <0x51000 0x4>,
target-module@80000 { /* 0x48480000, ap 31 16.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0x80000 0x4>;
+ reg = <0x80020 0x4>;
reg-names = "rev";
clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
clock-names = "fck";
target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0xc000 0x4>;
+ reg = <0xc020 0x4>;
reg-names = "rev";
clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
clock-names = "fck";
*
* Datamanual Revisions:
*
- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
+ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
* AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
*
*/
mmc3_pins_default: mmc3_pins_default {
pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
>;
};
mmc3_pins_hs: mmc3_pins_hs {
pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
>;
};
mmc3_pins_sdr12: mmc3_pins_sdr12 {
pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
>;
};
mmc3_pins_sdr25: mmc3_pins_sdr25 {
pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
>;
};
reg = <0>;
};
- n25q128a13_2: flash@1 {
+ n25q128a13_2: flash@2 {
compatible = "n25q128a13", "jedec,spi-nor";
#address-cells = <1>;
#size-cells = <1>;
spi-max-frequency = <66000000>;
spi-rx-bus-width = <2>;
- reg = <1>;
+ reg = <2>;
};
};
1003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
ldr r3, .Ldsi+4 @ adjust saved 'pc' back one
- teq r3, r2, lsr #10 @ instruction
+ teq r3, r2, lsr #11 @ instruction
subne r0, sv_pc, #4 @ allow for mov
subeq r0, sv_pc, #8 @ allow for mov + stmia
orr r11, r11, r13 @ mask all requested interrupts
str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+ str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
+
ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
beq hksw @ no - try next source
@@@@@@@@@@@@@@@@@@@@@@
@ Keyboard clock FIQ mode interrupt handler
@ r10 now contains KEYBRD_CLK_MASK, use it
- str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
bic r11, r11, r10 @ unmask it
str r11, [r12, #OMAP1510_GPIO_INT_MASK]
* interrupts default to since commit 80ac93c27441
* requires interrupt already acked and unmasked.
*/
- if (irq_chip->irq_ack)
- irq_chip->irq_ack(d);
- if (irq_chip->irq_unmask)
+ if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
irq_chip->irq_unmask(d);
}
for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
struct device_node *np;
struct gen_pool *sram_pool;
+ if (!soc_is_omap44xx() && !soc_is_omap54xx())
+ return 0;
+
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
if (!np)
pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x4,
- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_RESET_STATUS,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type2,
};
switch (tag->u.acorn.vram_pages) {
case 512:
vram_size += PAGE_SIZE * 256;
+ /* Fall through - ??? */
case 256:
vram_size += PAGE_SIZE * 256;
default:
!CPU_32v4 && !CPU_32v3
select PHYS_ADDR_T_64BIT
select SWIOTLB
- select ARCH_HAS_DMA_COHERENT_TO_PFN
- select ARCH_HAS_DMA_MMAP_PGPROT
- select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_SYNC_DMA_FOR_CPU
help
Say Y if you have an ARMv7 processor supporting the LPAE page
table format and you would like to access memory beyond the
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
- if (!dev_is_dma_coherent(dev))
- return __get_dma_pgprot(attrs, prot);
- return prot;
+ return __get_dma_pgprot(attrs, prot);
}
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
+ phys_addr_t addr = __pfn_to_phys(pfn);
+
+ if (__phys_to_pfn(addr) != pfn)
+ return 0;
+
return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
- set_section_perms(perms, n, true, s->mm);
+ if (s->mm)
+ set_section_perms(perms, n, true, s->mm);
}
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
pinctrl-names = "default";
};
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
&pwm_ef {
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
clock-names = "ddr";
phys = <&usb2_phy1>;
+ phy-names = "usb2-phy";
dr_mode = "peripheral";
g-rx-fifo-size = <192>;
g-np-tx-fifo-size = <128>;
gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>;
enable-active-high;
+ regulator-always-on;
};
tf_io: gpio-regulator-tf_io {
mmc-hs200-1_8v;
non-removable;
fixed-emmc-driver-type = <1>;
+ status = "okay";
};
&usb_extal_clk {
reg = <0x0 0x48000000 0x0 0x18000000>;
};
- reg_1p8v: regulator0 {
+ reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
regulator-always-on;
};
- reg_3p3v: regulator1 {
+ reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
regulator-always-on;
};
- reg_12p0v: regulator1 {
+ reg_12p0v: regulator-12p0v {
compatible = "regulator-fixed";
regulator-name = "D12.0V";
regulator-min-microvolt = <12000000>;
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ /*
+ * We already refuse to boot CPUs that don't support our configured
+ * page size, so we can only detect mismatches for a page size other
+ * than the one we're currently using. Unfortunately, SoCs like this
+ * exist in the wild so, even though we don't like it, we'll have to go
+ * along with it and treat them as non-strict.
+ */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline;
+ struct plt_entry trampoline, *dst;
struct module *mod;
/*
* to check if the actual opcodes are in fact identical,
* regardless of the offset in memory so use memcmp() instead.
*/
- trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
- if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
- sizeof(trampoline))) {
- if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
+ dst = mod->arch.ftrace_trampoline;
+ trampoline = get_plt_entry(addr, dst);
+ if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+ if (plt_entry_is_initialized(dst)) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}
/* point the trampoline to our ftrace entry point */
module_disable_ro(mod);
- *mod->arch.ftrace_trampoline = trampoline;
+ *dst = trampoline;
module_enable_ro(mod, true);
- /* update trampoline before patching in the branch */
- smp_wmb();
+ /*
+ * Ensure updated trampoline is visible to instruction
+ * fetch before we patch in the branch.
+ */
+ __flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
}
- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+ addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
- if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
- return pgprot_writecombine(prot);
- return prot;
+ return pgprot_writecombine(prot);
}
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
return 0x0000000000003CB0ull;
+ /* Else, fall through */
default:
return 0x0000000000023CB0ull;
}
regs->uregs[0] = -EINTR;
break;
}
+ /* Else, fall through */
case -ERESTARTNOINTR:
regs->uregs[0] = regs->orig_r0;
regs->ipc -= 4;
switch (regs->uregs[0]) {
case -ERESTART_RESTARTBLOCK:
regs->uregs[15] = __NR_restart_syscall;
+ /* Fall through */
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H
+#include <asm/page.h>
#include <asm-generic/4level-fixup.h>
#include <asm/fixmap.h>
#endif /* !__ASSEMBLY__ */
-#include <asm/page.h>
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
- select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o \
- dma-common.o
+ of_platform.o prom_parse.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
- unsigned long attrs)
-{
- if (!dev_is_dma_coherent(dev))
- return pgprot_noncached(prot);
- return prot;
-}
}
}
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
- return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
- (tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
- return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
- (tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
static int restore_fp(struct task_struct *tsk)
{
- if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+ if (tsk->thread.load_fp) {
		load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
static int restore_altivec(struct task_struct *tsk)
{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
- (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
if (!tsk->thread.regs)
return;
+ check_if_tm_restore_required(tsk);
+
usermsr = tsk->thread.regs->msr;
if ((usermsr & msr_all_available) == 0)
return;
msr_check_and_set(msr_all_available);
- check_if_tm_restore_required(tsk);
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
}
tce = be64_to_cpu(tce);
- if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
- return H_PARAMETER;
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+ }
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
ua = 0;
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
- return H_PARAMETER;
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+ }
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
#ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
- int __maybe_unused cpu = smp_processor_id();
bool map = true;
/* use a quarter of the TLBCAM for bolted linear map */
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
__end_of_fixed_addresses
};
-#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP (VMALLOC_START)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
#define FIXMAP_PAGE_IO PAGE_KERNEL
#define __early_set_fixmap __set_fixmap
#ifndef __ASM_IMAGE_H
#define __ASM_IMAGE_H
-#define RISCV_IMAGE_MAGIC "RISCV"
+#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2 "RSC\x05"
#define RISCV_IMAGE_FLAG_BE_SHIFT 0
#define RISCV_IMAGE_FLAG_BE_MASK 0x1
#define __HEAD_FLAGS (__HEAD_FLAG(BE))
#define RISCV_HEADER_VERSION_MAJOR 0
-#define RISCV_HEADER_VERSION_MINOR 1
+#define RISCV_HEADER_VERSION_MINOR 2
#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
RISCV_HEADER_VERSION_MINOR)
* @version: version
* @res1: reserved
* @res2: reserved
- * @magic: Magic number
- * @res3: reserved (will be used for additional RISC-V specific
- * header)
+ * @magic: Magic number (RISC-V specific; deprecated)
+ * @magic2: Magic number 2 (to match the ARM64 'magic' field pos)
* @res4: reserved (will be used for PE COFF offset)
*
* The intention is for this header format to be shared between multiple
u32 res1;
u64 res2;
u64 magic;
- u32 res3;
+ u32 magic2;
u32 res4;
};
#endif /* __ASSEMBLY__ */
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define FIXADDR_TOP VMALLOC_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE PMD_SIZE
+#else
+#define FIXADDR_SIZE PGDIR_SIZE
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
/*
- * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
+ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
* Note that PGDIR_SIZE must evenly divide TASK_SIZE.
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
-#define TASK_SIZE VMALLOC_START
+#define TASK_SIZE FIXADDR_START
#endif
#include <asm-generic/pgtable.h>
static inline void __fstate_clean(struct pt_regs *regs)
{
- regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
}
static inline void fstate_save(struct task_struct *task,
}
#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
+
#define flush_tlb_range(vma, start, end) \
remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-#define flush_tlb_mm(mm) \
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ flush_tlb_range(vma, addr, addr + PAGE_SIZE);
+}
+
+#define flush_tlb_mm(mm) \
remote_sfence_vma(mm_cpumask(mm), 0, -1)
#endif /* CONFIG_SMP */
.word RISCV_HEADER_VERSION
.word 0
.dword 0
- .asciz RISCV_IMAGE_MAGIC
- .word 0
+ .ascii RISCV_IMAGE_MAGIC
.balign 4
+ .ascii RISCV_IMAGE_MAGIC2
.word 0
.global _start_kernel
unsigned long sp)
{
regs->sstatus = SR_SPIE;
- if (has_fpu)
+ if (has_fpu) {
regs->sstatus |= SR_FS_INITIAL;
+ /*
+ * Restore the initial value to the FP register
+ * before starting the user program.
+ */
+ fstate_restore(current, regs);
+ }
regs->sepc = pc;
regs->sp = sp;
set_fs(USER_DS);
{
#ifdef CONFIG_FPU
/*
- * Reset FPU context
+ * Reset FPU state and context
* frm: round to nearest, ties to even (IEEE default)
* fflags: accrued exceptions cleared
*/
+ fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
}
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
+ case KVM_S390_INT_PFAULT_INIT:
+ irq->u.ext.ext_params = s390int->parm;
+ irq->u.ext.ext_params2 = s390int->parm64;
+ break;
+ case KVM_S390_RESTART:
+ case KVM_S390_INT_CLOCK_COMP:
+ case KVM_S390_INT_CPU_TIMER:
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
/* mark all the pages in active slots as dirty */
for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
ms = slots->memslots + slotnr;
+ if (!ms->dirty_bitmap)
+ return -EINVAL;
/*
* The second half of the bitmap is only used on x86,
* and would be wasted otherwise, so we put it to good
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
- struct kvm_s390_irq s390irq;
+ struct kvm_s390_irq s390irq = {};
if (copy_from_user(&s390int, argp, sizeof(s390int)))
return -EFAULT;
break;
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
/* lcgr %dst,%dst */
- EMIT4(0xb9130000, dst_reg, dst_reg);
+ EMIT4(0xb9030000, dst_reg, dst_reg);
break;
/*
* BPF_FROM_BE/LE
/* llgf %w1,map.max_entries(%b2) */
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
REG_W1, 0, 0xa);
/*
* goto out;
*/
- /* sllg %r1,%b3,3: %r1 = index * 8 */
- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+ /* llgfr %r1,%b3: %r1 = (u32) index */
+ EMIT4(0xb9160000, REG_1, BPF_REG_3);
+ /* sllg %r1,%r1,3: %r1 *= 8 */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
/* lg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
printk("dbr");
break;
case FD_REG_N:
- if (0)
- goto d_reg_n;
case F_REG_N:
printk("fr%d", rn);
break;
printk("xd%d", rn & ~1);
break;
}
- d_reg_n:
+ /* else, fall through */
case D_REG_N:
printk("dr%d", rn);
break;
printk("xd%d", rm & ~1);
break;
}
+ /* else, fall through */
case D_REG_M:
printk("dr%d", rm);
break;
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
+ break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
{
long err;
+ if (!IS_ENABLED(CONFIG_SYSVIPC))
+ return -ENOSYS;
+
/* No need for backward compatibility. We can start fresh... */
if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
- err = sys_semtimedop(first, ptr,
- (unsigned int)second, NULL);
+ err = ksys_semtimedop(first, ptr,
+ (unsigned int)second, NULL);
goto out;
case SEMTIMEDOP:
- err = sys_semtimedop(first, ptr, (unsigned int)second,
+ err = ksys_semtimedop(first, ptr, (unsigned int)second,
(const struct __kernel_timespec __user *)
- (unsigned long) fifth);
+ (unsigned long) fifth);
goto out;
case SEMGET:
- err = sys_semget(first, (int)second, (int)third);
+ err = ksys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
- err = sys_semctl(first, second,
- (int)third | IPC_64,
- (unsigned long) ptr);
+ err = ksys_old_semctl(first, second,
+ (int)third | IPC_64,
+ (unsigned long) ptr);
goto out;
}
default:
if (call <= MSGCTL) {
switch (call) {
case MSGSND:
- err = sys_msgsnd(first, ptr, (size_t)second,
+ err = ksys_msgsnd(first, ptr, (size_t)second,
(int)third);
goto out;
case MSGRCV:
- err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+ err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
(int)third);
goto out;
case MSGGET:
- err = sys_msgget((key_t)first, (int)second);
+ err = ksys_msgget((key_t)first, (int)second);
goto out;
case MSGCTL:
- err = sys_msgctl(first, (int)second | IPC_64, ptr);
+ err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
goto out;
}
case SHMDT:
- err = sys_shmdt(ptr);
+ err = ksys_shmdt(ptr);
goto out;
case SHMGET:
- err = sys_shmget(first, (size_t)second, (int)third);
+ err = ksys_shmget(first, (size_t)second, (int)third);
goto out;
case SHMCTL:
- err = sys_shmctl(first, (int)second | IPC_64, ptr);
+ err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
time_travel_time = ns;
}
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
- unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
{
time_travel_timer_mode = mode;
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
+{
time_travel_timer_expiry = expiry;
}
#else
{
}
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
- unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
+{
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
{
}
if (time_travel_timer_mode != TT_TMR_DISABLED ||
time_travel_timer_expiry < next) {
if (time_travel_timer_mode == TT_TMR_ONESHOT)
- time_travel_set_timer(TT_TMR_DISABLED, 0);
+ time_travel_set_timer_mode(TT_TMR_DISABLED);
/*
* time_travel_time will be adjusted in the timer
* IRQ handler so it works even when the signal
static int itimer_shutdown(struct clock_event_device *evt)
{
if (time_travel_mode != TT_MODE_OFF)
- time_travel_set_timer(TT_TMR_DISABLED, 0);
+ time_travel_set_timer_mode(TT_TMR_DISABLED);
if (time_travel_mode != TT_MODE_INFCPU)
os_timer_disable();
{
unsigned long long interval = NSEC_PER_SEC / HZ;
- if (time_travel_mode != TT_MODE_OFF)
- time_travel_set_timer(TT_TMR_PERIODIC,
- time_travel_time + interval);
+ if (time_travel_mode != TT_MODE_OFF) {
+ time_travel_set_timer_mode(TT_TMR_PERIODIC);
+ time_travel_set_timer_expiry(time_travel_time + interval);
+ }
if (time_travel_mode != TT_MODE_INFCPU)
os_timer_set_interval(interval);
{
delta += 1;
- if (time_travel_mode != TT_MODE_OFF)
- time_travel_set_timer(TT_TMR_ONESHOT,
- time_travel_time + delta);
+ if (time_travel_mode != TT_MODE_OFF) {
+ time_travel_set_timer_mode(TT_TMR_ONESHOT);
+ time_travel_set_timer_expiry(time_travel_time + delta);
+ }
if (time_travel_mode != TT_MODE_INFCPU)
return os_timer_one_shot(delta);
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
export REALMODE_CFLAGS
/* Find the first usable memory region under bios_start. */
for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+ unsigned long new = bios_start;
+
entry = &boot_params->e820_table[i];
/* Skip all entries above bios_start. */
/* Adjust bios_start to the end of the entry if needed. */
if (bios_start > entry->addr + entry->size)
- bios_start = entry->addr + entry->size;
+ new = entry->addr + entry->size;
/* Keep bios_start page-aligned. */
- bios_start = round_down(bios_start, PAGE_SIZE);
+ new = round_down(new, PAGE_SIZE);
/* Skip the entry if it's too small. */
- if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+ if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
continue;
+ /* Protect against underflow. */
+ if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
+ break;
+
+ bios_start = new;
break;
}
	throttle = perf_event_overflow(event, &data, &regs);
out:
- if (throttle)
+ if (throttle) {
perf_ibs_stop(event, 0);
- else
- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+ } else {
+ period >>= 4;
+
+ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+ (*config & IBS_OP_CNT_CTL))
+ period |= *config & IBS_OP_CUR_CNT_RAND;
+
+ perf_ibs_enable_event(perf_ibs, hwc, period);
+ }
perf_event_update_userpage(event);
* Add a single event to the PMU.
*
* The event is added to the group of enabled events
- * but only if it can be scehduled with existing events.
+ * but only if it can be scheduled with existing events.
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
return left;
}
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+ return max(left, 32ULL);
+}
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
+ x86_pmu.limit_period = nhm_limit_period;
mem_attr = nhm_mem_events_attrs;
* Lower 12 bits encode the number of additional
* pages to flush (in addition to the 'cur' page).
*/
- if (diff >= HV_TLB_FLUSH_UNIT)
+ if (diff >= HV_TLB_FLUSH_UNIT) {
gva_list[gva_n] |= ~PAGE_MASK;
- else if (diff)
+ cur += HV_TLB_FLUSH_UNIT;
+ } else if (diff) {
gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+ cur = end;
+ }
- cur += HV_TLB_FLUSH_UNIT;
gva_n++;
} while (cur < end);
* Note: efi_info is commonly left uninitialized, but that field has a
* private magic, so it is better to leave it unchanged.
*/
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member) \
+ { \
+ .start = offsetof(struct boot_params, struct_member), \
+ .len = sizeof_mbr(struct boot_params, struct_member), \
+ }
+
+struct boot_params_to_save {
+ unsigned int start;
+ unsigned int len;
+};
+
static void sanitize_boot_params(struct boot_params *boot_params)
{
/*
* problems again.
*/
if (boot_params->sentinel) {
- /* fields in boot_params are left uninitialized, clear them */
- boot_params->acpi_rsdp_addr = 0;
- memset(&boot_params->ext_ramdisk_image, 0,
- (char *)&boot_params->efi_info -
- (char *)&boot_params->ext_ramdisk_image);
- memset(&boot_params->kbd_status, 0,
- (char *)&boot_params->hdr -
- (char *)&boot_params->kbd_status);
- memset(&boot_params->_pad7[0], 0,
- (char *)&boot_params->edd_mbr_sig_buffer[0] -
- (char *)&boot_params->_pad7[0]);
- memset(&boot_params->_pad8[0], 0,
- (char *)&boot_params->eddbuf[0] -
- (char *)&boot_params->_pad8[0]);
- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+ static struct boot_params scratch;
+ char *bp_base = (char *)boot_params;
+ char *save_base = (char *)&scratch;
+ int i;
+
+ const struct boot_params_to_save to_save[] = {
+ BOOT_PARAM_PRESERVE(screen_info),
+ BOOT_PARAM_PRESERVE(apm_bios_info),
+ BOOT_PARAM_PRESERVE(tboot_addr),
+ BOOT_PARAM_PRESERVE(ist_info),
+ BOOT_PARAM_PRESERVE(hd0_info),
+ BOOT_PARAM_PRESERVE(hd1_info),
+ BOOT_PARAM_PRESERVE(sys_desc_table),
+ BOOT_PARAM_PRESERVE(olpc_ofw_header),
+ BOOT_PARAM_PRESERVE(efi_info),
+ BOOT_PARAM_PRESERVE(alt_mem_k),
+ BOOT_PARAM_PRESERVE(scratch),
+ BOOT_PARAM_PRESERVE(e820_entries),
+ BOOT_PARAM_PRESERVE(eddbuf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+ BOOT_PARAM_PRESERVE(secure_boot),
+ BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_table),
+ BOOT_PARAM_PRESERVE(eddbuf),
+ };
+
+ memset(&scratch, 0, sizeof(scratch));
+
+ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+ memcpy(save_base + to_save[i].start,
+ bp_base + to_save[i].start, to_save[i].len);
+ }
+
+ memcpy(boot_params, save_base, sizeof(*boot_params));
}
}
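The BOOT_PARAM_PRESERVE pattern above generalizes to any struct: describe the members to keep as (offset, length) pairs built from offsetof() and a sizeof-on-a-null-pointer-member trick, copy them into a zeroed scratch copy, then copy the scratch back. A toy, compilable sketch (struct and member names are invented):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct params { int keep_a; int scrub; char keep_b[4]; };

/* (offset, length) of a member, mirroring BOOT_PARAM_PRESERVE */
#define PRESERVE(m) { offsetof(struct params, m), sizeof(((struct params *)0)->m) }

struct span { size_t start, len; };

int main(void)
{
        struct params p = { 1, 2, "hi" };
        struct params scratch;
        const struct span to_save[] = { PRESERVE(keep_a), PRESERVE(keep_b) };
        size_t i;

        memset(&scratch, 0, sizeof(scratch));
        for (i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
                memcpy((char *)&scratch + to_save[i].start,
                       (char *)&p + to_save[i].start, to_save[i].len);
        memcpy(&p, &scratch, sizeof(p));        /* scrub is now zeroed */

        printf("a=%d scrub=%d b=%s\n", p.keep_a, p.scrub, p.keep_b);
        return 0;
}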
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#ifndef __ASSEMBLY__
-extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);
* While adding a new CPUID for a new microarchitecture, add a new
* group to keep logically sorted out in chronological order. Within
* that group keep the CPUID for the variants sorted by model number.
+ *
+ * The defined symbol names have the following form:
+ * INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
+ * where:
+ * OPTFAMILY Describes the family of CPUs that this belongs to. Default
+ * is assumed to be "_CORE" (and should be omitted). Other values
+ * currently in use are _ATOM and _XEON_PHI
+ * MICROARCH Is the code name for the micro-architecture for this core.
+ * N.B. Not the platform name.
+ * OPTDIFF If needed, a short string to differentiate by market segment.
+ * Exact strings here will vary over time. _DESKTOP, _MOBILE, and
+ * _X (short for Xeon server) should be used when they are
+ * appropriate.
+ *
+ * The #define line may optionally include a comment including platform names.
*/
#define INTEL_FAM6_CORE_YONAH 0x0E
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+ unsigned long mmu_valid_gen;
DECLARE_BITMAP(unsync_child_bitmap, 512);
#ifdef CONFIG_X86_32
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
+ unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_TSC_RATIO 0xc0000104
#define MSR_AMD64_NB_CFG 0xc001001f
+#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
" lfence;\n" \
" jmp 902b;\n" \
" .align 16\n" \
- "903: addl $4, %%esp;\n" \
+ "903: lea 4(%%esp), %%esp;\n" \
" pushl %[thunk_target];\n" \
" ret;\n" \
" .align 16\n" \
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
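A quick self-check of the two masks above: together they cover the full 20-bit CurCnt field at bits 32..51 and they do not overlap, so the random low bits can be OR-ed back into the programmed period without disturbing the counted part.

#include <stdio.h>

#define IBS_OP_CUR_CNT       (0xFFF80ULL << 32)
#define IBS_OP_CUR_CNT_RAND  (0x0007FULL << 32)

int main(void)
{
        unsigned long long all = IBS_OP_CUR_CNT | IBS_OP_CUR_CNT_RAND;

        printf("union   = %#llx\n", all);               /* 0xfffff00000000 */
        printf("overlap = %#llx\n",
               IBS_OP_CUR_CNT & IBS_OP_CUR_CNT_RAND);   /* 0 */
        return 0;
}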
({ \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
+ __typeof__(ptr) __gu_ptr = (ptr); \
+ __typeof__(size) __gu_size = (size); \
__uaccess_begin_nospec(); \
- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
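The extra __gu_ptr/__gu_size locals exist so the macro arguments are evaluated exactly once. A minimal sketch of the same single-evaluation idiom (macro and names invented):

#include <stdio.h>

/* The argument expression is captured once in a typed local, so side
 * effects (like p++) happen exactly once no matter how often the body
 * uses it. typeof() of a non-VLA expression does not evaluate it. */
#define READ_ONCE_ARG(x, ptr) do {              \
        __typeof__(ptr) __p = (ptr);            \
        (x) = *__p;                             \
} while (0)

int main(void)
{
        int buf[2] = { 10, 20 };
        int *p = buf, v;

        READ_ONCE_ARG(v, p++);  /* p advances once, v == 10 */
        printf("v=%d, p-buf=%td\n", v, p - buf);
        return 0;
}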
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
/*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
*/
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
if (!boot_cpu_has(X86_FEATURE_APIC))
return true;
+ /* Virt guests may lack ARAT, but still have DEADLINE */
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
+ return true;
+
/* Deadline timer is based on TSC so no further PIT action required */
if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
return false;
static int __init calibrate_APIC_clock(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
- void (*real_handler)(struct clock_event_device *dev);
+ u64 tsc_perj = 0, tsc_start = 0;
+ unsigned long jif_start;
unsigned long deltaj;
long delta, deltatsc;
int pm_referenced = 0;
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
"calibrating APIC timer ...\n");
+ /*
+ * There are platforms w/o global clockevent devices. Instead of
+ * making the calibration conditional on that, use a polling based
+ * approach everywhere.
+ */
local_irq_disable();
- /* Replace the global interrupt handler */
- real_handler = global_clock_event->event_handler;
- global_clock_event->event_handler = lapic_cal_handler;
-
/*
* Setup the APIC counter to maximum. There is no way the lapic
* can underflow in the 100ms detection time frame
*/
__setup_APIC_LVTT(0xffffffff, 0, 0);
- /* Let the interrupts run */
+ /*
+ * Methods to terminate the calibration loop:
+ * 1) Global clockevent if available (jiffies)
+ * 2) TSC if available and frequency is known
+ */
+ jif_start = READ_ONCE(jiffies);
+
+ if (tsc_khz) {
+ tsc_start = rdtsc();
+ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+ }
+
+ /*
+ * Enable interrupts so the tick can fire, if a global
+ * clockevent device is available
+ */
local_irq_enable();
- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
- cpu_relax();
+ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+ /* Wait for a tick to elapse */
+ while (1) {
+ if (tsc_khz) {
+ u64 tsc_now = rdtsc();
+ if ((tsc_now - tsc_start) >= tsc_perj) {
+ tsc_start += tsc_perj;
+ break;
+ }
+ } else {
+ unsigned long jif_now = READ_ONCE(jiffies);
- local_irq_disable();
+ if (time_after(jif_now, jif_start)) {
+ jif_start = jif_now;
+ break;
+ }
+ }
+ cpu_relax();
+ }
- /* Restore the real event handler */
- global_clock_event->event_handler = real_handler;
+ /* Invoke the calibration routine */
+ local_irq_disable();
+ lapic_cal_handler(NULL);
+ local_irq_enable();
+ }
+
+ local_irq_disable();
/* Build delta t1-t2 as apic timer counts down */
delta = lapic_cal_t1 - lapic_cal_t2;
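A userspace sketch of the polled-calibration loop above, with CLOCK_MONOTONIC standing in for the TSC and invented constants: wait until the free-running counter advances by one tick period, step the reference forward so drift does not accumulate, then run the handler.

#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
        const unsigned long long per_tick = 10 * 1000 * 1000;   /* 10 ms */
        unsigned long long start = now_ns();
        int ticks;

        for (ticks = 0; ticks < 10; ticks++) {
                while (now_ns() - start < per_tick)
                        ;                       /* cpu_relax() equivalent */
                start += per_tick;              /* avoid accumulating drift */
                /* the calibration handler would be invoked here */
        }
        printf("polled %d ticks\n", ticks);
        return 0;
}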
levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
/*
- * PM timer calibration failed or not turned on
- * so lets try APIC timer based calibration
+ * PM timer calibration failed or not turned on so lets try APIC
+ * timer based calibration, if a global clockevent device is
+ * available.
*/
- if (!pm_referenced) {
+ if (!pm_referenced && global_clock_event) {
apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
/*
return early_per_cpu(x86_cpu_to_apicid, cpu);
}
-static inline unsigned long calculate_ldr(int cpu)
-{
- unsigned long val, id;
-
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- id = per_cpu(x86_bios_cpu_apicid, cpu);
- val |= SET_APIC_LOGICAL_ID(id);
-
- return val;
-}
-
/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
*/
static void bigsmp_init_apic_ldr(void)
{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_FLAT);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
}
static void bigsmp_setup_apic_routing(void)
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+ if (!ioapic_initialized)
+ return gsi_top;
+ /*
+ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+ * updated. So simply return @from if ioapic_dynirq_base == 0.
+ */
+ return ioapic_dynirq_base ? : from;
}
#ifdef CONFIG_X86_32
def_to_bigsmp = 0;
break;
}
- /* If P4 and above fall through */
+ /* P4 and above */
+ /* fall through */
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "force"))
+ rdrand_force = true;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+ /*
+ * Saving of the MSR used to hide the RDRAND support during
+ * suspend/resume is done by arch/x86/power/cpu.c, which is
+ * dependent on CONFIG_PM_SLEEP.
+ */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP))
+ return;
+
+ /*
+ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+ * RDRAND support using the CPUID function directly.
+ */
+ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+ return;
+
+ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+ /*
+ * Verify that the CPUID change has occurred in case the kernel is
+ * running virtualized and the hypervisor doesn't support the MSR.
+ */
+ if (cpuid_ecx(1) & BIT(30)) {
+ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+ return;
+ }
+
+ clear_cpu_cap(c, X86_FEATURE_RDRAND);
+ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+ /*
+ * Some BIOS implementations do not restore proper RDRAND support
+ * across suspend and resume. Check on whether to hide the RDRAND
+ * instruction support via CPUID.
+ */
+ clear_rdrand_cpuid_bit(c);
+}
+
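A toy stand-in for the rdrand= handling above, showing the accept-only-"force" parsing outside the kernel (the names mirror the code; the early_param plumbing is omitted):

#include <stdio.h>
#include <string.h>

static int rdrand_force;

static int rdrand_cmdline(const char *str)
{
        if (!str)
                return -1;
        if (!strcmp(str, "force")) {    /* only "force" is accepted */
                rdrand_force = 1;
                return 0;
        }
        return -1;
}

int main(void)
{
        printf("'force' -> %d (force=%d)\n",
               rdrand_cmdline("force"), rdrand_force);
        printf("'junk'  -> %d\n", rdrand_cmdline("junk"));
        return 0;
}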
static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;
wrmsrl_safe(MSR_F15H_IC_CFG, value);
}
}
+
+ /*
+ * Some BIOS implementations do not restore proper RDRAND support
+ * across suspend and resume. Check on whether to hide the RDRAND
+ * instruction support via CPUID.
+ */
+ clear_rdrand_cpuid_bit(c);
}
static void init_amd_zn(struct cpuinfo_x86 *c)
case 0x10: init_amd_gh(c); break;
case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
+ case 0x16: init_amd_jg(c); break;
case 0x17: init_amd_zn(c); break;
}
*/
MCESEV(
AO, "Action optional: memory scrubbing error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
+ SER, MASK(MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
),
MCESEV(
AO, "Action optional: last level cache writeback error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+ SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
),
/* ignore OVER for UCNA */
*/
static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
+/*
+ * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
+ * hardware or BIOS before kernel boot.
+ */
+static u32 orig_umwait_control_cached __ro_after_init;
+
/*
* Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
* the sysfs write functions.
return 0;
}
+/*
+ * The CPU hotplug callback sets the control MSR to the original control
+ * value.
+ */
+static int umwait_cpu_offline(unsigned int cpu)
+{
+ /*
+ * This code is protected by the CPU hotplug already and
+ * orig_umwait_control_cached is never changed after it caches
+ * the original control MSR value in umwait_init(). So there
+ * is no race condition here.
+ */
+ wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);
+
+ return 0;
+}
+
/*
* On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
* is the only active CPU at this time. The MSR is set up on the APs via the
if (!boot_cpu_has(X86_FEATURE_WAITPKG))
return -ENODEV;
+ /*
+ * Cache the original control MSR value before the control MSR is
+ * changed. This is the only place where orig_umwait_control_cached
+ * is modified.
+ */
+ rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
- umwait_cpu_online, NULL);
+ umwait_cpu_online, umwait_cpu_offline);
+ if (ret < 0) {
+ /*
+ * On failure, the control MSR on all CPUs has the
+ * original control value.
+ */
+ return ret;
+ }
register_syscore_ops(&umwait_syscore_ops);
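The online/offline pairing above follows a common save/restore shape: cache the firmware-provided value once at init, program the new value when a CPU comes up, and write the cached original back when it goes down. A userspace sketch with the MSR simulated by a plain variable:

#include <stdio.h>

static unsigned int msr_umwait_control = 0x55;  /* firmware default */
static unsigned int orig_control;               /* cached once at init */
static const unsigned int new_control = 0x10;

static int unit_online(int cpu)  { msr_umwait_control = new_control;  return 0; }
static int unit_offline(int cpu) { msr_umwait_control = orig_control; return 0; }

int main(void)
{
        orig_control = msr_umwait_control;      /* the only place it is set */
        unit_online(0);
        printf("online:  %#x\n", msr_umwait_control);
        unit_offline(0);
        printf("offline: %#x (restored)\n", msr_umwait_control);
        return 0;
}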
void (*abort)(struct arch_uprobe *, struct pt_regs *);
};
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
{
- return in_ia32_syscall() ? 4 : 8;
+ /*
+ * Check registers for mode as in_xxx_syscall() does not apply here.
+ */
+ return user_64bit_mode(regs) ? 8 : 4;
}
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
- unsigned long new_sp = regs->sp - sizeof_long();
+ unsigned long new_sp = regs->sp - sizeof_long(regs);
- if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
+ if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
return -EFAULT;
regs->sp = new_sp;
long correction = utask->vaddr - utask->xol_vaddr;
regs->ip += correction;
} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
- regs->sp += sizeof_long(); /* Pop incorrect return address */
+ regs->sp += sizeof_long(regs); /* Pop incorrect return address */
if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
return -ERESTART;
}
* "call" insn was executed out-of-line. Just restore ->sp and restart.
* We could also restore ->ip and try to call branch_emulate_op() again.
*/
- regs->sp += sizeof_long();
+ regs->sp += sizeof_long(regs);
return -ERESTART;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
- int rasize = sizeof_long(), nleft;
+ int rasize = sizeof_long(regs), nleft;
unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
- uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+ uint16_t evmcs_ver = 0;
struct kvm_cpuid_entry2 cpuid_entries[] = {
{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
{ .function = HYPERV_CPUID_INTERFACE },
};
int i, nent = ARRAY_SIZE(cpuid_entries);
+ if (kvm_x86_ops->nested_get_evmcs_version)
+ evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+
/* Skip NESTED_FEATURES if eVMCS is not supported */
if (!evmcs_ver)
--nent;
if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
new->phys_map[xapic_id] = apic;
+ if (!kvm_apic_sw_enabled(apic))
+ continue;
+
ldr = kvm_lapic_get_reg(apic, APIC_LDR);
if (apic_x2apic_mode(apic)) {
static_key_slow_dec_deferred(&apic_sw_disabled);
else
static_key_slow_inc(&apic_sw_disabled.key);
+
+ recalculate_apic_map(apic->vcpu->kvm);
}
}
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+ /*
+ * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+ * depends on valid pages being added to the head of the list. See
+ * comments in kvm_zap_obsolete_pages().
+ */
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
- if ((_sp)->role.invalid) { \
+ if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
} else
#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
static void mmu_audit_disable(void) { }
#endif
+static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+}
+
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
+ sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);
return false;
if (cached_root_available(vcpu, new_cr3, new_role)) {
+ /*
+ * It is possible that the cached previous root page is
+ * obsolete because of a change in the MMU generation
+ * number. However, changing the generation number is
+ * accompanied by KVM_REQ_MMU_RELOAD, which will free
+ * the root set here and allocate a new one.
+ */
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
return alloc_mmu_pages(vcpu);
}
-static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node)
+
+static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
- struct kvm_mmu_page *sp;
+ struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
- unsigned long i;
- bool flush;
- gfn_t gfn;
-
- spin_lock(&kvm->mmu_lock);
-
- if (list_empty(&kvm->arch.active_mmu_pages))
- goto out_unlock;
-
- flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
+ int ign;
- for (i = 0; i < slot->npages; i++) {
- gfn = slot->base_gfn + i;
+restart:
+ list_for_each_entry_safe_reverse(sp, node,
+ &kvm->arch.active_mmu_pages, link) {
+ /*
+ * No obsolete valid page exists before a newly created page
+ * since active_mmu_pages is a FIFO list.
+ */
+ if (!is_obsolete_sp(kvm, sp))
+ break;
- for_each_valid_sp(kvm, sp, gfn) {
- if (sp->gfn != gfn)
- continue;
+ /*
+ * Do not repeatedly zap a root page to avoid unnecessary
+	 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to make
+	 * progress:
+ * vcpu 0 vcpu 1
+ * call vcpu_enter_guest():
+ * 1): handle KVM_REQ_MMU_RELOAD
+ * and require mmu-lock to
+ * load mmu
+ * repeat:
+ * 1): zap root page and
+ * send KVM_REQ_MMU_RELOAD
+ *
+ * 2): if (cond_resched_lock(mmu-lock))
+ *
+ * 2): hold mmu-lock and load mmu
+ *
+ * 3): see KVM_REQ_MMU_RELOAD bit
+ * on vcpu->requests is set
+ * then return 1 to call
+ * vcpu_enter_guest() again.
+ * goto repeat;
+ *
+	 * Since we are walking the list in reverse and invalid pages
+	 * are moved to the head, skipping invalid pages helps us
+	 * avoid walking the list forever.
+ */
+ if (sp->role.invalid)
+ continue;
- kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
- }
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
- flush = false;
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
cond_resched_lock(&kvm->mmu_lock);
+ goto restart;
}
+
+ if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+ goto restart;
}
- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-out_unlock:
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+}
+
+/*
+ * Fast invalidate all shadow pages and use lock-break technique
+ * to zap obsolete pages.
+ *
+ * It is required when a memslot is being deleted or the VM is
+ * being destroyed; in these cases, we must ensure that the KVM
+ * MMU no longer uses any resource of the slot being deleted, or
+ * of any slot, after the function returns.
+ */
+static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+{
+ spin_lock(&kvm->mmu_lock);
+ kvm->arch.mmu_valid_gen++;
+
+ kvm_zap_obsolete_pages(kvm);
spin_unlock(&kvm->mmu_lock);
}
+static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+{
+ kvm_mmu_zap_all_fast(kvm);
+}
+
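A compact sketch of the generation-number scheme these hunks introduce: bumping a global generation marks every existing page obsolete in O(1), and because new pages go to the head of a FIFO list, a reverse (oldest-first) walk may stop at the first non-obsolete entry. An array stands in for the list here:

#include <stdio.h>

#define NPAGES 4

struct page { unsigned long gen; int valid; };

static unsigned long mmu_valid_gen;
static struct page pages[NPAGES];       /* index 0 = head (newest) */

static int is_obsolete(const struct page *p)
{
        return p->gen != mmu_valid_gen;
}

int main(void)
{
        int i;

        for (i = 0; i < NPAGES; i++)
                pages[i] = (struct page){ .gen = mmu_valid_gen, .valid = 1 };

        mmu_valid_gen++;        /* fast invalidate: everything is now stale */

        for (i = NPAGES - 1; i >= 0; i--) {     /* reverse walk, oldest first */
                if (!is_obsolete(&pages[i]))
                        break;  /* FIFO order: nothing newer is stale either */
                pages[i].valid = 0;             /* zap */
        }
        for (i = 0; i < NPAGES; i++)
                printf("page %d valid=%d\n", i, pages[i].valid);
        return 0;
}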
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
if (!entry)
return -EINVAL;
- new_entry = READ_ONCE(*entry);
new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
return ret;
}
-static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
-{
- /* Not supported */
- return 0;
-}
-
static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version)
{
.mem_enc_unreg_region = svm_unregister_enc_region,
.nested_enable_evmcs = nested_enable_evmcs,
- .nested_get_evmcs_version = nested_get_evmcs_version,
+ .nested_get_evmcs_version = NULL,
.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
};
int len;
gva_t gva = 0;
struct vmcs12 *vmcs12;
+ struct x86_exception e;
short offset;
if (!nested_vmx_check_permission(vcpu))
vmx_instruction_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
- kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+ if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+ kvm_inject_page_fault(vcpu, &e);
}
return nested_vmx_succeed(vcpu);
.set_nested_state = NULL,
.get_vmcs12_pages = NULL,
.nested_enable_evmcs = NULL,
+ .nested_get_evmcs_version = NULL,
.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
};
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE && ctxt->tf)
- kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
- exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+ kvm_rip_write(vcpu, ctxt->eip);
+ if (r == EMULATE_DONE && ctxt->tf)
+ kvm_vcpu_do_singlestep(vcpu, &r);
__kvm_set_rflags(vcpu, ctxt->eflags);
+ }
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
for (i = 0; i < 8; i++) {
FPU_REG *r = &st(i);
u_char tagi = FPU_gettagi(i);
+
switch (tagi) {
case TAG_Empty:
continue;
- break;
case TAG_Zero:
case TAG_Special:
+ /* Update tagi for the printk below */
tagi = FPU_Special(r);
+ /* fall through */
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
printk("Whoops! Error in errors.c: tag%d is %d ", i,
tagi);
continue;
- break;
}
printk("%s\n", tag_desc[(int)(unsigned)tagi]);
}
case TW_Denormal:
if (denormal_operand() < 0)
return;
-
+ /* fall through */
case TAG_Zero:
case TAG_Valid:
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
*/
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
unsigned long pfn, unsigned long npg,
- int warnlvl)
+ unsigned long lpsize, int warnlvl)
{
pgprotval_t forbidden, res;
unsigned long end;
check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
forbidden = res;
- res = protect_kernel_text_ro(start, end);
- check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
- forbidden |= res;
+ /*
+	 * Special case to preserve a large page. If the change spans the
+	 * full large page mapping then there is no point in splitting it
+	 * up. This happens with ftrace and is going to be removed once
+	 * ftrace has switched to text_poke().
+ */
+ if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
+ res = protect_kernel_text_ro(start, end);
+ check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
+ forbidden |= res;
+ }
/* Check the PFN directly */
res = protect_pci_bios(pfn, pfn + npg - 1);
* extra conditional required here.
*/
chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
- CPA_CONFLICT);
+ psize, CPA_CONFLICT);
if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
/*
* protection requirement in the large page.
*/
new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
- CPA_DETECT);
+ psize, CPA_DETECT);
/*
* If there is a conflict, split the large page.
if (!cpa->force_static_prot)
goto set;
- prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
+ /* Hand in lpsize = 0 to enforce the protection mechanism */
+ prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
if (pgprot_val(prot) == pgprot_val(ref_prot))
goto set;
pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
cpa_inc_4k_install();
- new_prot = static_protections(new_prot, address, pfn, 1,
+ /* Hand in lpsize = 0 to enforce the protection mechanism */
+ new_prot = static_protections(new_prot, address, pfn, 1, 0,
CPA_PROTECT);
new_prot = pgprot_clear_protnone_bits(new_prot);
emit_prologue(&prog, bpf_prog->aux->stack_depth,
bpf_prog_was_classic(bpf_prog));
+ addrs[0] = prog - temp;
- for (i = 0; i < insn_cnt; i++, insn++) {
+ for (i = 1; i <= insn_cnt; i++, insn++) {
const s32 imm32 = insn->imm;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
extra_pass = true;
goto skip_init_addrs;
}
- addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
+ addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;
* Before first pass, make a rough estimation of addrs[]
* each BPF instruction is translated to less than 64 bytes
*/
- for (proglen = 0, i = 0; i < prog->len; i++) {
+ for (proglen = 0, i = 0; i <= prog->len; i++) {
proglen += 64;
addrs[i] = proglen;
}
if (!image || !prog->is_func || extra_pass) {
if (image)
- bpf_prog_fill_jited_linfo(prog, addrs);
+ bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
kfree(addrs);
kfree(jit_data);
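The off-by-one change above gives addrs[] a new convention: addrs[0] records the prologue length and addrs[i] the end offset of instruction i (1-based), so instruction i occupies [addrs[i-1], addrs[i]). A small sketch with invented sizes:

#include <stdio.h>

int main(void)
{
        const int insn_len[] = { 3, 5, 2 };     /* pretend JITed sizes */
        const int n = 3, prologue = 7;
        int addrs[1 + 3];
        int i;

        addrs[0] = prologue;
        for (i = 1; i <= n; i++)
                addrs[i] = addrs[i - 1] + insn_len[i - 1];

        for (i = 1; i <= n; i++)
                printf("insn %d: [%d, %d)\n", i, addrs[i - 1], addrs[i]);
        return 0;
}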
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
+#include <linux/dmi.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
core_initcall(bsp_pm_check_init);
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
{
- int i = 0;
+ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
struct saved_msr *msr_array;
+ int total_num;
+ int i, j;
- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
- return -EINVAL;
- }
+ total_num = saved_msrs->num + num;
msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
if (!msr_array) {
return -ENOMEM;
}
- for (i = 0; i < total_num; i++) {
- msr_array[i].info.msr_no = msr_id[i];
+ if (saved_msrs->array) {
+ /*
+ * Multiple callbacks can invoke this function, so copy any
+ * MSR save requests from previous invocations.
+ */
+ memcpy(msr_array, saved_msrs->array,
+ sizeof(struct saved_msr) * saved_msrs->num);
+
+ kfree(saved_msrs->array);
+ }
+
+ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+ msr_array[i].info.msr_no = msr_id[j];
msr_array[i].valid = false;
msr_array[i].info.reg.q = 0;
}
- saved_context.saved_msrs.num = total_num;
- saved_context.saved_msrs.array = msr_array;
+ saved_msrs->num = total_num;
+ saved_msrs->array = msr_array;
return 0;
}
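msr_build_context() now appends rather than failing when a previous caller has already registered MSRs. The same grow-and-merge shape in plain C (names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct saved { unsigned int *ids; int num; };

static int build_context(struct saved *s, const unsigned int *ids, int num)
{
        int total = s->num + num, i, j;
        unsigned int *arr = malloc(total * sizeof(*arr));

        if (!arr)
                return -1;
        if (s->ids) {                   /* keep earlier requests */
                memcpy(arr, s->ids, s->num * sizeof(*arr));
                free(s->ids);
        }
        for (i = s->num, j = 0; i < total; i++, j++)
                arr[i] = ids[j];
        s->ids = arr;
        s->num = total;
        return 0;
}

int main(void)
{
        struct saved s = { 0 };
        unsigned int a[] = { 0x1a0 }, b[] = { 0xc0011004, 0xe2 };
        int i;

        build_context(&s, a, 1);
        build_context(&s, b, 2);        /* appends after the first call */
        for (i = 0; i < s.num; i++)
                printf("msr[%d]=%#x\n", i, s.ids[i]);
        free(s.ids);
        return 0;
}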
/*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOSen after the system is
 * suspended to RAM; this might cause unexpected behavior after wakeup.
* Thus we save/restore these specified MSRs across suspend/resume
u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
static const struct dmi_system_id msr_save_dmi_table[] = {
{}
};
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+ u32 cpuid_msr_id[] = {
+ MSR_AMD64_CPUID_FN_1,
+ };
+
+ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+ c->family);
+
+ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+ {
+ .vendor = X86_VENDOR_AMD,
+ .family = 0x15,
+ .model = X86_MODEL_ANY,
+ .feature = X86_FEATURE_ANY,
+ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+ },
+ {
+ .vendor = X86_VENDOR_AMD,
+ .family = 0x16,
+ .model = X86_MODEL_ANY,
+ .feature = X86_FEATURE_ANY,
+ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+ },
+ {}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+ const struct x86_cpu_id *m;
+ int ret = 0;
+
+ m = x86_match_cpu(msr_save_cpu_table);
+ if (m) {
+ pm_cpu_match_t fn;
+
+ fn = (pm_cpu_match_t)m->driver_data;
+ ret = fn(m);
+ }
+
+ return ret;
+}
+
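pm_cpu_check() dispatches through driver_data, the usual kernel idiom of smuggling a callback through a kernel_ulong_t field. A standalone sketch of the cast-and-call (table and handler invented; the integer/function-pointer round trip mirrors the kernel_ulong_t usage):

#include <stdio.h>

typedef int (*match_fn)(int family);

struct cpu_id { int family; unsigned long driver_data; };

static int save_cpuid_msrs(int family)
{
        printf("saving MSRs for family %#x\n", family);
        return 0;
}

static const struct cpu_id table[] = {
        { 0x15, (unsigned long)save_cpuid_msrs },
        { 0x16, (unsigned long)save_cpuid_msrs },
        { 0 }
};

int main(void)
{
        int this_family = 0x16, i;

        for (i = 0; table[i].family; i++) {
                if (table[i].family == this_family) {
                        match_fn fn = (match_fn)table[i].driver_data;
                        return fn(this_family);
                }
        }
        return 0;
}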
static int pm_check_save_msr(void)
{
dmi_check_system(msr_save_dmi_table);
+ pm_cpu_check(msr_save_cpu_table);
+
return 0;
}
KASAN_SANITIZE := n
KCOV_INSTRUMENT := n
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
# sure how to relocate those.
ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
+PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_STACKPROTECTOR
-CFLAGS_REMOVE_sha256.o += -fstack-protector
-CFLAGS_REMOVE_purgatory.o += -fstack-protector
-CFLAGS_REMOVE_string.o += -fstack-protector
-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
+PURGATORY_CFLAGS_REMOVE += -fstack-protector
endif
ifdef CONFIG_STACKPROTECTOR_STRONG
-CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
-CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
-CFLAGS_REMOVE_string.o += -fstack-protector-strong
-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
+PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
endif
ifdef CONFIG_RETPOLINE
-CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
+PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
endif
+CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o += $(PURGATORY_CFLAGS)
+
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
"add %2, %2, %7\n\t"
"addi %0, %0, -1\n\t"
"bnez %0, 1b\n\t"
+ "isync\n\t"
/* Jump to identity mapping */
"jx %3\n"
"2:\n\t"
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
-
- cookie = BLK_QC_T_NONE;
- if (bio->bi_opf & REQ_NOWAIT_INLINE)
- cookie = BLK_QC_T_EAGAIN;
- else if (bio->bi_opf & REQ_NOWAIT)
+ if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
- return cookie;
+ return BLK_QC_T_NONE;
}
trace_block_getrq(q, bio, bio->bi_opf);
struct blk_mq_hw_ctx *hctx, *next;
int i;
- cancel_delayed_work_sync(&q->requeue_work);
-
queue_for_each_hw_ctx(q, hctx, i)
WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
blk_free_queue_stats(q->stats);
+ if (queue_is_mq(q))
+ cancel_delayed_work_sync(&q->requeue_work);
+
blk_exit_queue(q);
blk_queue_free_zone_bitmaps(q);
make the card work).
config ATM_NICSTAR_USE_IDT77105
- bool "Use IDT77015 PHY driver (25Mbps)"
+ bool "Use IDT77105 PHY driver (25Mbps)"
depends on ATM_NICSTAR
help
Support for the PHYsical layer chip in ForeRunner LE25 cards. In
choice
prompt "Backlight initial state"
default CHARLCD_BL_FLASH
+ ---help---
+ Select the initial backlight state on boot or module load.
+
+ Previously, there was no option for this: the backlight flashed
+ briefly on init. Now you can also turn it off/on.
config CHARLCD_BL_OFF
bool "Off"
#include <generated/utsrelease.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define LCD_MINOR 156
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ */
+
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
+struct charlcd {
+ const struct charlcd_ops *ops;
+ const unsigned char *char_conv; /* Optional */
+
+ int ifwidth; /* 4-bit or 8-bit (default) */
+ int height;
+ int width;
+ int bwidth; /* Default set by charlcd_alloc() */
+ int hwidth; /* Default set by charlcd_alloc() */
+
+ void *drvdata; /* Set by charlcd_alloc() */
+};
+
+struct charlcd_ops {
+ /* Required */
+ void (*write_cmd)(struct charlcd *lcd, int cmd);
+ void (*write_data)(struct charlcd *lcd, int data);
+
+ /* Optional */
+ void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */
+ void (*clear_fast)(struct charlcd *lcd);
+ void (*backlight)(struct charlcd *lcd, int on);
+};
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
+
+int charlcd_register(struct charlcd *lcd);
+int charlcd_unregister(struct charlcd *lcd);
+
+void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
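A hedged usage sketch of the API this header declares; it is not a real driver and only illustrates the alloc/fill-ops/register sequence (the my_* names are invented, and it would only build in-tree):

#include <linux/errno.h>
#include "charlcd.h"

struct my_priv { int dummy; };

/* Stub callbacks; a real driver would touch the bus here. */
static void my_write_cmd(struct charlcd *lcd, int cmd) { }
static void my_write_data(struct charlcd *lcd, int data) { }

static const struct charlcd_ops my_ops = {
        .write_cmd  = my_write_cmd,     /* required */
        .write_data = my_write_data,    /* required */
};

static int my_probe(void)
{
        struct charlcd *lcd = charlcd_alloc(sizeof(struct my_priv));

        if (!lcd)
                return -ENOMEM;
        lcd->ops = &my_ops;
        lcd->width = 16;                /* 16x2 panel assumed */
        lcd->height = 2;
        return charlcd_register(lcd);
}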
#include <linux/property.h>
#include <linux/slab.h>
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
enum hd44780_pin {
/* Order does matter due to writing to GPIO array subsets! */
struct ht16k33_fbdev fbdev;
};
-static struct fb_fix_screeninfo ht16k33_fb_fix = {
+static const struct fb_fix_screeninfo ht16k33_fb_fix = {
.id = DRIVER_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo ht16k33_fb_var = {
+static const struct fb_var_screeninfo ht16k33_fb_var = {
.xres = HT16K33_MATRIX_LED_MAX_ROWS,
.yres = HT16K33_MATRIX_LED_MAX_COLS,
.xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define KEYPAD_MINOR 185
return;
err_lcd_unreg:
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
config REGMAP_SOUNDWIRE
tristate
- depends on SOUNDWIRE_BUS
+ depends on SOUNDWIRE
config REGMAP_SCCB
tristate
thi->name[0],
resource->name);
+ allow_kernel_signal(DRBD_SIGKILL);
+ allow_kernel_signal(SIGXCPU);
restart:
retval = thi->function(thi);
}
return true;
case RBD_OBJ_READ_PARENT:
+ /*
+ * The parent image is read only up to the overlap -- zero-fill
+ * from the overlap to the end of the request.
+ */
+ if (!*result) {
+ u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
+
+ if (obj_overlap < obj_req->ex.oe_len)
+ rbd_obj_zero_range(obj_req, obj_overlap,
+ obj_req->ex.oe_len - obj_overlap);
+ }
return true;
default:
BUG();
}
}
+ err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
- return err;
+ goto fail;
}
return 0;
}
kfree(req);
}
- return -ENOMEM;
-
+ return err;
}
static int connect_ring(struct backend_info *be)
usb_free_urb(urb);
- return 0;
+ return err;
}
static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
return 0;
}
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err;
+
+ bt_dev_dbg(hdev, "QCA pre shutdown cmd");
+
+ skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
+ NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
static void qca_tlv_check_data(struct rome_config *config,
const struct firmware *fw)
{
BT_DBG("Length\t\t : %d bytes", length);
config->dnld_mode = ROME_SKIP_EVT_NONE;
+ config->dnld_type = ROME_SKIP_EVT_NONE;
switch (config->type) {
case TLV_TYPE_PATCH:
evt = skb_put(skb, sizeof(*evt));
evt->ncmd = 1;
- evt->opcode = QCA_HCI_CC_OPCODE;
+ evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
*/
if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
config->dnld_type == ROME_SKIP_EVT_VSE)
- return qca_inject_cmd_complete_event(hdev);
+ ret = qca_inject_cmd_complete_event(hdev);
out:
release_firmware(fw);
return err;
}
+ /* Give the controller some time to get ready to receive the NVM */
+ msleep(10);
+
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name)
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
+#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
#define EDL_CMD_REQ_RES_EVT (0x00)
#define EDL_PATCH_VER_RES_EVT (0x19)
const char *firmware_name);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
{
return false;
}
+
+static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
#endif
{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8822CE Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
}
data->intf->needs_remote_wakeup = 1;
- /* device specific wakeup source enabled and required for USB
- * remote wakeup while host is suspended
- */
- device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
goto failed;
data->intf->needs_remote_wakeup = 0;
- device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:
fw_size = fw->size;
/* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30)
+ if (fw_size < 30) {
+ err = -EINVAL;
goto err_release_fw;
+ }
fw_size -= 30;
fw_ptr += 30;
ws_awake_device);
struct hci_uart *hu = qca->hu;
unsigned long retrans_delay;
+ unsigned long flags;
BT_DBG("hu %p wq awake device", hu);
/* Vote for serial clock */
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
/* Send wake indication to device */
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
- spin_unlock(&qca->hci_ibs_lock);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
struct qca_data *qca = container_of(work, struct qca_data,
ws_awake_rx);
struct hci_uart *hu = qca->hu;
+ unsigned long flags;
BT_DBG("hu %p wq awake rx", hu);
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
/* Always acknowledge device wake up,
qca->ibs_sent_wacks++;
- spin_unlock(&qca->hci_ibs_lock);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
unsigned long flags;
struct qca_data *qca = hu->priv;
- BT_DBG("hu %p want to sleep", hu);
+ BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
break;
case HCI_IBS_RX_ASLEEP:
- /* Fall through */
+ break;
default:
/* Any other state is illegal */
if (hdr->evt == HCI_EV_VENDOR)
complete(&qca->drop_ev_comp);
- kfree(skb);
+ kfree_skb(skb);
return 0;
}
{
struct hci_uart *hu = hci_get_drvdata(hdev);
+ /* Perform pre shutdown command */
+ qca_send_pre_shutdown_cmd(hdev);
+
qca_power_shutdown(hu);
return 0;
}
size_t pdata_size;
};
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(hostdev);
+ struct acpi_device *child;
+
+ device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+ list_for_each_entry(child, &adev->children, node)
+ acpi_device_clear_enumerated(child);
+}
+
/*
* hisi_lpc_acpi_probe - probe children for ACPI FW
* @hostdev: LPC host device pointer
return 0;
fail:
- device_for_each_child(hostdev, NULL,
- hisi_lpc_acpi_remove_subdev);
+ hisi_lpc_acpi_remove(hostdev);
return ret;
}
{
return -ENODEV;
}
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
#endif // CONFIG_ACPI
/*
range->fwnode = dev->fwnode;
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
+ range->hostdata = lpcdev;
+ range->ops = &hisi_lpc_ops;
+ lpcdev->io_host = range;
ret = logic_pio_register_range(range);
if (ret) {
dev_err(dev, "register IO range failed (%d)!\n", ret);
return ret;
}
- lpcdev->io_host = range;
/* register the LPC host PIO resources */
if (acpi_device)
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret)
+ if (ret) {
+ logic_pio_unregister_range(range);
return ret;
+ }
- lpcdev->io_host->hostdata = lpcdev;
- lpcdev->io_host->ops = &hisi_lpc_ops;
+ dev_set_drvdata(dev, lpcdev);
io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
dev_info(dev, "registered range [%pa - %pa]\n",
return ret;
}
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+ struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+ struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+ if (acpi_device)
+ hisi_lpc_acpi_remove(dev);
+ else
+ of_platform_depopulate(dev);
+
+ logic_pio_unregister_range(range);
+
+ return 0;
+}
+
static const struct of_device_id hisi_lpc_of_match[] = {
{ .compatible = "hisilicon,hip06-lpc", },
{ .compatible = "hisilicon,hip07-lpc", },
.acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
},
.probe = hisi_lpc_probe,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
*best_mode = SYSC_IDLE_SMART_WKUP;
else if (idlemodes & BIT(SYSC_IDLE_SMART))
*best_mode = SYSC_IDLE_SMART;
- else if (idlemodes & SYSC_IDLE_FORCE)
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
*best_mode = SYSC_IDLE_FORCE;
else
return -EINVAL;
SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
- SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
if (error)
return 0;
- if (val)
- ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
- else
- ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
return 0;
}
error = sysc_init_dts_quirks(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_map_and_check_registers(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_sysc_mask(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_idlemodes(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_syss_mask(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_pdata(ddata);
if (error)
- goto unprepare;
+ return error;
sysc_init_early_quirks(ddata);
error = sysc_init_resets(ddata);
if (error)
- return error;
+ goto unprepare;
error = sysc_init_module(ddata);
if (error)
/dev/vtpmX and a server-side file descriptor on which the vTPM
can receive commands.
+config TCG_FTPM_TEE
+ tristate "TEE based fTPM Interface"
+ depends on TEE && OPTEE
+ help
+	  This driver proxies for a firmware TPM running in a TEE.
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
+obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
* @dev: device to which the chip is associated.
*
* Issues a TPM2_Shutdown command prior to loss of power, as required by the
- * TPM 2.0 spec.
- * Then, calls bus- and device- specific shutdown code.
+ * TPM 2.0 spec. Then, calls bus- and device- specific shutdown code.
*
- * XXX: This codepath relies on the fact that sysfs is not enabled for
- * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
- * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ * Return: always 0 (i.e. success)
*/
static int tpm_class_shutdown(struct device *dev)
{
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- /* XXX: If you wish to remove this restriction, you must first update
- * tpm_sysfs to explicitly lock chip->ops.
- */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return;
- /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
- * is called before ops is null'd and the sysfs core synchronizes this
- * removal so that no callbacks are running or can run again
- */
WARN_ON(chip->groups_cnt != 0);
chip->groups[chip->groups_cnt++] = &tpm_dev_group;
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Microsoft Corporation
+ *
+ * Implements a firmware TPM as described here:
+ * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+ *
+ * A reference implementation is available here:
+ * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM
+ */
+
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+#include "tpm.h"
+#include "tpm_ftpm_tee.h"
+
+/*
+ * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896
+ *
+ * Randomly generated, and must correspond to the GUID on the TA side.
+ * Defined here in the reference implementation:
+ * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42
+ */
+static const uuid_t ftpm_ta_uuid =
+ UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4,
+ 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
+
+/**
+ * ftpm_tee_tpm_op_recv - retrieve fTPM response.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @buf: the buffer to store data.
+ * @count: the number of bytes to read.
+ *
+ * Return:
+ * In case of success the number of bytes received.
+ * On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t len;
+
+ len = pvt_data->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
+ __func__, count, len);
+ return -EIO;
+ }
+
+ memcpy(buf, pvt_data->resp_buf, len);
+ pvt_data->resp_len = 0;
+
+ return len;
+}
+
+/**
+ * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @buf: the buffer to send.
+ * @len: the number of bytes to send.
+ *
+ * Return:
+ * In case of success, returns 0.
+ * On failure, -errno
+ */
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t resp_len;
+ int rc;
+ u8 *temp_buf;
+ struct tpm_header *resp_header;
+ struct tee_ioctl_invoke_arg transceive_args;
+ struct tee_param command_params[4];
+ struct tee_shm *shm = pvt_data->shm;
+
+ if (len > MAX_COMMAND_SIZE) {
+ dev_err(&chip->dev,
+ "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
+ __func__, len);
+ return -EIO;
+ }
+
+ memset(&transceive_args, 0, sizeof(transceive_args));
+ memset(command_params, 0, sizeof(command_params));
+ pvt_data->resp_len = 0;
+
+ /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
+ transceive_args = (struct tee_ioctl_invoke_arg) {
+ .func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
+ .session = pvt_data->session,
+ .num_params = 4,
+ };
+
+ /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
+ command_params[0] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
+ .u.memref = {
+ .shm = shm,
+ .size = len,
+ .shm_offs = 0,
+ },
+ };
+
+ temp_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+ memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
+ memcpy(temp_buf, buf, len);
+
+ command_params[1] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ .u.memref = {
+ .shm = shm,
+ .size = MAX_RESPONSE_SIZE,
+ .shm_offs = MAX_COMMAND_SIZE,
+ },
+ };
+
+ rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
+ command_params);
+ if ((rc < 0) || (transceive_args.ret != 0)) {
+ dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
+ __func__, transceive_args.ret);
+ return (rc < 0) ? rc : transceive_args.ret;
+ }
+
+ temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+
+ resp_header = (struct tpm_header *)temp_buf;
+ resp_len = be32_to_cpu(resp_header->length);
+
+ /* sanity check resp_len */
+ if (resp_len < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "%s: tpm response header too small\n",
+ __func__);
+ return -EIO;
+ }
+ if (resp_len > MAX_RESPONSE_SIZE) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
+ __func__, resp_len);
+ return -EIO;
+ }
+
+ /* sanity checks look good, cache the response */
+ memcpy(pvt_data->resp_buf, temp_buf, resp_len);
+ pvt_data->resp_len = resp_len;
+
+ return 0;
+}
+
+static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
+{
+ return 0;
+}
+
+static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+	return false;
+}
+
+static const struct tpm_class_ops ftpm_tee_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = ftpm_tee_tpm_op_recv,
+ .send = ftpm_tee_tpm_op_send,
+ .cancel = ftpm_tee_tpm_op_cancel,
+ .status = ftpm_tee_tpm_op_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+ .req_canceled = ftpm_tee_tpm_req_canceled,
+};
+
+/*
+ * Check whether this driver supports the fTPM TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ /*
+	 * Currently this driver only supports a GP compliant, OP-TEE based fTPM TA
+ */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ftpm_tee_probe - initialize the fTPM
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+static int ftpm_tee_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &pdev->dev;
+ struct ftpm_tee_private *pvt_data = NULL;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private),
+ GFP_KERNEL);
+ if (!pvt_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, pvt_data);
+
+ /* Open context with TEE driver */
+ pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data->ctx)) {
+ if (PTR_ERR(pvt_data->ctx) == -ENOENT)
+ return -EPROBE_DEFER;
+ dev_err(dev, "%s: tee_client_open_context failed\n", __func__);
+ return PTR_ERR(pvt_data->ctx);
+ }
+
+ /* Open a session with fTPM TA */
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "%s: tee_client_open_session failed, err=%x\n",
+ __func__, sess_arg.ret);
+ rc = -EINVAL;
+ goto out_tee_session;
+ }
+ pvt_data->session = sess_arg.session;
+
+ /* Allocate dynamic shared memory with fTPM TA */
+ pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
+ MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(pvt_data->shm)) {
+ dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+ rc = -ENOMEM;
+ goto out_shm_alloc;
+ }
+
+ /* Allocate new struct tpm_chip instance */
+ chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops);
+ if (IS_ERR(chip)) {
+ dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__);
+ rc = PTR_ERR(chip);
+ goto out_chip_alloc;
+ }
+
+ pvt_data->chip = chip;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ /* Create a character device for the fTPM */
+ rc = tpm_chip_register(pvt_data->chip);
+ if (rc) {
+ dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n",
+ __func__, rc);
+ goto out_chip;
+ }
+
+ return 0;
+
+out_chip:
+ put_device(&pvt_data->chip->dev);
+out_chip_alloc:
+ tee_shm_free(pvt_data->shm);
+out_shm_alloc:
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+out_tee_session:
+ tee_client_close_context(pvt_data->ctx);
+
+ return rc;
+}
+
+/**
+ * ftpm_tee_remove - remove the TPM device
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ * 0 always.
+ */
+static int ftpm_tee_remove(struct platform_device *pdev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+ /* Release the chip */
+ tpm_chip_unregister(pvt_data->chip);
+
+ /* frees chip */
+ put_device(&pvt_data->chip->dev);
+
+ /* Free the shared memory pool */
+ tee_shm_free(pvt_data->shm);
+
+ /* close the existing session with fTPM TA*/
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+
+ /* close the context with TEE driver */
+ tee_client_close_context(pvt_data->ctx);
+
+ /* memory allocated with devm_kzalloc() is freed automatically */
+
+ return 0;
+}
+
+static const struct of_device_id of_ftpm_tee_ids[] = {
+ { .compatible = "microsoft,ftpm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
+
+static struct platform_driver ftpm_tee_driver = {
+ .driver = {
+ .name = "ftpm-tee",
+ .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ },
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+};
+
+module_platform_driver(ftpm_tee_driver);
+
+MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
+MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Microsoft Corporation
+ */
+
+#ifndef __TPM_FTPM_TEE_H__
+#define __TPM_FTPM_TEE_H__
+
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+/* The IDs of the TA functions (TAFs) implemented by this TA */
+#define FTPM_OPTEE_TA_SUBMIT_COMMAND (0)
+#define FTPM_OPTEE_TA_EMULATE_PPI (1)
+
+/* max. buffer size supported by fTPM */
+#define MAX_COMMAND_SIZE 4096
+#define MAX_RESPONSE_SIZE 4096
+
+/**
+ * struct ftpm_tee_private - fTPM's private data
+ * @chip: struct tpm_chip instance registered with tpm framework.
+ * @session: fTPM TA session identifier.
+ * @resp_len: cached response buffer length.
+ * @resp_buf: cached response buffer.
+ * @ctx: TEE context handler.
+ * @shm: Memory pool shared with fTPM TA in TEE.
+ */
+struct ftpm_tee_private {
+ struct tpm_chip *chip;
+ u32 session;
+ size_t resp_len;
+ u8 resp_buf[MAX_RESPONSE_SIZE];
+ struct tee_context *ctx;
+ struct tee_shm *shm;
+};
+
+#endif /* __TPM_FTPM_TEE_H__ */
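For context on how the pieces above fit together, here is a minimal sketch, not taken from this patch, of how a command could be handed to the fTPM TA using the session and shared-memory pool that ftpm_tee_probe() sets up. ftpm_submit_sketch() and its error handling are hypothetical; the TEE client calls (tee_shm_get_va(), tee_client_invoke_func()) are the real kernel API.

/*
 * A hedged sketch of a send path, assuming the probe() above succeeded.
 */
static int ftpm_submit_sketch(struct ftpm_tee_private *pvt_data,
			      const u8 *cmd, size_t cmd_len)
{
	struct tee_ioctl_invoke_arg inv_arg;
	struct tee_param param[4];
	u8 *va;

	memset(&inv_arg, 0, sizeof(inv_arg));
	memset(param, 0, sizeof(param));

	inv_arg.func = FTPM_OPTEE_TA_SUBMIT_COMMAND;
	inv_arg.session = pvt_data->session;
	inv_arg.num_params = 4;

	/* Stage the command bytes in the preallocated shared buffer. */
	va = tee_shm_get_va(pvt_data->shm, 0);
	if (IS_ERR(va))
		return PTR_ERR(va);
	memcpy(va, cmd, cmd_len);

	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param[0].u.memref.shm = pvt_data->shm;
	param[0].u.memref.size = MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE;

	if (tee_client_invoke_func(pvt_data->ctx, &inv_arg, param) < 0 ||
	    inv_arg.ret != 0)
		return -EIO;

	return 0;	/* the response now sits in the same shared buffer */
}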
goto out_err;
}
+ tpm_chip_start(chip);
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
} else {
tpm_tis_probe_irq(chip, intmask);
}
+ tpm_chip_stop(chip);
}
rc = tpm_chip_register(chip);
return NULL;
}
+#ifdef CONFIG_OF
+static int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name, struct of_phandle_args *out_args);
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
+#else
+static inline int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name,
+ struct of_phandle_args *out_args)
+{
+ return -ENOENT;
+}
+static inline struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
+{
+ return ERR_PTR(-ENOENT);
+}
+#endif
+
/**
* clk_core_get - Find the clk_core parent of a clk
* @core: clk to find parent of
* };
*
* Returns: -ENOENT when the provider can't be found or the clk doesn't
- * exist in the provider. -EINVAL when the name can't be found. NULL when the
- * provider knows about the clk but it isn't provided on this system.
+ * exist in the provider or the name can't be found in the DT node or
+ * in a clkdev lookup. NULL when the provider knows about the clk but it
+ * isn't provided on this system.
* A valid clk_core pointer when the clk can be found in the provider.
*/
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
struct device *dev = core->dev;
const char *dev_id = dev ? dev_name(dev) : NULL;
struct device_node *np = core->of_node;
+ struct of_phandle_args clkspec;
- if (np && (name || index >= 0))
- hw = of_clk_get_hw(np, index, name);
-
- /*
- * If the DT search above couldn't find the provider or the provider
- * didn't know about this clk, fallback to looking up via clkdev based
- * clk_lookups
- */
- if (PTR_ERR(hw) == -ENOENT && name)
+ if (np && (name || index >= 0) &&
+ !of_parse_clkspec(np, index, name, &clkspec)) {
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
+ of_node_put(clkspec.np);
+ } else if (name) {
+ /*
+ * If the DT search above couldn't find the provider, fall back
+ * to looking it up via clkdev based clk_lookups.
+ */
hw = clk_find_hw(dev_id, name);
+ }
if (IS_ERR(hw))
return ERR_CAST(hw);
parent = ERR_PTR(-EPROBE_DEFER);
} else {
parent = clk_core_get(core, index);
- if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
+ if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
parent = clk_core_lookup(entry->name);
}
break;
/* Fallback to comparing globally unique names */
- if (!strcmp(parent->name, core->parents[i].name))
+ if (core->parents[i].name &&
+ !strcmp(parent->name, core->parents[i].name))
break;
}
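The practical effect of the reworked lookup is easiest to see from a provider's parent table. A hedged sketch with a hypothetical peripheral clock: .fw_name is resolved through the consumer's DT "clocks"/"clock-names" properties (the of_parse_clkspec() path above), while .name is only consulted afterwards, via a clkdev based clk_lookup or the global clock name list.

#include <linux/clk-provider.h>

/* Hypothetical parent table, not from this patch. */
static const struct clk_parent_data periph_parents[] = {
	/* "busclk" is matched against DT clock-names first; "bus_root"
	 * is the clkdev/global-name fallback tried only on DT miss. */
	{ .fw_name = "busclk", .name = "bus_root" },
	{ .index = 1 },		/* second entry of the DT "clocks" list */
};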
#include "clk-exynos5-subcmu.h"
static struct samsung_clk_provider *ctx;
-static const struct exynos5_subcmu_info *cmu;
+static const struct exynos5_subcmu_info **cmu;
static int nr_cmus;
static void exynos5_subcmu_clk_save(void __iomem *base,
* when OF-core populates all device-tree nodes.
*/
void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
- const struct exynos5_subcmu_info *_cmu)
+ const struct exynos5_subcmu_info **_cmu)
{
ctx = _ctx;
cmu = _cmu;
nr_cmus = _nr_cmus;
for (; _nr_cmus--; _cmu++) {
- exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
- _cmu->nr_gate_clks);
- exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
- _cmu->nr_suspend_regs);
+ exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
+ (*_cmu)->nr_gate_clks);
+ exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
+ (*_cmu)->nr_suspend_regs);
}
}
if (of_property_read_string(np, "label", &name) < 0)
continue;
for (i = 0; i < nr_cmus; i++)
- if (strcmp(cmu[i].pd_name, name) == 0)
+ if (strcmp(cmu[i]->pd_name, name) == 0)
exynos5_clk_register_subcmu(&pdev->dev,
- &cmu[i], np);
+ cmu[i], np);
}
return 0;
}
};
void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
- const struct exynos5_subcmu_info *cmu);
+ const struct exynos5_subcmu_info **cmu);
#endif
.pd_name = "DISP1",
};
+static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
+ &exynos5250_disp_subcmu,
+};
+
static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
ARRAY_SIZE(exynos5250_clk_regs));
- exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
+ exynos5250_subcmus);
samsung_clk_of_add_provider(np, ctx);
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
- GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
- SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+ /* Maudio Block */
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
/* GSCL Block */
DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
- /* MSCL Block */
- DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
-
/* PSGEN */
DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
- /* Maudio Block */
- GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
- GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
- GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
-
/* FSYS Block */
GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
- /* MSCL Block */
- GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
- GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
- GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
- GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
- GATE_IP_MSCL, 8, 0, 0),
- GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
- GATE_IP_MSCL, 9, 0, 0),
- GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
- GATE_IP_MSCL, 10, 0, 0),
-
/* ISP */
GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
{ DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
};
-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
- {
- .div_clks = exynos5x_disp_div_clks,
- .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
- .gate_clks = exynos5x_disp_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
- .suspend_regs = exynos5x_disp_suspend_regs,
- .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
- .pd_name = "DISP",
- }, {
- .div_clks = exynos5x_gsc_div_clks,
- .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
- .gate_clks = exynos5x_gsc_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
- .suspend_regs = exynos5x_gsc_suspend_regs,
- .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
- .pd_name = "GSC",
- }, {
- .div_clks = exynos5x_mfc_div_clks,
- .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
- .gate_clks = exynos5x_mfc_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
- .suspend_regs = exynos5x_mfc_suspend_regs,
- .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
- .pd_name = "MFC",
- },
+static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
+ /* MSCL Block */
+ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+ GATE_IP_MSCL, 8, 0, 0),
+ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+ GATE_IP_MSCL, 9, 0, 0),
+ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+ GATE_IP_MSCL, 10, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
+ DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
+ { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
+ { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
+ { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
+};
+
+static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
+ { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
+};
+
+static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
+ .div_clks = exynos5x_disp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
+ .gate_clks = exynos5x_disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
+ .suspend_regs = exynos5x_disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+ .pd_name = "DISP",
+};
+
+static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
+ .div_clks = exynos5x_gsc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
+ .gate_clks = exynos5x_gsc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+ .suspend_regs = exynos5x_gsc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+ .pd_name = "GSC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
+ .div_clks = exynos5x_mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
+ .gate_clks = exynos5x_mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+ .suspend_regs = exynos5x_mfc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+ .pd_name = "MFC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
+ .div_clks = exynos5x_mscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
+ .gate_clks = exynos5x_mscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
+ .suspend_regs = exynos5x_mscl_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
+ .pd_name = "MSC",
+};
+
+static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
+ .gate_clks = exynos5800_mau_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
+ .suspend_regs = exynos5800_mau_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
+ .pd_name = "MAU",
+};
+
+static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
+ &exynos5x_disp_subcmu,
+ &exynos5x_gsc_subcmu,
+ &exynos5x_mfc_subcmu,
+ &exynos5x_mscl_subcmu,
+};
+
+static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
+ &exynos5x_disp_subcmu,
+ &exynos5x_gsc_subcmu,
+ &exynos5x_mfc_subcmu,
+ &exynos5x_mscl_subcmu,
+ &exynos5800_mau_subcmu,
};
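The shape of this refactor, from one flat array of structs to arrays of pointers, is what lets exynos5x_subcmus and exynos5800_subcmus share the four common entries while only the Exynos5800 list carries the MAU entry. A standalone toy version of the pattern (names here are illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

struct subcmu_info { const char *pd_name; };

static const struct subcmu_info disp = { .pd_name = "DISP" };
static const struct subcmu_info mau  = { .pd_name = "MAU" };

/* Shared entries are referenced, not copied, so variant lists stay cheap. */
static const struct subcmu_info *common_list[]  = { &disp };
static const struct subcmu_info *variant_list[] = { &disp, &mau };

static void init_subcmus(const struct subcmu_info **cmu, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("registering sub-CMU for power domain %s\n",
		       cmu[i]->pd_name);
}

int main(void)
{
	init_subcmus(common_list, sizeof(common_list) / sizeof(*common_list));
	init_subcmus(variant_list, sizeof(variant_list) / sizeof(*variant_list));
	return 0;
}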
static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
samsung_clk_extended_sleep_init(reg_base,
exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
- if (soc == EXYNOS5800)
+
+ if (soc == EXYNOS5800) {
samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
ARRAY_SIZE(exynos5800_clk_regs));
- exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
- exynos5x_subcmus);
+
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
+ exynos5800_subcmus);
+ } else {
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+ exynos5x_subcmus);
+ }
samsung_clk_of_add_provider(np, ctx);
}
if (socfpgaclk->fixed_div) {
div = socfpgaclk->fixed_div;
} else {
- if (!socfpgaclk->bypass_reg)
+ if (socfpgaclk->hw.reg)
div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
}
}
ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
- if (ret)
+ if (ret < 0)
break;
}
unsigned long flags;
unsigned int i;
+ /* If there's no device there's nothing to do */
+ if (!ccp)
+ return 0;
+
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 1;
unsigned long flags;
unsigned int i;
+ /* If there's no device there's nothing to do */
+ if (!ccp)
+ return 0;
+
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 0;
struct dw_edma_region {
phys_addr_t paddr;
- dma_addr_t vaddr;
+ void __iomem *vaddr;
size_t sz;
};
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
dw->rg_region.vaddr += pdata->rg_off;
dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
dw->rg_region.paddr += pdata->rg_off;
dw->rg_region.sz = pdata->rg_sz;
- dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+ dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
dw->ll_region.vaddr += pdata->ll_off;
dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
dw->ll_region.paddr += pdata->ll_off;
dw->ll_region.sz = pdata->ll_sz;
- dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+ dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
dw->dt_region.vaddr += pdata->dt_off;
dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
dw->dt_region.paddr += pdata->dt_off;
pci_dbg(pdev, "Mode:\t%s\n",
dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
- &dw->rg_region.vaddr, &dw->rg_region.paddr);
+ dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- &dw->ll_region.vaddr, &dw->ll_region.paddr);
+ dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- &dw->dt_region.vaddr, &dw->dt_region.paddr);
+ dw->dt_region.vaddr, &dw->dt_region.paddr);
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+ return dw->rg_region.vaddr;
}
#define SET(dw, name, value) \
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_v0_lli *lli;
- struct dw_edma_v0_llp *llp;
+ struct dw_edma_v0_lli __iomem *lli;
+ struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
- u64 sar, dar, addr;
int j;
- lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+ lli = chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
/* Transfer size */
SET_LL(&lli[i].transfer_size, child->sz);
/* SAR - low, high */
- sar = cpu_to_le64(child->sar);
- SET_LL(&lli[i].sar_low, lower_32_bits(sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+ SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+ SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
/* DAR - low, high */
- dar = cpu_to_le64(child->dar);
- SET_LL(&lli[i].dar_low, lower_32_bits(dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+ SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+ SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
i++;
}
- llp = (struct dw_edma_v0_llp *)&lli[i];
+ llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
/* Channel control */
SET_LL(&llp->control, control);
/* Linked list - low, high */
- addr = cpu_to_le64(chunk->ll_region.paddr);
- SET_LL(&llp->llp_low, lower_32_bits(addr));
- SET_LL(&llp->llp_high, upper_32_bits(addr));
+ SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
}
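The dropped cpu_to_le64() calls were not cosmetic: lower_32_bits()/upper_32_bits() expect a CPU-order value, and the register accessors behind SET_LL()/SET_CH() already perform the CPU-to-little-endian conversion, so converting first double-swapped on big-endian hosts. A self-contained userspace illustration, where __builtin_bswap64() stands in for what cpu_to_le64() does on a big-endian CPU:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr = 0x0000000123456789ULL;
	/* What cpu_to_le64() does on a big-endian CPU: a byte swap. */
	uint64_t le = __builtin_bswap64(paddr);

	/* Old code on big-endian: low half of the pre-swapped value. */
	printf("old low word on BE: 0x%08x\n", (unsigned int)le);
	/* New code: low half of the CPU-order value, endian-neutral. */
	printf("correct low word:   0x%08x\n", (unsigned int)paddr);
	return 0;
}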
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
- u64 llp;
dw_edma_v0_core_write_chunk(chunk);
SET_CH(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list - low, high */
- llp = cpu_to_le64(chunk->ll_region.paddr);
- SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
- SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+ SET_CH(dw, chan->dir, chan->id, llp_low,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH(dw, chan->dir, chan->id, llp_high,
+ upper_32_bits(chunk->ll_region.paddr));
}
/* Doorbell */
SET_RW(dw, chan->dir, doorbell,
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
- ((dma_addr_t *)&regs->name)
+ ((void __force *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
static struct dentry *base_dir;
static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
static struct {
- void *start;
- void *end;
+ void __iomem *start;
+ void __iomem *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
- char name[24];
+ const char *name;
dma_addr_t *reg;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
+ void __iomem *reg = (void __force __iomem *)data;
if (dw->mode == EDMA_MODE_LEGACY &&
- data >= (void *)&regs->type.legacy.ch) {
- void *ptr = (void *)&regs->type.legacy.ch;
+ reg >= (void __iomem *)&regs->type.legacy.ch) {
+ void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
unsigned long flags;
u16 ch;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= data && data < lim[0][ch].end) {
- ptr += (data - lim[0][ch].start);
+ if (reg >= lim[0][ch].start && reg < lim[0][ch].end) {
+ ptr += (reg - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= data && data < lim[1][ch].end) {
- ptr += (data - lim[1][ch].start);
+ if (reg >= lim[1][ch].start && reg < lim[1][ch].end) {
+ ptr += (reg - lim[1][ch].start);
goto legacy_sel_rd;
}
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
- *val = readl(data);
+ *val = readl(reg);
}
return 0;
}
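All of these hunks are __iomem correctness fixes rather than behavior changes. A minimal illustration, not from this driver, of what the annotation buys: sparse places __iomem pointers in a distinct address space, so passing a plain pointer to readl(), or dereferencing an __iomem pointer directly, is flagged when building with "make C=1".

#include <linux/io.h>

static u32 read_mmio_reg(void __iomem *base, unsigned long offset)
{
	return readl(base + offset);	/* correct: stays in __iomem space */
}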
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
struct dentry *dir)
{
int nr_entries;
if (!dw)
return;
- regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+ regs = dw->rg_region.vaddr;
if (!regs)
return;
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ /* Fall through */
case FSL_DMA_IP_83XX:
chan->toggle_ext_start = fsl_chan_toggle_ext_start;
chan->set_src_loop_size = fsl_chan_set_src_loop_size;
* @iomem: remapped I/O memory base
* @n_channels: number of available channels
* @channels: array of DMAC channels
+ * @channels_mask: bitfield of which DMA channels are managed by this driver
* @modules: bitmask of client modules in use
*/
struct rcar_dmac {
unsigned int n_channels;
struct rcar_dmac_chan *channels;
+ unsigned int channels_mask;
DECLARE_BITMAP(modules, 256);
};
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
for (i = 0; i < dmac->n_channels; ++i) {
struct rcar_dmac_chan *chan = &dmac->channels[i];
+ if (!(dmac->channels_mask & BIT(i)))
+ continue;
+
/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
return 0;
}
+#define RCAR_DMAC_MAX_CHANNELS 32
+
static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
struct device_node *np = dev->of_node;
return ret;
}
- if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+ /* The hardware and driver don't support more than 32 bits in CHCLR */
+ if (dmac->n_channels <= 0 ||
+ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
dev_err(dev, "invalid number of channels %u\n",
dmac->n_channels);
return -EINVAL;
}
+ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+
return 0;
}
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
- unsigned int channels_offset = 0;
struct dma_device *engine;
struct rcar_dmac *dmac;
struct resource *mem;
* level we can't disable it selectively, so ignore channel 0 for now if
* the device is part of an IOMMU group.
*/
- if (device_iommu_mapped(&pdev->dev)) {
- dmac->n_channels--;
- channels_offset = 1;
- }
+ if (device_iommu_mapped(&pdev->dev))
+ dmac->channels_mask &= ~BIT(0);
dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
sizeof(*dmac->channels), GFP_KERNEL);
INIT_LIST_HEAD(&engine->channels);
for (i = 0; i < dmac->n_channels; ++i) {
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
- i + channels_offset);
+ if (!(dmac->channels_mask & BIT(i)))
+ continue;
+
+ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
if (ret < 0)
goto error;
}
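The channels_mask scheme replaces the old trick of decrementing n_channels and offsetting every index: channel numbering stays stable, and any channel, not just the first, can be masked out. A standalone sketch of the pattern, with BIT()/GENMASK() reimplemented only so the demo compiles outside the kernel:

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & ((~0U) << (l)))

int main(void)
{
	unsigned int n_channels = 8;
	unsigned int channels_mask = GENMASK(n_channels - 1, 0);

	/* Channel 0 unusable behind an IOMMU: clear its bit, keep the rest. */
	channels_mask &= ~BIT(0);

	for (unsigned int i = 0; i < n_channels; i++) {
		if (!(channels_mask & BIT(i)))
			continue;
		printf("probing channel %u\n", i);
	}
	return 0;
}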
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct dma_slave_config *slave_cfg = &schan->slave_cfg;
dma_addr_t src = 0, dst = 0;
+ dma_addr_t start_src = 0, start_dst = 0;
struct sprd_dma_desc *sdesc;
struct scatterlist *sg;
u32 len = 0;
dst = sg_dma_address(sg);
}
+ if (!i) {
+ start_src = src;
+ start_dst = dst;
+ }
+
/*
* The link-list mode needs at least 2 link-list
* configurations. If there is only one sg, it doesn't
}
}
- ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
- dir, flags, slave_cfg);
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
+ start_dst, len, dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
chan = &dmadev->chan[id];
if (!chan) {
- dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
goto exit;
}
return chan;
}
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
return 0;
}
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
- if (ret)
+ if (ret) {
+ kfree(rsv_events);
return ret;
+ }
for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
if (src_icg) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
d->ei = 1;
- d->fi = src_icg;
+ d->fi = src_icg + 1;
} else if (xt->src_inc) {
d->ccr |= CCR_SRC_AMODE_POSTINC;
d->fi = 0;
if (dst_icg) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
sg->ei = 1;
- sg->fi = dst_icg;
+ sg->fi = dst_icg + 1;
} else if (xt->dst_inc) {
d->ccr |= CCR_DST_AMODE_POSTINC;
sg->fi = 0;
rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
- if (rc)
+ if (rc) {
+ omap_dma_free(od);
return rc;
+ }
}
if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
First, ECC must be configured in the bootloader. Then, this driver
will expose error counters via the EDAC kernel framework.
+config EDAC_BLUEFIELD
+ tristate "Mellanox BlueField Memory ECC"
+ depends on ARM64 && ((MELLANOX_PLATFORM && ACPI) || COMPILE_TEST)
+ help
+ Support for error detection and correction on the
+ Mellanox BlueField SoCs.
+
endif # EDAC
obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
+obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
- { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
return 0;
}
+/*********************** SDRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+
+static const struct edac_device_prv_data s10_sdramecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_S10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_S10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_S10_ECC_EN,
+ .ecc_en_ofst = ALTR_S10_ECC_CTRL_SDRAM_OFST,
+ .ce_set_mask = ALTR_S10_ECC_TSERRA,
+ .ue_set_mask = ALTR_S10_ECC_TDERRA,
+ .set_err_ofst = ALTR_S10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+#endif /* CONFIG_EDAC_ALTERA_SDRAM */
+
/*********************** OCRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
#endif
#ifdef CONFIG_EDAC_ALTERA_SDMMC
{ .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+ { .compatible = "altr,sdram-edac-s10", .data = &s10_sdramecc_data },
#endif
{},
};
struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
+ unsigned long bits;
dberr = (irq == edac->db_irq) ? 1 : 0;
sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+ bits = irq_status;
+ for_each_set_bit(bit, &bits, 32) {
irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
if (irq)
generic_handle_irq(irq);
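The widening copy matters because for_each_set_bit() operates on unsigned long words: on 64-bit, casting the address of a 32-bit irq_status to unsigned long * made the bit walk read four bytes past the variable (and pick the wrong bytes entirely on big-endian). A self-contained stand-in for the fixed pattern:

#include <stdio.h>

int main(void)
{
	/*
	 * unsigned long is 8 bytes on 64-bit while irq_status is 4,
	 * so a plain widening assignment is the safe way to feed a
	 * 32-bit status word to an unsigned-long bit iterator.
	 */
	unsigned int irq_status = 0x80000001u;
	unsigned long bits = irq_status;

	for (int bit = 0; bit < 32; bit++)	/* for_each_set_bit() stand-in */
		if (bits & (1UL << bit))
			printf("handling source %d\n", bit);
	return 0;
}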
struct device_node *parent;
int ret = 0;
+ /* SDRAM must be present for Linux (implied parent) */
+ if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+ return 0;
+
/* Ensure parent device is enabled if parent node exists */
parent = of_parse_phandle(np, "altr,ecc-parent", 0);
if (parent && !of_device_is_available(parent))
return ret;
}
+static int get_s10_sdram_edac_resource(struct device_node *np,
+ struct resource *res)
+{
+ struct device_node *parent;
+ int ret;
+
+ parent = of_parse_phandle(np, "altr,sdr-syscon", 0);
+ if (!parent)
+ return -ENODEV;
+
+ ret = of_address_to_resource(parent, 0, res);
+ of_node_put(parent);
+
+ return ret;
+}
+
static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
struct device_node *np)
{
if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
return -ENOMEM;
- rc = of_address_to_resource(np, 0, &res);
+ if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+ rc = get_s10_sdram_edac_resource(np, &res);
+ else
+ rc = of_address_to_resource(np, 0, &res);
+
if (rc < 0) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: no resource address\n", ecc_name);
of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+ of_device_is_compatible(child, "altr,sdram-edac-s10") ||
+#endif
of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
altr_edac_a10_device_add(edac, child);
#ifdef CONFIG_EDAC_ALTERA_SDRAM
- else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
- (of_device_is_compatible(child, "altr,sdram-edac-s10")))
+ else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
/************* Stratix10 Defines **************/
+#define ALTR_S10_ECC_CTRL_SDRAM_OFST 0x00
+#define ALTR_S10_ECC_EN BIT(0)
+
+#define ALTR_S10_ECC_ERRINTEN_OFST 0x10
+#define ALTR_S10_ECC_ERRINTENS_OFST 0x14
+#define ALTR_S10_ECC_ERRINTENR_OFST 0x18
+#define ALTR_S10_ECC_SERRINTEN BIT(0)
+
+#define ALTR_S10_ECC_INTMODE_OFST 0x1C
+#define ALTR_S10_ECC_INTMODE BIT(0)
+
+#define ALTR_S10_ECC_INTSTAT_OFST 0x20
+#define ALTR_S10_ECC_SERRPENA BIT(0)
+#define ALTR_S10_ECC_DERRPENA BIT(8)
+#define ALTR_S10_ECC_ERRPENA_MASK (ALTR_S10_ECC_SERRPENA | \
+ ALTR_S10_ECC_DERRPENA)
+
+#define ALTR_S10_ECC_INTTEST_OFST 0x24
+#define ALTR_S10_ECC_TSERRA BIT(0)
+#define ALTR_S10_ECC_TDERRA BIT(8)
+#define ALTR_S10_ECC_TSERRB BIT(16)
+#define ALTR_S10_ECC_TDERRB BIT(24)
+
#define ALTR_S10_DERR_ADDRA_OFST 0x2C
/* Stratix10 ECC Manager Defines */
#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
-#define S10_DBE_IRQ_MASK 0x3FE
+#define S10_DBE_IRQ_MASK 0x3FFFE
/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST 0x40
(dclr & BIT(15)) ? "yes" : "no");
}
-/*
- * The Address Mask should be a contiguous set of bits in the non-interleaved
- * case. So to check for CS interleaving, find the most- and least-significant
- * bits of the mask, generate a contiguous bitmask, and compare the two.
- */
-static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+#define CS_EVEN_PRIMARY BIT(0)
+#define CS_ODD_PRIMARY BIT(1)
+#define CS_EVEN_SECONDARY BIT(2)
+#define CS_ODD_SECONDARY BIT(3)
+
+#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
+#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
+
+static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
- u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
- u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
- u32 test_mask = GENMASK(msb, lsb);
+ int cs_mode = 0;
- edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+ if (csrow_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_PRIMARY;
- return mask ^ test_mask;
+ if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_PRIMARY;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_SECONDARY;
+
+ return cs_mode;
}
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
- int dimm, size0, size1, cs0, cs1;
+ int dimm, size0, size1, cs0, cs1, cs_mode;
edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
- for (dimm = 0; dimm < 4; dimm++) {
- size0 = 0;
+ for (dimm = 0; dimm < 2; dimm++) {
cs0 = dimm * 2;
-
- if (csrow_enabled(cs0, ctrl, pvt))
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
-
- size1 = 0;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt)) {
- /*
- * CS interleaving is only supported if both CSes have
- * the same amount of memory. Because they are
- * interleaved, it will look like both CSes have the
- * full amount of memory. Save the size for both as
- * half the amount we found on CS0, if interleaved.
- */
- if (f17_cs_interleaved(pvt, ctrl, cs1))
- size1 = size0 = (size0 >> 1);
- else
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
- }
+ cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
+
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
+ } else if (pvt->fam >= 0x17) {
+ int umc;
+
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 4;
+ pvt->csels[umc].m_cnt = 2;
+ }
+
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
+static void read_umc_base_mask(struct amd64_pvt *pvt)
+{
+ u32 umc_base_reg, umc_base_reg_sec;
+ u32 umc_mask_reg, umc_mask_reg_sec;
+ u32 base_reg, base_reg_sec;
+ u32 mask_reg, mask_reg_sec;
+ u32 *base, *base_sec;
+ u32 *mask, *mask_sec;
+ int cs, umc;
+
+ for_each_umc(umc) {
+ umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
+ umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
+
+ for_each_chip_select(cs, umc, pvt) {
+ base = &pvt->csels[umc].csbases[cs];
+ base_sec = &pvt->csels[umc].csbases_sec[cs];
+
+ base_reg = umc_base_reg + (cs * 4);
+ base_reg_sec = umc_base_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base, base_reg);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
+ edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base_sec, base_reg_sec);
+ }
+
+ umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
+ umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
+
+ for_each_chip_select_mask(cs, umc, pvt) {
+ mask = &pvt->csels[umc].csmasks[cs];
+ mask_sec = &pvt->csels[umc].csmasks_sec[cs];
+
+ mask_reg = umc_mask_reg + (cs * 4);
+ mask_reg_sec = umc_mask_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask, mask_reg);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
+ edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask_sec, mask_reg_sec);
+ }
+ }
+}
+
/*
* Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
+ int cs;
prep_chip_selects(pvt);
- if (pvt->umc) {
- base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
- base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
- mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
- mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
- } else {
- base_reg0 = DCSB0;
- base_reg1 = DCSB1;
- mask_reg0 = DCSM0;
- mask_reg1 = DCSM1;
- }
+ if (pvt->umc)
+ return read_umc_base_mask(pvt);
for_each_chip_select(cs, 0, pvt) {
- int reg0 = base_reg0 + (cs * 4);
- int reg1 = base_reg1 + (cs * 4);
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
- cs, *base0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
- if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
- cs, *base1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
-
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = mask_reg0 + (cs * 4);
- int reg1 = mask_reg1 + (cs * 4);
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
- cs, *mask0, reg0);
-
- if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
- cs, *mask1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
}
return ddr3_cs_size(cs_mode, false);
}
-static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+ u32 addr_mask_orig, addr_mask_deinterleaved;
+ u32 msb, weight, num_zero_bits;
+ int dimm, size = 0;
- /* Each mask is used for every two base addresses. */
- u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+ /* No Chip Selects are enabled. */
+ if (!cs_mode)
+ return size;
- /* Register [31:1] = Address [39:9]. Size is in kBs here. */
- u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+ /* Requested size of an even CS but none are enabled. */
+ if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
+ return size;
- edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+ /* Requested size of an odd CS but none are enabled. */
+ if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
+ return size;
+
+ /*
+ * There is one mask per DIMM, and two Chip Selects per DIMM.
+ * CS0 and CS1 -> DIMM0
+ * CS2 and CS3 -> DIMM1
+ */
+ dimm = csrow_nr >> 1;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+ addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
+ else
+ addr_mask_orig = pvt->csels[umc].csmasks[dimm];
+
+ /*
+ * The number of zero bits in the mask is equal to the number of bits
+ * in a full mask minus the number of bits in the current mask.
+ *
+ * The MSB is the number of bits in the full mask because BIT[0] is
+ * always 0.
+ */
+ msb = fls(addr_mask_orig) - 1;
+ weight = hweight_long(addr_mask_orig);
+ num_zero_bits = msb - weight;
+
+ /* Take the number of zero bits off from the top of the mask. */
+ addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+
+ edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+ edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ size = (addr_mask_deinterleaved >> 2) + 1;
/* Return size in MBs. */
return size >> 10;
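A worked example of the deinterleaving arithmetic above, with hypothetical mask values; the helpers mirror fls(), hweight_long() and GENMASK() only so the demo compiles standalone. A contiguous mask decodes to 16384 MB, and the same mask with one interleave hole decodes to half that:

#include <stdio.h>

#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

static unsigned int cs_size_mb(unsigned int addr_mask_orig)
{
	unsigned int msb, weight, num_zero_bits, mask;

	msb = 31 - __builtin_clz(addr_mask_orig);	/* fls() - 1 */
	weight = __builtin_popcount(addr_mask_orig);	/* hweight_long() */
	num_zero_bits = msb - weight;

	/* Squeeze the interleave holes out of the top of the mask. */
	mask = GENMASK(msb - num_zero_bits, 1);

	/* Register [31:1] = Address [39:9]: KBs first, ... */
	return ((mask >> 2) + 1) >> 10;			/* ... then MBs */
}

int main(void)
{
	printf("contiguous mask:  %u MB\n", cs_size_mb(0x3fffffe));
	printf("one hole (bit 8): %u MB\n", cs_size_mb(0x3fffefe));
	return 0;
}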
.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M10H_CPUS] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M30H_CPUS] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
+ [F17_M70H_CPUS] = {
+ .ctl_name = "F17h_M70h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
};
err.channel = find_umc_channel(m);
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
- err.err_code = ERR_NORM_ADDR;
- goto log_error;
- }
-
- error_address_to_page_and_offset(sys_addr, &err);
-
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
err.csrow = m->synd & 0x7;
+ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ err.err_code = ERR_NORM_ADDR;
+ goto log_error;
+ }
+
+ error_address_to_page_and_offset(sys_addr, &err);
+
log_error:
__log_ecc_error(mci, &err, ecc_type);
}
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
- if (!pvt->umc)
+ if (!pvt->umc) {
csrow_nr >>= 1;
-
- cs_mode = DBAM_DIMM(csrow_nr, dbam);
+ cs_mode = DBAM_DIMM(csrow_nr, dbam);
+ } else {
+ cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
+ }
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
return nr_pages;
}
+static int init_csrows_df(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
+ enum dev_type dev_type = DEV_UNKNOWN;
+ struct dimm_info *dimm;
+ int empty = 1;
+ u8 umc, cs;
+
+ if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
+ edac_mode = EDAC_S16ECD16ED;
+ dev_type = DEV_X16;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
+ edac_mode = EDAC_S8ECD8ED;
+ dev_type = DEV_X8;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
+ edac_mode = EDAC_S4ECD4ED;
+ dev_type = DEV_X4;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
+ edac_mode = EDAC_SECDED;
+ }
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ empty = 0;
+ dimm = mci->csrows[cs]->channels[umc]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
+ dimm->dtype = dev_type;
+ }
+ }
+
+ return empty;
+}
+
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
int nr_pages = 0;
u32 val;
- if (!pvt->umc) {
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ if (pvt->umc)
+ return init_csrows_df(mci);
- pvt->nbcfg = val;
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- }
+ pvt->nbcfg = val;
+
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/* Determine DIMM ECC mode: */
- if (pvt->umc) {
- if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
- edac_mode = EDAC_S4ECD4ED;
- else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
-
- } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
? EDAC_S4ECD4ED
: EDAC_SECDED;
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
- u8 i, ecc_en = 1, cpk_en = 1;
+ u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+ dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+ dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
}
}
if (ecc_en) {
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (cpk_en)
+ if (!cpk_en)
+ return;
+
+ if (dev_x4)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ else if (dev_x16)
+ mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+ else
+ mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
}
}
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
+ fam_type = &family_types[F17_M70H_CPUS];
+ pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
+#define NUM_CONTROLLERS 8
#define ON true
#define OFF false
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
/*
* Function 1 - Address Map
#define DCSM0 0x60
#define DCSM1 0x160
-#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_sec_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE)
#define DRAM_CONTROL 0x78
/* UMC CH register offsets */
#define UMCCH_BASE_ADDR 0x0
+#define UMCCH_BASE_ADDR_SEC 0x10
#define UMCCH_ADDR_MASK 0x20
+#define UMCCH_ADDR_MASK_SEC 0x28
#define UMCCH_ADDR_CFG 0x30
#define UMCCH_DIMM_CFG 0x80
#define UMCCH_UMC_CFG 0x100
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
+ F17_M70H_CPUS,
NUM_FAMILIES,
};
/* A DCT chip selects collection */
struct chip_select {
u32 csbases[NUM_CHIPSELECTS];
+ u32 csbases_sec[NUM_CHIPSELECTS];
u8 b_cnt;
u32 csmasks[NUM_CHIPSELECTS];
+ u32 csmasks_sec[NUM_CHIPSELECTS];
u8 m_cnt;
};
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* one for each DCT */
- struct chip_select csels[2];
+ /* one for each DCT/UMC */
+ struct chip_select csels[NUM_CONTROLLERS];
/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
struct dram_range ranges[DRAM_RANGES];
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bluefield-specific EDAC driver.
+ *
+ * Copyright (c) 2019 Mellanox Technologies.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+#define DRIVER_NAME "bluefield-edac"
+
+/*
+ * Mellanox BlueField EMI (External Memory Interface) register definitions.
+ */
+
+#define MLXBF_ECC_CNT 0x340
+#define MLXBF_ECC_CNT__SERR_CNT GENMASK(15, 0)
+#define MLXBF_ECC_CNT__DERR_CNT GENMASK(31, 16)
+
+#define MLXBF_ECC_ERR 0x348
+#define MLXBF_ECC_ERR__SECC BIT(0)
+#define MLXBF_ECC_ERR__DECC BIT(16)
+
+#define MLXBF_ECC_LATCH_SEL 0x354
+#define MLXBF_ECC_LATCH_SEL__START BIT(24)
+
+#define MLXBF_ERR_ADDR_0 0x358
+
+#define MLXBF_ERR_ADDR_1 0x37c
+
+#define MLXBF_SYNDROM 0x35c
+#define MLXBF_SYNDROM__DERR BIT(0)
+#define MLXBF_SYNDROM__SERR BIT(1)
+#define MLXBF_SYNDROM__SYN GENMASK(25, 16)
+
+#define MLXBF_ADD_INFO 0x364
+#define MLXBF_ADD_INFO__ERR_PRANK GENMASK(9, 8)
+
+#define MLXBF_EDAC_MAX_DIMM_PER_MC 2
+#define MLXBF_EDAC_ERROR_GRAIN 8
+
+/*
+ * Request MLNX_SIP_GET_DIMM_INFO
+ *
+ * Retrieve information about DIMM on a certain slot.
+ *
+ * Call register usage:
+ * a0: MLNX_SIP_GET_DIMM_INFO
+ * a1: (Memory controller index) << 16 | (Dimm index in memory controller)
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: MLXBF_DIMM_INFO defined below describing the DIMM.
+ * a1-3: not used.
+ */
+#define MLNX_SIP_GET_DIMM_INFO 0x82000008
+
+/* Format for the SMC response about the memory information */
+#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
+#define MLXBF_DIMM_INFO__IS_RDIMM BIT(16)
+#define MLXBF_DIMM_INFO__IS_LRDIMM BIT(17)
+#define MLXBF_DIMM_INFO__IS_NVDIMM BIT(18)
+#define MLXBF_DIMM_INFO__RANKS GENMASK_ULL(23, 21)
+#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
+
+struct bluefield_edac_priv {
+ int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
+ void __iomem *emi_base;
+ int dimm_per_mc;
+};
+
+static u64 smc_call1(u64 smc_op, u64 smc_arg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+/*
+ * Gather the ECC information from the External Memory Interface registers
+ * and report it to the edac handler.
+ */
+static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
+ int error_cnt,
+ int is_single_ecc)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ u32 dram_additional_info, err_prank, edea0, edea1;
+ u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
+ enum hw_event_mc_err_type ecc_type;
+ u64 ecc_dimm_addr;
+ int ecc_dimm;
+
+ ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
+ HW_EVENT_ERR_UNCORRECTED;
+
+ /*
+ * Tell the External Memory Interface to populate the relevant
+ * registers with information about the last ECC error occurrence.
+ */
+ ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
+ writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
+
+ /*
+ * Verify that the error information latched in the registers
+ * matches the type we were asked to report. If not, just report
+ * the error without the detailed information.
+ */
+ dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
+ serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
+ derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
+ syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
+
+ if ((is_single_ecc && !serr) || (!is_single_ecc && !derr)) {
+ edac_mc_handle_error(ecc_type, mci, error_cnt, 0, 0, 0,
+ 0, 0, -1, mci->ctl_name, "");
+ return;
+ }
+
+ dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
+ err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
+
+ ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
+
+ edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
+ edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
+
+ ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
+
+ edac_mc_handle_error(ecc_type, mci, error_cnt,
+ PFN_DOWN(ecc_dimm_addr),
+ offset_in_page(ecc_dimm_addr),
+ syndrom, ecc_dimm, 0, 0, mci->ctl_name, "");
+}
+
+static void bluefield_edac_check(struct mem_ctl_info *mci)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
+
+ /*
+ * The memory controller might not be initialized by the firmware
+ * when there isn't memory, which may lead to bad register readings.
+ */
+ if (mci->edac_cap == EDAC_FLAG_NONE)
+ return;
+
+ ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
+ single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
+ double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
+
+ if (single_error_count) {
+ ecc_error |= MLXBF_ECC_ERR__SECC;
+
+ bluefield_gather_report_ecc(mci, single_error_count, 1);
+ }
+
+ if (double_error_count) {
+ ecc_error |= MLXBF_ECC_ERR__DECC;
+
+ bluefield_gather_report_ecc(mci, double_error_count, 0);
+ }
+
+ /* Write to clear reported errors. */
+ if (ecc_count)
+ writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
+}
+
+/* Initialize the DIMMs information for the given memory controller. */
+static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ int mem_ctrl_idx = mci->mc_idx;
+ struct dimm_info *dimm;
+ u64 smc_info, smc_arg;
+ int is_empty = 1, i;
+
+ for (i = 0; i < priv->dimm_per_mc; i++) {
+ dimm = mci->dimms[i];
+
+ smc_arg = mem_ctrl_idx << 16 | i;
+ smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
+
+ if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
+ dimm->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ is_empty = 0;
+
+ dimm->edac_mode = EDAC_SECDED;
+
+ if (FIELD_GET(MLXBF_DIMM_INFO__IS_NVDIMM, smc_info))
+ dimm->mtype = MEM_NVDIMM;
+ else if (FIELD_GET(MLXBF_DIMM_INFO__IS_LRDIMM, smc_info))
+ dimm->mtype = MEM_LRDDR4;
+ else if (FIELD_GET(MLXBF_DIMM_INFO__IS_RDIMM, smc_info))
+ dimm->mtype = MEM_RDDR4;
+ else
+ dimm->mtype = MEM_DDR4;
+
+ dimm->nr_pages =
+ FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info) *
+ (SZ_1G / PAGE_SIZE);
+ dimm->grain = MLXBF_EDAC_ERROR_GRAIN;
+
+ /* Mem controller for BlueField only supports x4, x8 and x16 */
+ switch (FIELD_GET(MLXBF_DIMM_INFO__PACKAGE_X, smc_info)) {
+ case 4:
+ dimm->dtype = DEV_X4;
+ break;
+ case 8:
+ dimm->dtype = DEV_X8;
+ break;
+ case 16:
+ dimm->dtype = DEV_X16;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ }
+
+ priv->dimm_ranks[i] =
+ FIELD_GET(MLXBF_DIMM_INFO__RANKS, smc_info);
+ }
+
+ if (is_empty)
+ mci->edac_cap = EDAC_FLAG_NONE;
+ else
+ mci->edac_cap = EDAC_FLAG_SECDED;
+}
+
+static int bluefield_edac_mc_probe(struct platform_device *pdev)
+{
+ struct bluefield_edac_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct edac_mc_layer layers[1];
+ struct mem_ctl_info *mci;
+ struct resource *emi_res;
+ unsigned int mc_idx, dimm_count;
+ int rc, ret;
+
+ /* Read the MSS (Memory SubSystem) index from the ACPI table. */
+ if (device_property_read_u32(dev, "mss_number", &mc_idx)) {
+ dev_warn(dev, "bf_edac: MSS number unknown\n");
+ return -EINVAL;
+ }
+
+ /* Read the DIMMs per MC from the ACPI table. */
+ if (device_property_read_u32(dev, "dimm_per_mc", &dimm_count)) {
+ dev_warn(dev, "bf_edac: DIMMs per MC unknown\n");
+ return -EINVAL;
+ }
+
+ if (dimm_count > MLXBF_EDAC_MAX_DIMM_PER_MC) {
+ dev_warn(dev, "bf_edac: DIMMs per MC not valid\n");
+ return -EINVAL;
+ }
+
+ emi_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!emi_res)
+ return -EINVAL;
+
+ layers[0].type = EDAC_MC_LAYER_SLOT;
+ layers[0].size = dimm_count;
+ layers[0].is_virt_csrow = true;
+
+ mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sizeof(*priv));
+ if (!mci)
+ return -ENOMEM;
+
+ priv = mci->pvt_info;
+
+ priv->dimm_per_mc = dimm_count;
+ priv->emi_base = devm_ioremap_resource(dev, emi_res);
+ if (IS_ERR(priv->emi_base)) {
+ dev_err(dev, "failed to map EMI IO resource\n");
+ ret = PTR_ERR(priv->emi_base);
+ goto err;
+ }
+
+ mci->pdev = dev;
+ mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
+ MEM_FLAG_LRDDR4 | MEM_FLAG_NVDIMM;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = DRIVER_NAME;
+ mci->ctl_name = "BlueField_Memory_Controller";
+ mci->dev_name = dev_name(dev);
+ mci->edac_check = bluefield_edac_check;
+
+ /* Initialize mci with the actual populated DIMM information. */
+ bluefield_edac_init_dimms(mci);
+
+ platform_set_drvdata(pdev, mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(dev, "failed to register with EDAC core\n");
+ ret = rc;
+ goto err;
+ }
+
+ /* Only POLL mode supported so far. */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+
+ return ret;
+}
+
+static int bluefield_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
+ {"MLNXBF08", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, bluefield_mc_acpi_ids);
+
+static struct platform_driver bluefield_edac_mc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = bluefield_mc_acpi_ids,
+ },
+ .probe = bluefield_edac_mc_probe,
+ .remove = bluefield_edac_mc_remove,
+};
+
+module_platform_driver(bluefield_edac_mc_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField memory edac driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL v2");
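The MLXBF_DIMM_INFO__* accessors above all reduce to GENMASK_ULL()/FIELD_GET(). A self-contained demo of decoding a hypothetical SMC response word; the two macros are reimplemented here just so this compiles outside the kernel:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
/* FIELD_GET() equivalent: mask, then shift down by the field's LSB. */
#define FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

#define DIMM_SIZE_GB	GENMASK_ULL(15, 0)
#define DIMM_RANKS	GENMASK_ULL(23, 21)

int main(void)
{
	/* Hypothetical response word: a 16 GB, dual-rank DIMM. */
	uint64_t info = 16 | (2ULL << 21);

	printf("size %llu GB, %llu ranks\n",
	       (unsigned long long)FIELD_GET(DIMM_SIZE_GB, info),
	       (unsigned long long)FIELD_GET(DIMM_RANKS, info));
	return 0;
}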
module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
-unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
- unsigned len)
+unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+ unsigned int len)
{
struct mem_ctl_info *mci = dimm->mci;
int i, n, count = 0;
* At return, the pointer 'p' will be incremented to be used on a next call
* to this function.
*/
-void *edac_align_ptr(void **p, unsigned size, int n_elems)
+void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
- unsigned align, r;
+ unsigned int align, r;
void *ptr = *p;
*p += size * n_elems;
static void _edac_mc_free(struct mem_ctl_info *mci)
{
- int i, chn, row;
struct csrow_info *csr;
- const unsigned int tot_dimms = mci->tot_dimms;
- const unsigned int tot_channels = mci->num_cschannel;
- const unsigned int tot_csrows = mci->nr_csrows;
+ int i, chn, row;
if (mci->dimms) {
- for (i = 0; i < tot_dimms; i++)
+ for (i = 0; i < mci->tot_dimms; i++)
kfree(mci->dimms[i]);
kfree(mci->dimms);
}
+
if (mci->csrows) {
- for (row = 0; row < tot_csrows; row++) {
+ for (row = 0; row < mci->nr_csrows; row++) {
csr = mci->csrows[row];
- if (csr) {
- if (csr->channels) {
- for (chn = 0; chn < tot_channels; chn++)
- kfree(csr->channels[chn]);
- kfree(csr->channels);
- }
- kfree(csr);
+ if (!csr)
+ continue;
+
+ if (csr->channels) {
+ for (chn = 0; chn < mci->num_cschannel; chn++)
+ kfree(csr->channels[chn]);
+ kfree(csr->channels);
}
+ kfree(csr);
}
kfree(mci->csrows);
}
kfree(mci);
}
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
struct edac_mc_layer *layers,
- unsigned sz_pvt)
+ unsigned int sz_pvt)
{
struct mem_ctl_info *mci;
struct edac_mc_layer *layer;
struct rank_info *chan;
struct dimm_info *dimm;
u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
- unsigned pos[EDAC_MAX_LAYERS];
- unsigned size, tot_dimms = 1, count = 1;
- unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ unsigned int pos[EDAC_MAX_LAYERS];
+ unsigned int size, tot_dimms = 1, count = 1;
+ unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
void *pvt, *p, *ptr = NULL;
int i, j, row, chn, n, len, off;
bool per_rank = false;
if (p > e->location)
*(p - 1) = '\0';
- /* Report the error via the trace interface */
- grain_bits = fls_long(e->grain) + 1;
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+ grain_bits = fls_long(e->grain - 1);
+
+ /* Report the error via the trace interface */
if (IS_ENABLED(CONFIG_RAS))
trace_mc_event(type, e->msg, e->label, e->error_count,
mci->mc_idx, e->top_layer, e->mid_layer,
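/*
 * A standalone check of the corrected grain_bits math above:
 * fls_long(grain - 1) yields ceil(log2(grain)), while the old
 * fls_long(grain) + 1 over-reported the grain by one or two powers
 * of two. Userspace sketch; fls_long() is modeled with
 * __builtin_clzl().
 */
#include <stdio.h>

static unsigned int fls_long(unsigned long x)
{
	return x ? 8 * sizeof(x) - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long grains[] = { 1, 4, 5, 8, 32 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("grain %2lu -> grain_bits %u (old formula: %u)\n",
		       grains[i], fls_long(grains[i] - 1),
		       fls_long(grains[i]) + 1);
	return 0;
}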
* On success, return a pointer to struct mem_ctl_info pointer;
* %NULL otherwise
*/
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
struct edac_mc_layer *layers,
- unsigned sz_pvt);
+ unsigned int sz_pvt);
/**
* edac_get_owner - Return the owner's mod_name of EDAC MC
struct dev_ch_attribute {
struct device_attribute attr;
- int channel;
+ unsigned int channel;
};
#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
char *data)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
/* if field has not been initialized, there is nothing to send */
const char *data, size_t count)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
size_t copy_count = count;
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
return sprintf(data, "%u\n", rank->ce_count);
{
struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
- edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(csrow);
}
dev_set_name(&csrow->dev, "csrow%d", index);
dev_set_drvdata(&csrow->dev, csrow);
- edac_dbg(0, "creating (virtual) csrow node %s\n",
- dev_name(&csrow->dev));
-
err = device_add(&csrow->dev);
- if (err)
+ if (err) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
put_device(&csrow->dev);
+ return err;
+ }
- return err;
+ edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
+
+ return 0;
}
 /* Create a CSROW object under the specified edac_mc_device */
if (!nr_pages_per_csrow(csrow))
continue;
err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0) {
- edac_dbg(1,
- "failure: create csrow objects for csrow %d\n",
- i);
+ if (err < 0)
goto error;
- }
}
return 0;
{
struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
- edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dimm);
}
pm_runtime_forbid(&mci->dev);
err = device_add(&dimm->dev);
- if (err)
+ if (err) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
put_device(&dimm->dev);
+ return err;
+ }
- edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));
+ if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
+ char location[80];
- return err;
+ edac_dimm_info_location(dimm, location, sizeof(location));
+ edac_dbg(0, "device %s created at location %s\n",
+ dev_name(&dimm->dev), location);
+ }
+
+ return 0;
}
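/*
 * A sketch of the IS_ENABLED() idiom that replaced the #ifdef block
 * above: the branch is always compiled (so it stays build-tested) and
 * is folded away by the optimizer when the option is off. The helper
 * name is illustrative.
 */
#include <linux/kernel.h>
#include <linux/printk.h>

static void example_debug_path(const char *name)
{
	if (IS_ENABLED(CONFIG_EDAC_DEBUG))
		pr_debug("extra bookkeeping for %s\n", name);
}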
/*
{
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(mci);
}
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
- edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
put_device(&mci->dev);
- goto out;
+ return err;
}
+ edac_dbg(0, "device %s created\n", dev_name(&mci->dev));
+
/*
* Create the dimm/rank devices
*/
if (!dimm->nr_pages)
continue;
-#ifdef CONFIG_EDAC_DEBUG
- edac_dbg(1, "creating dimm%d, located at ", i);
- if (edac_debug_level >= 1) {
- int lay;
- for (lay = 0; lay < mci->n_layers; lay++)
- printk(KERN_CONT "%s %d ",
- edac_layer_name[mci->layers[lay].type],
- dimm->location[lay]);
- printk(KERN_CONT "\n");
- }
-#endif
err = edac_create_dimm_object(mci, dimm, i);
- if (err) {
- edac_dbg(1, "failure: create dimm %d obj\n", i);
+ if (err)
goto fail_unregister_dimm;
- }
}
#ifdef CONFIG_EDAC_LEGACY_SYSFS
}
device_unregister(&mci->dev);
-out:
return err;
}
struct dimm_info *dimm = mci->dimms[i];
if (dimm->nr_pages == 0)
continue;
- edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
+ edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
device_unregister(&dimm->dev);
}
}
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
- edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ edac_dbg(1, "unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
}
* parent device, used to create the /sys/devices/mc sysfs node.
* So, there are no attributes on it.
*/
- edac_dbg(1, "Releasing device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dev);
}
int err;
mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
- if (!mci_pdev) {
- err = -ENOMEM;
- goto out;
- }
+ if (!mci_pdev)
+ return -ENOMEM;
mci_pdev->bus = edac_get_sysfs_subsys();
mci_pdev->type = &mc_attr_type;
dev_set_name(mci_pdev, "mc");
err = device_add(mci_pdev);
- if (err < 0)
- goto out_put_device;
+ if (err < 0) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
+ put_device(mci_pdev);
+ return err;
+ }
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
-
- out_put_device:
- put_device(mci_pdev);
- out:
- return err;
}
void edac_mc_sysfs_exit(void)
struct ghes_edac_dimm_fill {
struct mem_ctl_info *mci;
- unsigned count;
+ unsigned int count;
};
static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
}
/* convert csrow index into a rank (per channel -- 0..5) */
-static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
+ unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
}
/* convert csrow index into a channel (0..1) */
-static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
+ unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
return ret;
}
-static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
+static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned chan = i5100_csrow_to_chan(mci, csrow);
+ const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
for (i = 0; i < mci->tot_dimms; i++) {
struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
- const unsigned chan = i5100_csrow_to_chan(mci, i);
- const unsigned rank = i5100_csrow_to_rank(mci, i);
+ const unsigned int chan = i5100_csrow_to_chan(mci, i);
+ const unsigned int rank = i5100_csrow_to_rank(mci, i);
if (!npages)
continue;
}
}
+#define DNV_MCHBAR_SIZE 0x8000
+#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
char *base;
u64 addr;
+ unsigned long size;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
addr = get_mem_ctrl_hub_base_addr();
if (!addr)
return -ENODEV;
+ size = DNV_MCHBAR_SIZE;
} else {
/* MMIO via sideband register base address */
addr = get_sideband_reg_base_addr();
if (!addr)
return -ENODEV;
addr += (port << 16);
+ size = DNV_SB_PORT_SIZE;
}
- base = ioremap((resource_size_t)addr, 0x10000);
+ base = ioremap((resource_size_t)addr, size);
if (!base)
return -ENODEV;
return status;
}
+#define GET_EFI_CONFIG_TABLE(bits) \
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
+ efi_guid_t guid) \
+{ \
+ efi_system_table_##bits##_t *sys_table; \
+ efi_config_table_##bits##_t *tables; \
+ int i; \
+ \
+ sys_table = (typeof(sys_table))_sys_table; \
+ tables = (typeof(tables))(unsigned long)sys_table->tables; \
+ \
+ for (i = 0; i < sys_table->nr_tables; i++) { \
+ if (efi_guidcmp(tables[i].guid, guid) != 0) \
+ continue; \
+ \
+ return (void *)(unsigned long)tables[i].table; \
+ } \
+ \
+ return NULL; \
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
{
- efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
- int i;
-
- for (i = 0; i < sys_table->nr_tables; i++) {
- if (efi_guidcmp(tables[i].guid, guid) != 0)
- continue;
-
- return (void *)tables[i].table;
- }
-
- return NULL;
+ if (efi_is_64bit())
+ return get_efi_config_table64(sys_table, guid);
+ else
+ return get_efi_config_table32(sys_table, guid);
}
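/*
 * The same token-pasting trick as GET_EFI_CONFIG_TABLE() above,
 * reduced to a standalone userspace sketch: one macro stamps out a
 * 32-bit and a 64-bit variant, and the caller dispatches at run time
 * (here unconditionally, instead of on efi_is_64bit()).
 */
#include <stdint.h>
#include <stdio.h>

#define DEFINE_SUM(bits)						\
static uint64_t sum##bits(const uint##bits##_t *v, int n)		\
{									\
	uint64_t s = 0;							\
	int i;								\
									\
	for (i = 0; i < n; i++)						\
		s += v[i];						\
	return s;							\
}
DEFINE_SUM(32)
DEFINE_SUM(64)

int main(void)
{
	uint32_t a[] = { 1, 2, 3 };
	uint64_t b[] = { 4, 5 };

	printf("%llu %llu\n",
	       (unsigned long long)sum32(a, 3),
	       (unsigned long long)sum64(b, 2));
	return 0;
}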
return -EIO;
}
- if (!IS_ERR(conf->confd)) {
+ if (conf->confd) {
if (!gpiod_get_raw_value_cansleep(conf->confd)) {
dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
return -EIO;
return PTR_ERR(conf->status);
}
- conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
if (IS_ERR(conf->confd)) {
- dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
- PTR_ERR(conf->confd));
+ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+ PTR_ERR(conf->confd));
+ return PTR_ERR(conf->confd);
+ } else if (!conf->confd) {
+ dev_warn(&spi->dev, "Not using confd gpio\n");
}
/* Register manager with unique name */
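/*
 * A minimal probe fragment showing the devm_gpiod_get_optional()
 * pattern adopted above: an absent GPIO comes back as NULL rather
 * than an error, so only real lookup failures (or probe deferral)
 * abort the probe. The "ready" line name is hypothetical.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_ready_gpio(struct device *dev,
				  struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "ready", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* bad description or deferral */
	if (!gpio)
		dev_info(dev, "ready gpio not wired, will poll instead\n");

	*out = gpio;
	return 0;
}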
#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
#define SCOM_STATUS_PIB_RESP_SHIFT 12
-#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \
- SCOM_STATUS_PROTECTION | \
+#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
SCOM_STATUS_PARITY | \
SCOM_STATUS_PIB_ABORT | \
SCOM_STATUS_PIB_RESP_MASK)
/* Return -EBUSY on PIB abort to force a retry */
if (status & SCOM_STATUS_PIB_ABORT)
return -EBUSY;
- if (status & SCOM_STATUS_ERR_SUMMARY) {
- fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
- sizeof(uint32_t));
- return -EIO;
- }
return 0;
}
.read = gpio_mockup_debugfs_read,
.write = gpio_mockup_debugfs_write,
.llseek = no_llseek,
+ .release = single_release,
};
static void gpio_mockup_debugfs_setup(struct device *dev,
u8 new_irqs;
int level, i;
u8 invert_irq_mask[MAX_BANK];
- int reg_direction[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
- NBANK(chip));
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
if (chip->driver_data & PCA_PCAL) {
/* Enable latch on interrupt-enabled inputs */
bool pending_seen = false;
bool trigger_seen = false;
u8 trigger[MAX_BANK];
- int reg_direction[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
int ret, i;
if (chip->driver_data & PCA_PCAL) {
return false;
/* Remove output pins from the equation */
- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
- NBANK(chip));
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
cur_stat[i] &= reg_direction[i];
{
struct i2c_client *client = chip->client;
struct irq_chip *irq_chip = &chip->irq_chip;
- int reg_direction[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
int ret, i;
if (!client->irq)
* interrupt. We have to rely on the previous read for
* this purpose.
*/
- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
- NBANK(chip));
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
chip->irq_stat[i] &= reg_direction[i];
mutex_init(&chip->irq_lock);
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include "gpiolib.h"
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
- value = gpiod_get_raw_value_cansleep(event->desc);
- if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
- ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
- event->handler(event->irq, event);
+ if (run_edge_events_on_boot &&
+ (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+ value = gpiod_get_raw_value_cansleep(event->desc);
+ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+ event->handler(event->irq, event);
+ }
}
static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
}
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ }
+ },
+ {} /* Terminating entry */
+};
+
+static int acpi_gpio_setup_params(void)
+{
+ if (run_edge_events_on_boot < 0) {
+ if (dmi_check_system(run_edge_events_on_boot_blacklist))
+ run_edge_events_on_boot = 0;
+ else
+ run_edge_events_on_boot = 1;
+ }
+
+ return 0;
+}
+
+/* Directly after dmi_setup(), which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
- /*
- * -EPROBE_DEFER in our case means that we found a
- * valid GPIO property, but no controller has been
- * registered so far.
- *
- * This means we don't need to look any further for
- * alternate name conventions, and we should really
- * preserve the return code for our user to be able to
- * retry probing later.
- */
- if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
- return desc;
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
break;
}
- /* Special handling for SPI GPIOs if used */
- if (IS_ERR(desc))
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ /* Special handling for SPI GPIOs if used */
desc = of_find_spi_gpio(dev, con_id, &of_flags);
- if (IS_ERR(desc)) {
+ }
+
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
/* This quirk looks up flags and all */
desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
if (!IS_ERR(desc))
return desc;
}
- /* Special handling for regulator GPIOs if used */
- if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ /* Special handling for regulator GPIOs if used */
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+ }
if (IS_ERR(desc))
return desc;
if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;
+ /*
+ * Do not allow both INPUT & OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+ return -EINVAL;
+
/*
* Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
* the hardware actually supports enabling both at the same time the
}
/* This is just wrong: we don't look for events on output lines */
- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
ret = -EINVAL;
goto out_free_label;
}
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+ GPIOLINE_FLAG_IS_OUT);
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+ GPIOLINE_FLAG_IS_OUT);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
if (status)
goto err_remove_from_list;
- status = gpiochip_irqchip_init_valid_mask(chip);
- if (status)
- goto err_remove_from_list;
-
status = gpiochip_alloc_valid_mask(chip);
if (status)
- goto err_remove_irqchip_mask;
-
- status = gpiochip_add_irqchip(chip, lock_key, request_key);
- if (status)
- goto err_free_gpiochip_mask;
+ goto err_remove_from_list;
status = of_gpiochip_add(chip);
if (status)
- goto err_remove_chip;
+ goto err_free_gpiochip_mask;
status = gpiochip_init_valid_mask(chip);
if (status)
machine_gpiochip_add(chip);
+ status = gpiochip_irqchip_init_valid_mask(chip);
+ if (status)
+ goto err_remove_acpi_chip;
+
+ status = gpiochip_add_irqchip(chip, lock_key, request_key);
+ if (status)
+ goto err_remove_irqchip_mask;
+
/*
* By first adding the chardev, and then adding the device,
* we get a device node entry in sysfs under
if (gpiolib_initialized) {
status = gpiochip_setup_dev(gdev);
if (status)
- goto err_remove_acpi_chip;
+ goto err_remove_irqchip;
}
return 0;
+err_remove_irqchip:
+ gpiochip_irqchip_remove(chip);
+err_remove_irqchip_mask:
+ gpiochip_irqchip_free_valid_mask(chip);
err_remove_acpi_chip:
acpi_gpiochip_remove(chip);
err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
-err_remove_chip:
- gpiochip_irqchip_remove(chip);
err_free_gpiochip_mask:
gpiochip_free_valid_mask(chip);
-err_remove_irqchip_mask:
- gpiochip_irqchip_free_valid_mask(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
+ if (p->post_deps)
+ return -EINVAL;
+
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk
- *chunk)
+ struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ if (p->post_deps)
+ return -EINVAL;
+
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
struct drm_sched_entity *entity)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
- unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
- struct dma_fence *other = centity->fences[idx];
+ struct dma_fence *other;
+ unsigned idx;
+ long r;
- if (other) {
- signed long r;
- r = dma_fence_wait(other, true);
- if (r < 0) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ spin_lock(&ctx->ring_lock);
+ idx = centity->sequence & (amdgpu_sched_jobs - 1);
+ other = dma_fence_get(centity->fences[idx]);
+ spin_unlock(&ctx->ring_lock);
- return r;
- }
- }
+ if (!other)
+ return 0;
- return 0;
+ r = dma_fence_wait(other, true);
+ if (r < 0 && r != -ERESTARTSYS)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
+ dma_fence_put(other);
+ return r;
}
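/*
 * A sketch of the take-a-reference-under-the-lock pattern introduced
 * above: the fence pointer and a reference are grabbed while the lock
 * protects the slot, and the potentially long dma_fence_wait() runs
 * only after the lock is dropped. Function and parameter names are
 * illustrative.
 */
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static long example_wait_slot(spinlock_t *lock, struct dma_fence **slot)
{
	struct dma_fence *fence;
	long r;

	spin_lock(lock);
	fence = dma_fence_get(*slot);	/* NULL-safe; takes a reference */
	spin_unlock(lock);

	if (!fence)
		return 0;

	r = dma_fence_wait(fence, true);	/* interruptible */
	dma_fence_put(fence);
	return r;
}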
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- break;
- if ((adev->gfx.rlc_fw_version != 106 &&
- adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
- (adev->gfx.rlc_feature_version < 1) ||
- !adev->gfx.rlc.is_rlc_v2_1)
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ ((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
break;
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- WREG32(mmSQ_CMD, value);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
- AMD_PG_SUPPORT_MMHUB |
AMD_PG_SUPPORT_ATHUB;
adev->external_rev_id = adev->rev_id + 0x1;
break;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
-
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
/* FIXME: not supported yet */
convert_color_depth_from_display_info(const struct drm_connector *connector,
const struct drm_connector_state *state)
{
- uint32_t bpc = connector->display_info.bpc;
+ uint8_t bpc = (uint8_t)connector->display_info.bpc;
+
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
if (!state)
state = connector->state;
if (state) {
- bpc = state->max_bpc;
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+ * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min(bpc, state->max_requested_bpc);
+
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
}
*/
#include <linux/slab.h>
+#include <linux/mm.h>
#include "dm_services.h"
struct dc_state *dc_create_state(struct dc *dc)
{
- struct dc_state *context = kzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+ struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
if (!context)
return NULL;
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
- struct dc_state *new_ctx = kmemdup(src_ctx,
- sizeof(struct dc_state), GFP_KERNEL);
+ struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
if (!new_ctx)
return NULL;
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);
- kfree(context);
+ kvfree(context);
}
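/*
 * A minimal sketch of the kvzalloc()/kvfree() pairing adopted above:
 * the allocator falls back to vmalloc() when the object is too large
 * or memory is too fragmented for kmalloc(). The structure is a
 * stand-in for dc_state.
 */
#include <linux/mm.h>
#include <linux/slab.h>

struct example_big_state {
	char blob[1 << 16];	/* illustrative size */
};

static struct example_big_state *example_state_create(void)
{
	return kvzalloc(sizeof(struct example_big_state), GFP_KERNEL);
}

static void example_state_destroy(struct example_big_state *s)
{
	kvfree(s);	/* works for both kmalloc and vmalloc backing */
}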
void dc_release_state(struct dc_state *context)
if (ret)
return ret;
- *query = metrics_table.CurrSocketPower << 8;
+ /* SMU firmware 40.46 renamed CurrSocketPower to AverageSocketPower */
+ if (hwmgr->smu_version == 0x282e00)
+ *query = metrics_table.AverageSocketPower << 8;
+ else
+ *query = metrics_table.CurrSocketPower << 8;
return ret;
}
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ uint32_t soft_min_level, soft_max_level;
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ /* gfxclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+ /* uclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ /* socclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
+
+ data->dpm_table.soc_table.dpm_state.soft_min_level =
+ data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.soc_table.dpm_state.soft_max_level =
+ data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
#define smu_set_azalia_d3_pme(smu) \
((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
int ret, index;
- uint32_t size;
+ uint32_t size = 0;
+ uint16_t atom_table_size;
uint8_t frev, crev;
void *table;
uint16_t version_major, version_minor;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
- ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
+ ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
+ size = atom_table_size;
}
if (!smu->smu_table.power_play_table)
static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
+ uint32_t smu_version;
int ret = 0;
SmuMetrics_t metrics;
if (ret)
return ret;
- *value = metrics.CurrSocketPower << 8;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ /* SMU firmware 40.46 renamed CurrSocketPower to AverageSocketPower */
+ if (smu_version == 0x282e00)
+ *value = metrics.AverageSocketPower << 8;
+ else
+ *value = metrics.CurrSocketPower << 8;
return 0;
}
#include <linux/iommu.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
pipe->of_output_port =
of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
- pipe->of_node = np;
+ pipe->of_node = of_node_get(np);
return 0;
}
return mdev->irq;
}
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+ ret = 0;
+
for_each_available_child_of_node(np, child) {
if (of_node_cmp(child->name, "pipeline") == 0) {
ret = komeda_parse_pipe_dt(mdev, child);
mdev->n_pipelines = 0;
+ of_reserved_mem_device_release(dev);
+
if (funcs && funcs->cleanup)
funcs->cleanup(mdev);
return NULL;
}
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
+{
+ u32 bpp;
+
+ switch (info->format) {
+ case DRM_FORMAT_YUV420_8BIT:
+ bpp = 12;
+ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+ default:
+ bpp = info->cpp[0] * 8;
+ break;
+ }
+
+ return bpp;
+}
+
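/*
 * A standalone check of the payload math fed by
 * komeda_get_afbc_format_bpp() above: a 16x16 superblock (256 pixels)
 * at 12 bpp needs 384 bytes before alignment, versus cpp * 256 bytes
 * for packed RGB. The 256-pixel and 128-byte constants are
 * assumptions mirroring AFBC_SUPERBLK_PIXELS and
 * AFBC_SUPERBLK_ALIGNMENT.
 */
#include <stdio.h>

#define SUPERBLK_PIXELS 256u
#define SUPERBLK_ALIGN 128u
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/* YUV420 8-bit, YUV420 10-bit, XRGB8888 */
	unsigned int bpps[] = { 12, 15, 32 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("bpp %2u -> superblock payload %u bytes\n",
		       bpps[i],
		       ALIGN_UP(bpps[i] * SUPERBLK_PIXELS / 8, SUPERBLK_ALIGN));
	return 0;
}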
/* Two assumptions
* 1. RGB always has YTR
* 2. Tiled RGB always has SC
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier);
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
+ u64 modifier);
+
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
u32 layer_type, u32 *n_fmts);
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
- u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks;
+ u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
u64 min_size;
obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
alignment_header);
+ bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
kfb->afbc_size = kfb->offset_payload + n_blocks *
- ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS,
+ ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
AFBC_SUPERBLK_ALIGNMENT);
min_size = kfb->afbc_size + fb->offsets[0];
if (min_size > obj->size) {
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct komeda_plane_state *kplane_st;
struct drm_plane_state *plane_st;
- struct drm_framebuffer *fb;
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
plane_st = &kplane_st->base;
- fb = plane_st->fb;
plane = plane_st->plane;
plane_st->normalized_zpos = order++;
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+ struct drm_crtc_state *new_crtc_st;
int i, err;
err = drm_atomic_helper_check_modeset(dev, state);
* so need to add all affected_planes (even unchanged) to
* drm_atomic_state.
*/
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
err = drm_atomic_add_affected_planes(state, crtc);
if (err)
return err;
komeda_kms_irq_handler, IRQF_SHARED,
drm->driver->name, drm);
if (err)
- goto cleanup_mode_config;
+ goto free_component_binding;
err = mdev->funcs->enable_irq(mdev);
if (err)
- goto cleanup_mode_config;
+ goto free_component_binding;
drm->irq_enabled = true;
+ drm_kms_helper_poll_init(drm);
+
err = drm_dev_register(drm, 0);
if (err)
- goto cleanup_mode_config;
+ goto free_interrupts;
return kms;
-cleanup_mode_config:
+free_interrupts:
+ drm_kms_helper_poll_fini(drm);
drm->irq_enabled = false;
+ mdev->funcs->disable_irq(mdev);
+free_component_binding:
+ component_unbind_all(mdev->dev, drm);
+cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
+ drm->dev_private = NULL;
+ drm_dev_put(drm);
free_kms:
kfree(kms);
return ERR_PTR(err);
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm->irq_enabled = false;
mdev->funcs->disable_irq(mdev);
- drm_dev_unregister(drm);
component_unbind_all(mdev->dev, drm);
- komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);
+ komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
drm_dev_put(drm);
}
struct seq_file *sf);
/* component APIs */
+extern __printf(10, 11)
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
if (!kcrtc->master->wb_layer)
return 0;
- kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL);
+ kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
if (!kwb_conn)
return -ENOMEM;
/* Enable extended register access */
- ast_enable_mmio(dev);
ast_open_key(ast);
+ ast_enable_mmio(dev);
/* Find out whether P2A works or whether to use device-tree */
ast_detect_config_mode(dev, &scu_rev);
{
struct ast_private *ast = dev->dev_private;
+ /* enable standard VGA decode */
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
return -EINVAL;
ast_open_key(ast);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
{
struct ast_private *ast = dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
}
static int drm_mode_parse_cmdline_extra(const char *str, int length,
+ bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
for (i = 0; i < length; i++) {
switch (str[i]) {
case 'i':
+ if (freestanding)
+ return -EINVAL;
+
mode->interlace = true;
break;
case 'm':
+ if (freestanding)
+ return -EINVAL;
+
mode->margins = true;
break;
case 'D':
if (extras) {
int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
1,
+ false,
connector,
mode);
if (ret)
return 0;
}
+static const char * const drm_named_modes_whitelist[] = {
+ "NTSC",
+ "PAL",
+};
+
+static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
+ if (!strncmp(mode, drm_named_modes_whitelist[i], size))
+ return true;
+
+ return false;
+}
+
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
* @mode_option: optional per connector mode option
* bunch of things:
* - We need to make sure that the first character (which
* would be our resolution in X) is a digit.
- * - However, if the X resolution is missing, then we end up
- * with something like x<yres>, with our first character
- * being an alpha-numerical character, which would be
- * considered a named mode.
+ * - If not, it's either a named mode or a force on/off.
+ * To distinguish between the two, we run the extra parsing
+ * function first; if that fails, we treat it as a named mode.
*
* If this isn't enough, we should add more heuristics here,
* and matching unit-tests.
*/
- if (!isdigit(name[0]) && name[0] != 'x')
+ if (!isdigit(name[0]) && name[0] != 'x') {
+ unsigned int namelen = strlen(name);
+
+ /*
+ * Only the force on/off options can match here, and they
+ * are all a single character long.
+ */
+ if (namelen == 1) {
+ ret = drm_mode_parse_cmdline_extra(name, namelen, true,
+ connector, mode);
+ if (!ret)
+ return true;
+ }
+
named_mode = true;
+ }
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strchr(name, '-');
if (named_mode) {
if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
return false;
+
+ if (!drm_named_mode_is_in_whitelist(name, mode_end))
+ return false;
+
strscpy(mode->name, name, mode_end + 1);
} else {
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
extra_ptr != options_ptr) {
int len = strlen(name) - (extra_ptr - name);
- ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
+ ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
connector, mode);
if (ret)
return false;
else if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
- dotclock = pipe_config->port_clock * 2 / 3;
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
else
dotclock = pipe_config->port_clock;
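/*
 * A quick standalone check of the generalized dotclock math above:
 * for deep-color HDMI the link clock runs at pipe_bpp/24 times the
 * pixel rate, so port_clock * 24 / pipe_bpp recovers the old 2/3
 * factor at 36 bpp and also covers 30 bpp. The port_clock value is
 * illustrative.
 */
#include <stdio.h>

int main(void)
{
	int port_clock = 445500;	/* kHz */
	int bpps[] = { 24, 30, 36 };
	int i;

	for (i = 0; i < 3; i++)
		printf("pipe_bpp %d -> dotclock %d kHz\n",
		       bpps[i], port_clock * 24 / bpps[i]);
	return 0;
}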
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
limits.min_bpp = intel_dp_min_bpp(pipe_config);
- limits.max_bpp = pipe_config->pipe_bpp;
+ /*
+ * FIXME: If all the streams can't fit into the link with
+ * their current pipe_bpp we should reduce pipe_bpp across
+ * the board until things start to fit. Until then we
+ * limit to <= 8bpc since that's what was hardcoded for all
+ * MST streams previously. This hack should be removed once
+ * we have the proper retry logic in place.
+ */
+ limits.max_bpp = min(pipe_config->pipe_bpp, 24);
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ /*
+ * Reuse the prop from the SST connector because we're
+ * not allowed to create new props after device registration.
+ */
+ connector->max_bpc_property =
+ intel_dp->attached_connector->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
return connector;
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
DRM_INFO("PPS2 = 0x%08x\n", pps_val);
- if (encoder->type == INTEL_OUTPUT_EDP) {
+ if (cpu_transcoder == TRANSCODER_EDP) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
- /*
- * As this may not be anonymous memory (e.g. shmem)
- * but exist on a real mapping, we have to lock
- * the page in order to dirty it -- holding
- * the page reference is not sufficient to
- * prevent the inode from being truncated.
- * Play safe and take the lock.
- */
- set_page_dirty_lock(page);
+ set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.indirect_ctx.guest_gma,
workload->wa_ctx.indirect_ctx.size)) {
- kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
workload->wa_ctx.indirect_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
return ERR_PTR(-EINVAL);
}
}
if (!intel_gvt_ggtt_validate_range(vgpu,
workload->wa_ctx.per_ctx.guest_gma,
CACHELINE_BYTES)) {
- kmem_cache_free(s->workloads, workload);
gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
workload->wa_ctx.per_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
return ERR_PTR(-EINVAL);
}
}
pci_set_master(pdev);
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ if (!drm_mm_node_allocated(node))
+ return;
+
DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
node->start,
node->start + node->size,
/*
* Frequence the dpll for the port should run at. Differs from the
- * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;
return ret;
}
- if (panel) {
+ if (panel)
bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_Unknown);
- }
+ DRM_MODE_CONNECTOR_DPI);
priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
timeout = drm_timeout_abs_to_jiffies(timeout_ns);
ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
- if (ret == 0)
+ if (ret == -ETIME)
ret = timeout ? -ETIMEDOUT : -EBUSY;
return ret;
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
struct mtk_drm_private *private = drm->dev_private;
struct platform_device *pdev;
struct device_node *np;
+ struct device *dma_dev;
int ret;
if (!iommu_present(&platform_bus_type))
goto err_component_unbind;
}
- private->dma_dev = &pdev->dev;
+ dma_dev = &pdev->dev;
+ private->dma_dev = dma_dev;
+
+ /*
+ * Configure the DMA segment size to make sure we get contiguous IOVA
+ * when importing PRIME buffers.
+ */
+ if (!dma_dev->dma_parms) {
+ private->dma_parms_allocated = true;
+ dma_dev->dma_parms =
+ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+ GFP_KERNEL);
+ }
+ if (!dma_dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_component_unbind;
+ }
+
+ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dma_dev, "Failed to set DMA segment size\n");
+ goto err_unset_dma_parms;
+ }
/*
* We don't use the drm_irq_install() helpers provided by the DRM
drm->irq_enabled = true;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret < 0)
- goto err_component_unbind;
+ goto err_unset_dma_parms;
drm_kms_helper_poll_init(drm);
drm_mode_config_reset(drm);
return 0;
+err_unset_dma_parms:
+ if (private->dma_parms_allocated)
+ dma_dev->dma_parms = NULL;
err_component_unbind:
component_unbind_all(drm->dev, drm);
err_config_cleanup:
static void mtk_drm_kms_deinit(struct drm_device *drm)
{
+ struct mtk_drm_private *private = drm->dev_private;
+
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
+ if (private->dma_parms_allocated)
+ private->dma_dev->dma_parms = NULL;
+
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
}
.compat_ioctl = drm_compat_ioctl,
};
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct mtk_drm_private *private = dev->dev_private;
+
+ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = mtk_drm_gem_prime_import,
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
ret = -ENOMEM;
+ of_node_put(node);
goto err_node;
}
ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
- if (ret)
+ if (ret) {
+ of_node_put(node);
goto err_node;
+ }
private->ddp_comp[comp_id] = comp;
}
} commit;
struct drm_atomic_state *suspend_state;
+
+ bool dma_parms_allocated;
};
extern struct platform_driver mtk_ddp_driver;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
- /* When restoring duplicated states, we need to make sure that the
- * bw remains the same and avoid recalculating it, as the connector's
- * bpc may have changed after the state was duplicated
- */
- if (!state->duplicated)
- asyh->dp.pbn =
- drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
- connector->display_info.bpc * 3);
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ /*
+ * When restoring duplicated states, we need to make sure that
+ * the bw remains the same and avoid recalculating it, as the
+ * connector's bpc may have changed after the state was
+ * duplicated
+ */
+ if (!state->duplicated) {
+ const int bpp = connector->display_info.bpc * 3;
+ const int clock = crtc_state->adjusted_mode.clock;
+
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+ }
- if (crtc_state->mode_changed) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port,
asyh->dp.pbn);
u8 *ptr = msg->buf;
while (remaining) {
- u8 cnt = (remaining > 16) ? 16 : remaining;
- u8 cmd;
+ u8 cnt, retries, cmd;
if (msg->flags & I2C_M_RD)
cmd = 1;
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
- if (ret < 0) {
- nvkm_i2c_aux_release(aux);
- return ret;
+ for (retries = 0, cnt = 0;
+ retries < 32 && !cnt;
+ retries++) {
+ cnt = min_t(u8, remaining, 16);
+ ret = aux->func->xfer(aux, true, cmd,
+ msg->addr, ptr, &cnt);
+ if (ret < 0)
+ goto out;
+ }
+ if (!cnt) {
+ AUX_TRACE(aux, "no data after 32 retries");
+ ret = -EIO;
+ goto out;
}
ptr += cnt;
msg++;
}
+ ret = num;
+out:
nvkm_i2c_aux_release(aux);
- return num;
+ return ret;
}
static u32
MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
* Author: Archit Taneja <archit@ti.com>
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
{
struct device_node *remote_node;
- remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+ remote_node = of_graph_get_remote_node(out->dev->of_node,
+ ffs(out->of_ports) - 1, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
return ret;
static struct drm_driver qxl_driver;
static struct pci_driver qxl_pci_driver;
+static bool is_vga(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
static int
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
if (ret)
goto disable_pci;
+ if (is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+ if (ret) {
+ DRM_ERROR("can't get legacy vga ioports\n");
+ goto disable_pci;
+ }
+ }
+
ret = qxl_device_init(qdev, &qxl_driver, pdev);
if (ret)
- goto disable_pci;
+ goto put_vga;
ret = qxl_modeset_init(qdev);
if (ret)
qxl_modeset_fini(qdev);
unload:
qxl_device_fini(qdev);
+put_vga:
+ if (is_vga(pdev))
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
free_dev:
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
+ if (is_vga(pdev))
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
dev->dev_private = NULL;
kfree(qdev);
/* Locate the companion LVDS encoder for dual-link operation, if any. */
companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
- if (!companion) {
- dev_err(dev, "Companion LVDS encoder not found\n");
- return -ENXIO;
- }
+ if (!companion)
+ return 0;
/*
* Sanity check: the companion encoder must have the same compatible
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
- spsc_queue_peek(&entity->job_queue) == NULL)
+ spsc_queue_count(&entity->job_queue) == 0)
return true;
return false;
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
*/
- if (spsc_queue_peek(&entity->job_queue)) {
+ if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
/* Park the kernel for a moment to make sure it isn't processing
 * our entity.
#define cmdline_test(test) selftest(test, test)
+cmdline_test(drm_cmdline_test_force_d_only)
+cmdline_test(drm_cmdline_test_force_D_only_dvi)
+cmdline_test(drm_cmdline_test_force_D_only_hdmi)
+cmdline_test(drm_cmdline_test_force_D_only_not_digital)
+cmdline_test(drm_cmdline_test_force_e_only)
+cmdline_test(drm_cmdline_test_margin_only)
+cmdline_test(drm_cmdline_test_interlace_only)
cmdline_test(drm_cmdline_test_res)
cmdline_test(drm_cmdline_test_res_missing_x)
cmdline_test(drm_cmdline_test_res_missing_y)
static const struct drm_connector no_connector = {};
+static int drm_cmdline_test_force_e_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static const struct drm_connector connector_hdmi = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &connector_hdmi,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+ return 0;
+}
+
+static const struct drm_connector connector_dvi = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static int drm_cmdline_test_force_D_only_dvi(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &connector_dvi,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+ return 0;
+}
+
+static int drm_cmdline_test_force_d_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_OFF);
+
+ return 0;
+}
+
+static int drm_cmdline_test_margin_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("m",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
+static int drm_cmdline_test_interlace_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("i",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
static int drm_cmdline_test_res(void *ignored)
{
struct drm_cmdline_mode mode = { };
/* R and B components are only 5 bits deep */
val |= SUN4I_TCON0_FRM_CTL_MODE_R;
val |= SUN4I_TCON0_FRM_CTL_MODE_B;
+ /* Fall through */
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
/* Fall through: enable dithering */
ret = sun6i_dsi_dcs_read(dsi, msg);
break;
}
+ /* Else, fall through */
default:
ret = -EINVAL;
.interruptible = false,
.no_wait_gpu = false
};
+ size_t max_segment;
/* wtf swapping */
if (bo->pages)
if (!bo->pages)
goto out;
- ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ max_segment = virtio_max_dma_size(qdev->vdev);
+ max_segment &= PAGE_MASK;
+ if (max_segment > SCATTERLIST_MAX_SEGMENT)
+ max_segment = SCATTERLIST_MAX_SEGMENT;
+ ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
if (ret)
goto out;
return 0;
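/*
 * A standalone model of the segment-size clamp above: the device's
 * DMA limit is rounded down to a page boundary, then capped at the
 * scatterlist maximum. PAGE_SIZE is assumed to be 4 KiB here;
 * SCATTERLIST_MAX_SEGMENT mirrors the kernel definition.
 */
#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define SCATTERLIST_MAX_SEGMENT ((unsigned int)(UINT_MAX & PAGE_MASK))

static unsigned long clamp_segment(unsigned long dma_max)
{
	unsigned long seg = dma_max & PAGE_MASK;	/* page-align down */

	if (seg > SCATTERLIST_MAX_SEGMENT)
		seg = SCATTERLIST_MAX_SEGMENT;
	return seg;
}

int main(void)
{
	printf("%lu\n", clamp_segment(1UL << 22));	/* 4 MiB device cap */
	printf("%lu\n", clamp_segment(~0UL));		/* unlimited device */
	return 0;
}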
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
break;
}
- if (retries == RETRIES) {
- kfree(reply);
+ if (!reply)
return -EINVAL;
- }
*msg_len = reply_len;
*msg = reply;
INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
- cp2112_gpio_direction_input(gc, d->hwirq);
-
if (!dev->gpio_poll) {
dev->gpio_poll = true;
schedule_delayed_work(&dev->gpio_poll_worker, 0);
return PTR_ERR(dev->desc[pin]);
}
+ ret = cp2112_gpio_direction_input(&dev->gc, pin);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
+ goto err_desc;
+ }
+
ret = gpiochip_lock_as_irq(&dev->gc, pin);
if (ret) {
dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
{ L27MHZ_DEVICE(HID_ANY_ID) },
- { /* Logitech G203/Prodigy Gaming Mouse */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
- { /* Logitech G302 Gaming Mouse */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
- { /* Logitech G303 Gaming Mouse */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
- { /* Logitech G400 Gaming Mouse */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
{ /* Logitech G403 Wireless Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
- { /* Logitech G403 Gaming Mouse */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
- { /* Logitech G403 Hero Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
- { /* Logitech G502 Proteus Core Gaming Mouse */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
- { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
- { /* Logitech G502 Hero Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
- { /* Logitech G700 Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
- { /* Logitech G700s Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
{ /* Logitech G703 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
{ /* Logitech G703 Hero Gaming Mouse over USB */
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
#define CML_LP_DEVICE_ID 0x02FC
+#define EHL_Ax_DEVICE_ID 0x4BB3
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
y >>= 1;
distance >>= 1;
}
+ if (features->type == INTUOSHT2)
+ distance = features->distance_max - distance;
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
input_report_abs(input, ABS_DISTANCE, distance);
input_report_key(input, BTN_BASE2, (data[11] & 0x02));
if (data[12] & 0x80)
- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
else
input_report_abs(input, ABS_WHEEL, 0);
}
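A worked byte, under the reading that bit 7 of data[12] flags wheel activity and the low seven bits carry a 1-based position: data[12] = 0x81 gives 0x81 & 0x7f = 1, which the corrected code reports as 1 - 1 = 0, so the reported ABS_WHEEL range now starts at 0 rather than 1.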
if (wacom->tool[0]) {
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
- if (wacom->features.type == INTUOSP2_BT) {
+ if (wacom->features.type == INTUOSP2_BT ||
+ wacom->features.type == INTUOSP2S_BT) {
input_report_abs(pen_input, ABS_DISTANCE,
range ? frame[13] : wacom->features.distance_max);
} else {
static unsigned long virt_to_hvpfn(void *addr)
{
- unsigned long paddr;
+ phys_addr_t paddr;
if (is_vmalloc_addr(addr))
paddr = page_to_phys(vmalloc_to_page(addr)) +
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hyperv
*/
u64 guestid;
- void *tsc_page;
-
struct hv_per_cpu_context __percpu *cpu_context;
/*
This driver can also be built as a module. If so, the module
will be called adt7475.
+config SENSORS_AS370
+ tristate "Synaptics AS370 SoC hardware monitoring driver"
+ help
+ If you say yes here you get support for the PVT sensors of
+ the Synaptics AS370 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called as370-hwmon.
+
config SENSORS_ASC7621
tristate "Andigilog aSC7621"
depends on I2C
tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
depends on I2C
help
- If you say yes here you get support for the Sensiron SHTC1 and SHTW1
- humidity and temperature sensors.
+ If you say yes here you get support for the Sensirion SHTC1, SHTW1,
+ and SHTC3 humidity and temperature sensors.
This driver can also be built as a module. If so, the module
will be called shtc1.
This driver can also be built as a module. If so, the module
will be called adc128d818.
-config SENSORS_ADS1015
- tristate "Texas Instruments ADS1015"
- depends on I2C
- help
- If you say yes here you get support for Texas Instruments
- ADS1015/ADS1115 12/16-bit 4-input ADC device.
-
- This driver can also be built as a module. If so, the module
- will be called ads1015.
-
config SENSORS_ADS7828
tristate "Texas Instruments ADS7828 and compatibles"
depends on I2C
will be called w83795.
config SENSORS_W83795_FANCTRL
- bool "Include automatic fan control support (DANGEROUS)"
+ bool "Include automatic fan control support"
depends on SENSORS_W83795
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
- This part of the code wasn't carefully reviewed and tested yet,
- so enabling this option is strongly discouraged on production
- servers. Only developers and testers should enable it for the
- time being.
-
Please also note that this option will create sysfs attribute
files which may change in the future, so you shouldn't rely
on them being stable.
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
-obj-$(CONFIG_SENSORS_ADS1015) += ads1015.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7X10) += adt7x10.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
+obj-$(CONFIG_SENSORS_AS370) += as370-hwmon.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
if (resource->caps.flags & POWER_METER_CAN_CAP) {
if (!can_cap_in_hardware()) {
- dev_err(&resource->acpi_dev->dev,
- "Ignoring unsafe software power cap!\n");
+ dev_warn(&resource->acpi_dev->dev,
+ "Ignoring unsafe software power cap!\n");
goto skip_unsafe_cap;
}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ads1015.c - lm_sensors driver for ads1015 12-bit 4-input ADC
- * (C) Copyright 2010
- * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
- *
- * Based on the ads7828 driver by Steve Hardy.
- *
- * Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads1015.pdf
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-
-#include <linux/platform_data/ads1015.h>
-
-/* ADS1015 registers */
-enum {
- ADS1015_CONVERSION = 0,
- ADS1015_CONFIG = 1,
-};
-
-/* PGA fullscale voltages in mV */
-static const unsigned int fullscale_table[8] = {
- 6144, 4096, 2048, 1024, 512, 256, 256, 256 };
-
-/* Data rates in samples per second */
-static const unsigned int data_rate_table_1015[8] = {
- 128, 250, 490, 920, 1600, 2400, 3300, 3300
-};
-
-static const unsigned int data_rate_table_1115[8] = {
- 8, 16, 32, 64, 128, 250, 475, 860
-};
-
-#define ADS1015_DEFAULT_CHANNELS 0xff
-#define ADS1015_DEFAULT_PGA 2
-#define ADS1015_DEFAULT_DATA_RATE 4
-
-enum ads1015_chips {
- ads1015,
- ads1115,
-};
-
-struct ads1015_data {
- struct device *hwmon_dev;
- struct mutex update_lock; /* mutex protect updates */
- struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
- enum ads1015_chips id;
-};
-
-static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
-{
- u16 config;
- struct ads1015_data *data = i2c_get_clientdata(client);
- unsigned int pga = data->channel_data[channel].pga;
- unsigned int data_rate = data->channel_data[channel].data_rate;
- unsigned int conversion_time_ms;
- const unsigned int * const rate_table = data->id == ads1115 ?
- data_rate_table_1115 : data_rate_table_1015;
- int res;
-
- mutex_lock(&data->update_lock);
-
- /* get channel parameters */
- res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
- if (res < 0)
- goto err_unlock;
- config = res;
- conversion_time_ms = DIV_ROUND_UP(1000, rate_table[data_rate]);
-
- /* setup and start single conversion */
- config &= 0x001f;
- config |= (1 << 15) | (1 << 8);
- config |= (channel & 0x0007) << 12;
- config |= (pga & 0x0007) << 9;
- config |= (data_rate & 0x0007) << 5;
-
- res = i2c_smbus_write_word_swapped(client, ADS1015_CONFIG, config);
- if (res < 0)
- goto err_unlock;
-
- /* wait until conversion finished */
- msleep(conversion_time_ms);
- res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
- if (res < 0)
- goto err_unlock;
- config = res;
- if (!(config & (1 << 15))) {
- /* conversion not finished in time */
- res = -EIO;
- goto err_unlock;
- }
-
- res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
-
-err_unlock:
- mutex_unlock(&data->update_lock);
- return res;
-}
-
-static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
- s16 reg)
-{
- struct ads1015_data *data = i2c_get_clientdata(client);
- unsigned int pga = data->channel_data[channel].pga;
- int fullscale = fullscale_table[pga];
- const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
-
- return DIV_ROUND_CLOSEST(reg * fullscale, mask);
-}
-
-/* sysfs callback function */
-static ssize_t in_show(struct device *dev, struct device_attribute *da,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
- int res;
- int index = attr->index;
-
- res = ads1015_read_adc(client, index);
- if (res < 0)
- return res;
-
- return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res));
-}
-
-static const struct sensor_device_attribute ads1015_in[] = {
- SENSOR_ATTR_RO(in0_input, in, 0),
- SENSOR_ATTR_RO(in1_input, in, 1),
- SENSOR_ATTR_RO(in2_input, in, 2),
- SENSOR_ATTR_RO(in3_input, in, 3),
- SENSOR_ATTR_RO(in4_input, in, 4),
- SENSOR_ATTR_RO(in5_input, in, 5),
- SENSOR_ATTR_RO(in6_input, in, 6),
- SENSOR_ATTR_RO(in7_input, in, 7),
-};
-
-/*
- * Driver interface
- */
-
-static int ads1015_remove(struct i2c_client *client)
-{
- struct ads1015_data *data = i2c_get_clientdata(client);
- int k;
-
- hwmon_device_unregister(data->hwmon_dev);
- for (k = 0; k < ADS1015_CHANNELS; ++k)
- device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
- return 0;
-}
-
-#ifdef CONFIG_OF
-static int ads1015_get_channels_config_of(struct i2c_client *client)
-{
- struct ads1015_data *data = i2c_get_clientdata(client);
- struct device_node *node;
-
- if (!client->dev.of_node
- || !of_get_next_child(client->dev.of_node, NULL))
- return -EINVAL;
-
- for_each_child_of_node(client->dev.of_node, node) {
- u32 pval;
- unsigned int channel;
- unsigned int pga = ADS1015_DEFAULT_PGA;
- unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
-
- if (of_property_read_u32(node, "reg", &pval)) {
- dev_err(&client->dev, "invalid reg on %pOF\n", node);
- continue;
- }
-
- channel = pval;
- if (channel >= ADS1015_CHANNELS) {
- dev_err(&client->dev,
- "invalid channel index %d on %pOF\n",
- channel, node);
- continue;
- }
-
- if (!of_property_read_u32(node, "ti,gain", &pval)) {
- pga = pval;
- if (pga > 6) {
- dev_err(&client->dev, "invalid gain on %pOF\n",
- node);
- return -EINVAL;
- }
- }
-
- if (!of_property_read_u32(node, "ti,datarate", &pval)) {
- data_rate = pval;
- if (data_rate > 7) {
- dev_err(&client->dev,
- "invalid data_rate on %pOF\n", node);
- return -EINVAL;
- }
- }
-
- data->channel_data[channel].enabled = true;
- data->channel_data[channel].pga = pga;
- data->channel_data[channel].data_rate = data_rate;
- }
-
- return 0;
-}
-#endif
-
-static void ads1015_get_channels_config(struct i2c_client *client)
-{
- unsigned int k;
- struct ads1015_data *data = i2c_get_clientdata(client);
- struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
-
- /* prefer platform data */
- if (pdata) {
- memcpy(data->channel_data, pdata->channel_data,
- sizeof(data->channel_data));
- return;
- }
-
-#ifdef CONFIG_OF
- if (!ads1015_get_channels_config_of(client))
- return;
-#endif
-
- /* fallback on default configuration */
- for (k = 0; k < ADS1015_CHANNELS; ++k) {
- data->channel_data[k].enabled = true;
- data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
- data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
- }
-}
-
-static int ads1015_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ads1015_data *data;
- int err;
- unsigned int k;
-
- data = devm_kzalloc(&client->dev, sizeof(struct ads1015_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (client->dev.of_node)
- data->id = (enum ads1015_chips)
- of_device_get_match_data(&client->dev);
- else
- data->id = id->driver_data;
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- /* build sysfs attribute group */
- ads1015_get_channels_config(client);
- for (k = 0; k < ADS1015_CHANNELS; ++k) {
- if (!data->channel_data[k].enabled)
- continue;
- err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- for (k = 0; k < ADS1015_CHANNELS; ++k)
- device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
- return err;
-}
-
-static const struct i2c_device_id ads1015_id[] = {
- { "ads1015", ads1015},
- { "ads1115", ads1115},
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ads1015_id);
-
-static const struct of_device_id __maybe_unused ads1015_of_match[] = {
- {
- .compatible = "ti,ads1015",
- .data = (void *)ads1015
- },
- {
- .compatible = "ti,ads1115",
- .data = (void *)ads1115
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, ads1015_of_match);
-
-static struct i2c_driver ads1015_driver = {
- .driver = {
- .name = "ads1015",
- .of_match_table = of_match_ptr(ads1015_of_match),
- },
- .probe = ads1015_probe,
- .remove = ads1015_remove,
- .id_table = ads1015_id,
-};
-
-module_i2c_driver(ads1015_driver);
-
-MODULE_AUTHOR("Dirk Eibach <eibach@gdsys.de>");
-MODULE_DESCRIPTION("ADS1015 driver");
-MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, adt7475_of_match);
struct adt7475_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex lock;
unsigned long measure_updated;
u8 vid;
u8 vrm;
+ const struct attribute_group *groups[9];
};
static struct i2c_driver adt7475_driver;
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg;
long val;
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg = 0;
u8 out;
int temp;
char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
long val;
switch (sattr->index) {
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg;
int shift, idx;
ulong val;
static ssize_t point2_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int temp;
long val;
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val))
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg = 0;
long val;
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+
u8 mask = BIT(5 + sattr->index);
return sprintf(buf, "%d\n", !!(data->enh_acoustics[0] & mask));
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
u8 mask = BIT(5 + sattr->index);
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int r;
long val;
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int r;
long val;
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int out;
long val;
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
if (kstrtol(buf, 10, &val))
return 0;
}
-static void adt7475_remove_files(struct i2c_client *client,
- struct adt7475_data *data)
-{
- sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
- if (data->has_fan4)
- sysfs_remove_group(&client->dev.kobj, &fan4_attr_group);
- if (data->has_pwm2)
- sysfs_remove_group(&client->dev.kobj, &pwm2_attr_group);
- if (data->has_voltage & (1 << 0))
- sysfs_remove_group(&client->dev.kobj, &in0_attr_group);
- if (data->has_voltage & (1 << 3))
- sysfs_remove_group(&client->dev.kobj, &in3_attr_group);
- if (data->has_voltage & (1 << 4))
- sysfs_remove_group(&client->dev.kobj, &in4_attr_group);
- if (data->has_voltage & (1 << 5))
- sysfs_remove_group(&client->dev.kobj, &in5_attr_group);
- if (data->has_vid)
- sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
-}
-
static int adt7475_update_limits(struct i2c_client *client)
{
struct adt7475_data *data = i2c_get_clientdata(client);
};
struct adt7475_data *data;
- int i, ret = 0, revision;
+ struct device *hwmon_dev;
+ int i, ret = 0, revision, group_num = 0;
u8 config2, config3;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
return -ENOMEM;
mutex_init(&data->lock);
+ data->client = client;
i2c_set_clientdata(client, data);
if (client->dev.of_node)
break;
}
- ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
- if (ret)
- return ret;
+ data->groups[group_num++] = &adt7475_attr_group;
/* Features that can be disabled individually */
if (data->has_fan4) {
- ret = sysfs_create_group(&client->dev.kobj, &fan4_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &fan4_attr_group;
}
if (data->has_pwm2) {
- ret = sysfs_create_group(&client->dev.kobj, &pwm2_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &pwm2_attr_group;
}
if (data->has_voltage & (1 << 0)) {
- ret = sysfs_create_group(&client->dev.kobj, &in0_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in0_attr_group;
}
if (data->has_voltage & (1 << 3)) {
- ret = sysfs_create_group(&client->dev.kobj, &in3_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in3_attr_group;
}
if (data->has_voltage & (1 << 4)) {
- ret = sysfs_create_group(&client->dev.kobj, &in4_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in4_attr_group;
}
if (data->has_voltage & (1 << 5)) {
- ret = sysfs_create_group(&client->dev.kobj, &in5_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in5_attr_group;
}
if (data->has_vid) {
data->vrm = vid_which_vrm();
- ret = sysfs_create_group(&client->dev.kobj, &vid_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num] = &vid_attr_group;
}
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto eremove;
+ /* register device with all the acquired attributes */
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
+ return ret;
}
dev_info(&client->dev, "%s device, revision %d\n",
/* Limits and settings, should never change update more than once */
ret = adt7475_update_limits(client);
if (ret)
- goto eremove;
-
- return 0;
-
-eremove:
- adt7475_remove_files(client, data);
- return ret;
-}
-
-static int adt7475_remove(struct i2c_client *client)
-{
- struct adt7475_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- adt7475_remove_files(client, data);
+ return ret;
return 0;
}
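The rework above is an instance of a common modernization: collect the chip's optional attribute groups into a NULL-terminated array and hand them to devm_hwmon_device_register_with_groups(), which makes both the .remove() callback and the manual sysfs teardown unnecessary. A minimal sketch (group and struct names invented, data assumed zero-allocated so the terminating NULL comes for free):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/hwmon.h>
    #include <linux/sysfs.h>

    static const struct attribute_group example_base_group;   /* placeholder */
    static const struct attribute_group example_fan4_group;   /* placeholder */

    struct example_data {
            /* lives as long as the device: hwmon keeps referencing it */
            const struct attribute_group *groups[3];
    };

    static int example_register(struct device *dev, struct example_data *data,
                                bool has_fan4)
    {
            struct device *hwmon_dev;
            int n = 0;

            data->groups[n++] = &example_base_group;
            if (has_fan4)
                    data->groups[n++] = &example_fan4_group;
            /* data->groups[n] stays NULL and terminates the list */

            hwmon_dev = devm_hwmon_device_register_with_groups(dev, "example",
                                                               data, data->groups);
            return PTR_ERR_OR_ZERO(hwmon_dev);
    }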
.of_match_table = of_match_ptr(adt7475_of_match),
},
.probe = adt7475_probe,
- .remove = adt7475_remove,
.id_table = adt7475_id,
.detect = adt7475_detect,
.address_list = normal_i2c,
static int adt7475_update_measure(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u16 ext;
int i;
int ret;
static struct adt7475_data *adt7475_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
int ret;
mutex_lock(&data->lock);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics AS370 SoC Hardware Monitoring Driver
+ *
+ * Copyright (C) 2018 Synaptics Incorporated
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#define CTRL 0x0
+#define PD BIT(0)
+#define EN BIT(1)
+#define T_SEL BIT(2)
+#define V_SEL BIT(3)
+#define NMOS_SEL BIT(8)
+#define PMOS_SEL BIT(9)
+#define STS 0x4
+#define BN_MASK GENMASK(11, 0)
+#define EOC BIT(12)
+
+struct as370_hwmon {
+ void __iomem *base;
+};
+
+static void init_pvt(struct as370_hwmon *hwmon)
+{
+ u32 val;
+ void __iomem *addr = hwmon->base + CTRL;
+
+ val = PD;
+ writel_relaxed(val, addr);
+ val |= T_SEL;
+ writel_relaxed(val, addr);
+ val |= EN;
+ writel_relaxed(val, addr);
+ val &= ~PD;
+ writel_relaxed(val, addr);
+}
+
+static int as370_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ int val;
+ struct as370_hwmon *hwmon = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_input:
+ val = readl_relaxed(hwmon->base + STS) & BN_MASK;
+ *temp = DIV_ROUND_CLOSEST(val * 251802, 4096) - 85525;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t
+as370_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const u32 as370_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info as370_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = as370_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *as370_hwmon_info[] = {
+ &as370_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops as370_hwmon_ops = {
+ .is_visible = as370_hwmon_is_visible,
+ .read = as370_hwmon_read,
+};
+
+static const struct hwmon_chip_info as370_chip_info = {
+ .ops = &as370_hwmon_ops,
+ .info = as370_hwmon_info,
+};
+
+static int as370_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct as370_hwmon *hwmon;
+ struct device *dev = &pdev->dev;
+
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ hwmon->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hwmon->base))
+ return PTR_ERR(hwmon->base);
+
+ init_pvt(hwmon);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ "as370",
+ hwmon,
+ &as370_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id as370_hwmon_match[] = {
+ { .compatible = "syna,as370-hwmon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, as370_hwmon_match);
+
+static struct platform_driver as370_hwmon_driver = {
+ .probe = as370_hwmon_probe,
+ .driver = {
+ .name = "as370-hwmon",
+ .of_match_table = as370_hwmon_match,
+ },
+};
+module_platform_driver(as370_hwmon_driver);
+
+MODULE_AUTHOR("Jisheng Zhang<jszhang@kernel.org>");
+MODULE_DESCRIPTION("Synaptics AS370 SoC hardware monitor");
+MODULE_LICENSE("GPL v2");
goto ERROR_SC_2;
}
- data->lm75[0] = i2c_new_dummy(adapter, sc_addr[0]);
- if (!data->lm75[0]) {
+ data->lm75[0] = i2c_new_dummy_device(adapter, sc_addr[0]);
+ if (IS_ERR(data->lm75[0])) {
dev_err(&client->dev,
"subclient %d registration at address 0x%x failed.\n",
1, sc_addr[0]);
- err = -ENOMEM;
+ err = PTR_ERR(data->lm75[0]);
goto ERROR_SC_2;
}
- data->lm75[1] = i2c_new_dummy(adapter, sc_addr[1]);
- if (!data->lm75[1]) {
+ data->lm75[1] = i2c_new_dummy_device(adapter, sc_addr[1]);
+ if (IS_ERR(data->lm75[1])) {
dev_err(&client->dev,
"subclient %d registration at address 0x%x failed.\n",
2, sc_addr[1]);
- err = -ENOMEM;
+ err = PTR_ERR(data->lm75[1]);
goto ERROR_SC_3;
}
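The i2c_new_dummy_device() conversion hinges on the return convention changing from pointer-or-NULL to pointer-or-encoded-errno, so the caller can propagate the real error instead of a blanket -ENOMEM. A reduced sketch of consuming an ERR_PTR-style API (client_new() is a stand-in):

    #include <linux/err.h>
    #include <linux/i2c.h>

    struct i2c_client *client_new(void); /* stand-in: never NULL, ERR_PTR on failure */

    static int attach_subclient(struct i2c_client **slot)
    {
            struct i2c_client *c = client_new();

            if (IS_ERR(c))
                    return PTR_ERR(c);      /* propagate the encoded errno */
            *slot = c;
            return 0;
    }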
err = platform_driver_register(&coretemp_driver);
if (err)
- return err;
+ goto outzone;
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
coretemp_cpu_online, coretemp_cpu_offline);
outdrv:
platform_driver_unregister(&coretemp_driver);
+outzone:
kfree(zone_devices);
return err;
}
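The fix restores the usual inverse-order unwind: whatever was set up before the failing step must be torn down on the way out. Schematically (step names invented):

    int step_a(void); int step_b(void); int step_c(void);
    void undo_a(void); void undo_b(void);

    static int example_init(void)
    {
            int err;

            err = step_a();         /* e.g. allocate zone_devices */
            if (err)
                    return err;
            err = step_b();         /* e.g. register the platform driver */
            if (err)
                    goto out_a;     /* the jump the fix adds */
            err = step_c();         /* e.g. set up the hotplug state */
            if (err)
                    goto out_b;
            return 0;

    out_b:
            undo_b();
    out_a:
            undo_a();
            return err;
    }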
int ret;
struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
struct iio_hwmon_state *state = dev_get_drvdata(dev);
+ struct iio_channel *chan = &state->channels[sattr->index];
+ enum iio_chan_type type;
+
- ret = iio_read_channel_processed(&state->channels[sattr->index],
- &result);
+ ret = iio_read_channel_processed(chan, &result);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_get_channel_type(chan, &type);
if (ret < 0)
return ret;
+ if (type == IIO_POWER)
+ result *= 1000; /* milliwatts to microwatts conversion */
+
return sprintf(buf, "%d\n", result);
}
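In concrete units: a processed IIO power reading of, say, 12500 (milliwatts, per the comment above) is scaled to 12500 * 1000 = 12500000 before being printed, since the hwmon sysfs ABI specifies power*_input in microwatts.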
struct iio_hwmon_state *st;
struct sensor_device_attribute *a;
int ret, i;
- int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
+ int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1, power_i = 1;
enum iio_chan_type type;
struct iio_channel *channels;
struct device *hwmon_dev;
n = curr_i++;
prefix = "curr";
break;
+ case IIO_POWER:
+ n = power_i++;
+ prefix = "power";
+ break;
case IIO_HUMIDITYRELATIVE:
n = humidity_i++;
prefix = "humidity";
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <asm/processor.h>
#define SEL_CORE 0x04
struct k8temp_data {
- struct device *hwmon_dev;
struct mutex update_lock;
- const char *name;
- char valid; /* zero until following fields are valid */
- unsigned long last_updated; /* in jiffies */
/* registers values */
u8 sensorsp; /* sensor presence bits - SEL_CORE, SEL_PLACE */
- u32 temp[2][2]; /* core, place */
u8 swap_core_select; /* meaning of SEL_CORE is inverted */
u32 temp_offset;
};
-static struct k8temp_data *k8temp_update_device(struct device *dev)
-{
- struct k8temp_data *data = dev_get_drvdata(dev);
- struct pci_dev *pdev = to_pci_dev(dev);
- u8 tmp;
-
- mutex_lock(&data->update_lock);
-
- if (!data->valid
- || time_after(jiffies, data->last_updated + HZ)) {
- pci_read_config_byte(pdev, REG_TEMP, &tmp);
- tmp &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP, &data->temp[0][0]);
-
- if (data->sensorsp & SEL_PLACE) {
- tmp |= SEL_PLACE; /* Select sensor 1, core0 */
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP,
- &data->temp[0][1]);
- }
-
- if (data->sensorsp & SEL_CORE) {
- tmp &= ~SEL_PLACE; /* Select sensor 0, core1 */
- tmp |= SEL_CORE;
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP,
- &data->temp[1][0]);
-
- if (data->sensorsp & SEL_PLACE) {
- tmp |= SEL_PLACE; /* Select sensor 1, core1 */
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP,
- &data->temp[1][1]);
- }
- }
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
- return data;
-}
-
-/*
- * Sysfs stuff
- */
-
-static ssize_t name_show(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct k8temp_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute_2 *attr =
- to_sensor_dev_attr_2(devattr);
- int core = attr->nr;
- int place = attr->index;
- int temp;
- struct k8temp_data *data = k8temp_update_device(dev);
-
- if (data->swap_core_select && (data->sensorsp & SEL_CORE))
- core = core ? 0 : 1;
-
- temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
-
- return sprintf(buf, "%d\n", temp);
-}
-
-/* core, place */
-
-static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temp, 0, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_input, temp, 0, 1);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_input, temp, 1, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_input, temp, 1, 1);
-static DEVICE_ATTR_RO(name);
-
static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ 0 },
};
-
MODULE_DEVICE_TABLE(pci, k8temp_ids);
static int is_rev_g_desktop(u8 model)
return 1;
}
+static umode_t
+k8temp_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct k8temp_data *data = drvdata;
+
+ if ((channel & 1) && !(data->sensorsp & SEL_PLACE))
+ return 0;
+
+ if ((channel & 2) && !(data->sensorsp & SEL_CORE))
+ return 0;
+
+ return 0444;
+}
+
+static int
+k8temp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct k8temp_data *data = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int core, place;
+ u32 temp;
+ u8 tmp;
+
+ core = (channel >> 1) & 1;
+ place = channel & 1;
+
+ core ^= data->swap_core_select;
+
+ mutex_lock(&data->update_lock);
+ pci_read_config_byte(pdev, REG_TEMP, &tmp);
+ tmp &= ~(SEL_PLACE | SEL_CORE);
+ if (core)
+ tmp |= SEL_CORE;
+ if (place)
+ tmp |= SEL_PLACE;
+ pci_write_config_byte(pdev, REG_TEMP, tmp);
+ pci_read_config_dword(pdev, REG_TEMP, &temp);
+ mutex_unlock(&data->update_lock);
+
+ *val = TEMP_FROM_REG(temp) + data->temp_offset;
+
+ return 0;
+}
+
+static const struct hwmon_ops k8temp_ops = {
+ .is_visible = k8temp_is_visible,
+ .read = k8temp_read,
+};
+
+static const struct hwmon_channel_info *k8temp_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT, HWMON_T_INPUT, HWMON_T_INPUT, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info k8temp_chip_info = {
+ .ops = &k8temp_ops,
+ .info = k8temp_info,
+};
+
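For reference, the channel numbering matches the decoding in k8temp_read() above, channel = (core << 1) | place:

    /*   channel 0 -> core 0, place 0   (temp1_input)
     *   channel 1 -> core 0, place 1   (temp2_input)
     *   channel 2 -> core 1, place 0   (temp3_input)
     *   channel 3 -> core 1, place 1   (temp4_input)
     * k8temp_is_visible() then hides place-1 channels when SEL_PLACE is
     * absent and core-1 channels when SEL_CORE is absent. */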
static int k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err;
u8 scfg;
u32 temp;
u8 model, stepping;
struct k8temp_data *data;
+ struct device *hwmon_dev;
data = devm_kzalloc(&pdev->dev, sizeof(struct k8temp_data), GFP_KERNEL);
if (!data)
data->sensorsp &= ~SEL_CORE;
}
- data->name = "k8temp";
mutex_init(&data->update_lock);
- pci_set_drvdata(pdev, data);
-
- /* Register sysfs hooks */
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- if (err)
- goto exit_remove;
-
- /* sensor can be changed and reports something */
- if (data->sensorsp & SEL_PLACE) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp2_input.dev_attr);
- if (err)
- goto exit_remove;
- }
-
- /* core can be changed and reports something */
- if (data->sensorsp & SEL_CORE) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp3_input.dev_attr);
- if (err)
- goto exit_remove;
- if (data->sensorsp & SEL_PLACE) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp4_input.
- dev_attr);
- if (err)
- goto exit_remove;
- }
- }
-
- err = device_create_file(&pdev->dev, &dev_attr_name);
- if (err)
- goto exit_remove;
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "k8temp",
+ data,
+ &k8temp_chip_info,
+ NULL);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp2_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp3_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp4_input.dev_attr);
- device_remove_file(&pdev->dev, &dev_attr_name);
- return err;
-}
-
-static void k8temp_remove(struct pci_dev *pdev)
-{
- struct k8temp_data *data = pci_get_drvdata(pdev);
-
- hwmon_device_unregister(data->hwmon_dev);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp2_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp3_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp4_input.dev_attr);
- device_remove_file(&pdev->dev, &dev_attr_name);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct pci_driver k8temp_driver = {
.name = "k8temp",
.id_table = k8temp_ids,
.probe = k8temp_probe,
- .remove = k8temp_remove,
};
module_pci_driver(k8temp_driver);
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
#include "lm75.h"
-
/*
* This driver handles the LM75 and compatible digital temperature sensors.
*/
max6626,
max31725,
mcp980x,
+ pct2075,
stds75,
stlm75,
tcn75,
tmp75c,
};
+/**
+ * struct lm75_params - lm75 configuration parameters.
+ * @set_mask: Bits to set in configuration register when configuring
+ * the chip.
+ * @clr_mask: Bits to clear in configuration register when configuring
+ * the chip.
+ * @default_resolution: Default number of bits to represent the temperature
+ * value.
+ * @resolution_limits: Limit register resolution. Optional. Should be set if
+ * the resolution of limit registers does not match the
+ * resolution of the temperature register.
+ * @resolutions: List of resolutions associated with sample times.
+ * Optional. Should be set if num_sample_times is larger
+ * than 1, and if the resolution changes with sample times.
+ * If set, number of entries must match num_sample_times.
+ * @default_sample_time: Sample time to be set by default.
+ * @num_sample_times: Number of possible sample times to be set. Optional.
+ * Should be set if the number of sample times is larger
+ * than one.
+ * @sample_times: All the possible sample times to be set. Mandatory if
+ * num_sample_times is larger than 1. If set, number of
+ * entries must match num_sample_times.
+ */
+struct lm75_params {
+ u8 set_mask;
+ u8 clr_mask;
+ u8 default_resolution;
+ u8 resolution_limits;
+ const u8 *resolutions;
+ unsigned int default_sample_time;
+ u8 num_sample_times;
+ const unsigned int *sample_times;
+};
+
/* Addresses scanned */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
#define LM75_REG_CONF 0x01
#define LM75_REG_HYST 0x02
#define LM75_REG_MAX 0x03
+#define PCT2075_REG_IDLE 0x04
/* Each client has this additional data */
struct lm75_data {
- struct i2c_client *client;
- struct regmap *regmap;
- u8 orig_conf;
- u8 resolution; /* In bits, between 9 and 16 */
- u8 resolution_limits;
- unsigned int sample_time; /* In ms */
+ struct i2c_client *client;
+ struct regmap *regmap;
+ u8 orig_conf;
+ u8 current_conf;
+ u8 resolution; /* In bits, 9 to 16 */
+ unsigned int sample_time; /* In ms */
+ enum lm75_type kind;
+ const struct lm75_params *params;
};
/*-----------------------------------------------------------------------*/
+static const u8 lm75_sample_set_masks[] = { 0 << 5, 1 << 5, 2 << 5, 3 << 5 };
+
+#define LM75_SAMPLE_CLEAR_MASK (3 << 5)
+
+/*
+ * The structure below stores the configuration values of the supported
+ * devices. If a device supports multiple configurations, the default one
+ * must always be the first element of the array.
+ */
+static const struct lm75_params device_params[] = {
+ [adt75] = {
+ .clr_mask = 1 << 5, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [ds1775] = {
+ .clr_mask = 3 << 5,
+ .set_mask = 2 << 5, /* 11-bit mode */
+ .default_resolution = 11,
+ .default_sample_time = 500,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 125, 250, 500, 1000 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [ds75] = {
+ .clr_mask = 3 << 5,
+ .set_mask = 2 << 5, /* 11-bit mode */
+ .default_resolution = 11,
+ .default_sample_time = 600,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 150, 300, 600, 1200 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [stds75] = {
+ .clr_mask = 3 << 5,
+ .set_mask = 2 << 5, /* 11-bit mode */
+ .default_resolution = 11,
+ .default_sample_time = 600,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 150, 300, 600, 1200 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [stlm75] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 6,
+ },
+ [ds7505] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .default_resolution = 12,
+ .default_sample_time = 200,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 25, 50, 100, 200 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [g751] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [lm75] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [lm75a] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [lm75b] = {
+ .default_resolution = 11,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [max6625] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 7,
+ },
+ [max6626] = {
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 7,
+ .resolution_limits = 9,
+ },
+ [max31725] = {
+ .default_resolution = 16,
+ .default_sample_time = MSEC_PER_SEC / 20,
+ },
+ [tcn75] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 18,
+ },
+ [pct2075] = {
+ .default_resolution = 11,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ .num_sample_times = 31,
+ .sample_times = (unsigned int []){ 100, 200, 300, 400, 500, 600,
+ 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700,
+ 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700,
+ 2800, 2900, 3000, 3100 },
+ },
+ [mcp980x] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .resolution_limits = 9,
+ .default_sample_time = 240,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 30, 60, 120, 240 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp100] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 320,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 40, 80, 160, 320 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp101] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 320,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 40, 80, 160, 320 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp105] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp112] = {
+ .set_mask = 3 << 5, /* 8 samples / second */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 125,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+ },
+ [tmp175] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp275] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp75] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp75b] = { /* not one-shot mode, conversion rate 37 Hz */
+ .clr_mask = 1 << 7 | 3 << 5,
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 37,
+ .sample_times = (unsigned int []){ MSEC_PER_SEC / 37,
+ MSEC_PER_SEC / 18,
+ MSEC_PER_SEC / 9, MSEC_PER_SEC / 4 },
+ .num_sample_times = 4,
+ },
+ [tmp75c] = {
+ .clr_mask = 1 << 5, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 12,
+ }
+};
+
static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
{
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
+static int lm75_write_config(struct lm75_data *data, u8 set_mask,
+ u8 clr_mask)
+{
+ u8 value;
+
+ clr_mask |= LM75_SHUTDOWN;
+ value = data->current_conf & ~clr_mask;
+ value |= set_mask;
+
+ if (data->current_conf != value) {
+ s32 err;
+
+ err = i2c_smbus_write_byte_data(data->client, LM75_REG_CONF,
+ value);
+ if (err)
+ return err;
+ data->current_conf = value;
+ }
+ return 0;
+}
+
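A worked pass through lm75_write_config() above: with current_conf = 0x00, set_mask = 3 << 5 (0x60) and clr_mask = 0, the computed value is (0x00 & ~LM75_SHUTDOWN) | 0x60 = 0x60; it differs from the cache, so one SMBus write is issued and current_conf becomes 0x60. A second call with the same masks computes the same value and returns without touching the bus, which is the point of keeping the cache.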
static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
return 0;
}
-static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long temp)
+static int lm75_write_temp(struct device *dev, u32 attr, long temp)
{
struct lm75_data *data = dev_get_drvdata(dev);
u8 resolution;
int reg;
- if (type != hwmon_temp)
- return -EINVAL;
-
switch (attr) {
case hwmon_temp_max:
reg = LM75_REG_MAX;
* Resolution of limit registers is assumed to be the same as the
* temperature input register resolution unless given explicitly.
*/
- if (data->resolution_limits)
- resolution = data->resolution_limits;
+ if (data->params->resolution_limits)
+ resolution = data->params->resolution_limits;
else
resolution = data->resolution;
temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
1000) << (16 - resolution);
- return regmap_write(data->regmap, reg, temp);
+ return regmap_write(data->regmap, reg, (u16)temp);
+}
+
+static int lm75_update_interval(struct device *dev, long val)
+{
+ struct lm75_data *data = dev_get_drvdata(dev);
+ unsigned int reg;
+ u8 index;
+ s32 err;
+
+ index = find_closest(val, data->params->sample_times,
+ (int)data->params->num_sample_times);
+
+ switch (data->kind) {
+ default:
+ err = lm75_write_config(data, lm75_sample_set_masks[index],
+ LM75_SAMPLE_CLEAR_MASK);
+ if (err)
+ return err;
+
+ data->sample_time = data->params->sample_times[index];
+ if (data->params->resolutions)
+ data->resolution = data->params->resolutions[index];
+ break;
+ case tmp112:
+ err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
+ if (err < 0)
+ return err;
+ reg &= ~0x00c0;
+ reg |= (3 - index) << 6;
+ err = regmap_write(data->regmap, LM75_REG_CONF, reg);
+ if (err < 0)
+ return err;
+ data->sample_time = data->params->sample_times[index];
+ break;
+ case pct2075:
+ err = i2c_smbus_write_byte_data(data->client, PCT2075_REG_IDLE,
+ index + 1);
+ if (err)
+ return err;
+ data->sample_time = data->params->sample_times[index];
+ break;
+ }
+ return 0;
+}
+
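Making the index arithmetic in lm75_update_interval() concrete for the tmp112 branch: with sample_times { 125, 250, 1000, 4000 }, a requested interval of 300 ms makes find_closest() return index 1 (250 ms is nearest), and the rate field is written as (3 - 1) << 6 = 0x80 inside the 0x00c0 mask.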
+static int lm75_write_chip(struct device *dev, u32 attr, long val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return lm75_update_interval(dev, val);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return lm75_write_chip(dev, attr, val);
+ case hwmon_temp:
+ return lm75_write_temp(dev, attr, val);
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct lm75_data *config_data = data;
+
switch (type) {
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
+ if (config_data->params->num_sample_times > 1)
+ return 0644;
return 0444;
}
break;
static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
{
- return reg == LM75_REG_TEMP;
+ return reg == LM75_REG_TEMP || reg == LM75_REG_CONF;
}
static const struct regmap_config lm75_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
- .max_register = LM75_REG_MAX,
+ .max_register = PCT2075_REG_IDLE,
.writeable_reg = lm75_is_writeable_reg,
.volatile_reg = lm75_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
struct device *hwmon_dev;
struct lm75_data *data;
int status, err;
- u8 set_mask, clr_mask;
- int new;
enum lm75_type kind;
if (client->dev.of_node)
return -ENOMEM;
data->client = client;
+ data->kind = kind;
data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
if (IS_ERR(data->regmap))
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
*/
- set_mask = 0;
- clr_mask = LM75_SHUTDOWN; /* continuous conversions */
-
- switch (kind) {
- case adt75:
- clr_mask |= 1 << 5; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 8;
- break;
- case ds1775:
- case ds75:
- case stds75:
- clr_mask |= 3 << 5;
- set_mask |= 2 << 5; /* 11-bit mode */
- data->resolution = 11;
- data->sample_time = MSEC_PER_SEC;
- break;
- case stlm75:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 5;
- break;
- case ds7505:
- set_mask |= 3 << 5; /* 12-bit mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case g751:
- case lm75:
- case lm75a:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 2;
- break;
- case lm75b:
- data->resolution = 11;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case max6625:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case max6626:
- data->resolution = 12;
- data->resolution_limits = 9;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case max31725:
- data->resolution = 16;
- data->sample_time = MSEC_PER_SEC / 8;
- break;
- case tcn75:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 8;
- break;
- case mcp980x:
- data->resolution_limits = 9;
- /* fall through */
- case tmp100:
- case tmp101:
- set_mask |= 3 << 5; /* 12-bit mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC;
- clr_mask |= 1 << 7; /* not one-shot mode */
- break;
- case tmp112:
- set_mask |= 3 << 5; /* 12-bit mode */
- clr_mask |= 1 << 7; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case tmp105:
- case tmp175:
- case tmp275:
- case tmp75:
- set_mask |= 3 << 5; /* 12-bit mode */
- clr_mask |= 1 << 7; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 2;
- break;
- case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
- clr_mask |= 1 << 7 | 0x3 << 5;
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 37;
- break;
- case tmp75c:
- clr_mask |= 1 << 5; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- }
- /* configure as specified */
+ data->params = &device_params[data->kind];
+
+ /* Save default sample time and resolution */
+ data->sample_time = data->params->default_sample_time;
+ data->resolution = data->params->default_resolution;
+
+ /* Cache original configuration */
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(dev, "Can't read config? %d\n", status);
return status;
}
data->orig_conf = status;
- new = status & ~clr_mask;
- new |= set_mask;
- if (status != new)
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
+ data->current_conf = status;
- err = devm_add_action_or_reset(dev, lm75_remove, data);
+ err = lm75_write_config(data, data->params->set_mask,
+ data->params->clr_mask);
if (err)
return err;
- dev_dbg(dev, "Config %02x\n", new);
+ err = devm_add_action_or_reset(dev, lm75_remove, data);
+ if (err)
+ return err;
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &lm75_chip_info,
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "pct2075", pct2075, },
{ "stds75", stds75, },
{ "stlm75", stlm75, },
{ "tcn75", tcn75, },
.compatible = "maxim,mcp980x",
.data = (void *)mcp980x
},
+ {
+ .compatible = "nxp,pct2075",
+ .data = (void *)pct2075
+ },
{
.compatible = "st,stds75",
.data = (void *)stds75
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#define LTC2990_STATUS 0x00
#define LTC2990_CONTROL 0x01
int ret;
struct device *hwmon_dev;
struct ltc2990_data *data;
- struct device_node *of_node = i2c->dev.of_node;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
data->i2c = i2c;
- if (of_node) {
- ret = of_property_read_u32_array(of_node, "lltc,meas-mode",
- data->mode, 2);
+ if (dev_fwnode(&i2c->dev)) {
+ ret = device_property_read_u32_array(&i2c->dev,
+ "lltc,meas-mode",
+ data->mode, 2);
if (ret < 0)
return ret;
*
* Chip #vin #fan #pwm #temp chip IDs man ID
* nct6106d 9 3 3 6+3 0xc450 0xc1 0x5ca3
+ * nct6116d 9 5 5 3+3 0xd280 0xc1 0x5ca3
* nct6775f 9 4 3 6+3 0xb470 0xc1 0x5ca3
* nct6776f 9 5 3 6+3 0xc330 0xc1 0x5ca3
* nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3
#define USE_ALTERNATE
-enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
- nct6795, nct6796, nct6797, nct6798 };
+enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
+ nct6793, nct6795, nct6796, nct6797, nct6798 };
/* used to set data->name = nct6775_device_names[data->sio_kind] */
static const char * const nct6775_device_names[] = {
"nct6106",
+ "nct6116",
"nct6775",
"nct6776",
"nct6779",
static const char * const nct6775_sio_names[] __initconst = {
"NCT6106D",
+ "NCT6116D",
"NCT6775F",
"NCT6776D/F",
"NCT6779D",
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_NCT6106_ID 0xc450
+#define SIO_NCT6116_ID 0xd280
#define SIO_NCT6775_ID 0xb470
#define SIO_NCT6776_ID 0xc330
#define SIO_NCT6779_ID 0xc560
static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
-static const u16 NCT6106_REG_PWM[] = { 0x119, 0x129, 0x139 };
static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c };
static const u16 NCT6106_REG_FAN_MODE[] = { 0x113, 0x123, 0x133 };
-static const u16 NCT6106_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130 };
static const u16 NCT6106_REG_TEMP_SOURCE[] = {
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5 };
[12] = 0x205,
};
+/* NCT6112D/NCT6114D/NCT6116D specific data */
+
+static const u16 NCT6116_REG_FAN[] = { 0x20, 0x22, 0x24, 0x26, 0x28 };
+static const u16 NCT6116_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4, 0xe6, 0xe8 };
+static const u16 NCT6116_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0xf6, 0xf5 };
+static const u16 NCT6116_FAN_PULSE_SHIFT[] = { 0, 2, 4, 6, 6 };
+
+static const u16 NCT6116_REG_PWM[] = { 0x119, 0x129, 0x139, 0x199, 0x1a9 };
+static const u16 NCT6116_REG_FAN_MODE[] = { 0x113, 0x123, 0x133, 0x193, 0x1a3 };
+static const u16 NCT6116_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130, 0x190, 0x1a0 };
+static const u16 NCT6116_REG_TEMP_SOURCE[] = {
+ 0xb0, 0xb1, 0xb2 };
+
+static const u16 NCT6116_REG_CRITICAL_TEMP[] = {
+ 0x11a, 0x12a, 0x13a, 0x19a, 0x1aa };
+static const u16 NCT6116_REG_CRITICAL_TEMP_TOLERANCE[] = {
+ 0x11b, 0x12b, 0x13b, 0x19b, 0x1ab };
+
+static const u16 NCT6116_REG_CRITICAL_PWM_ENABLE[] = {
+ 0x11c, 0x12c, 0x13c, 0x19c, 0x1ac };
+static const u16 NCT6116_REG_CRITICAL_PWM[] = {
+ 0x11d, 0x12d, 0x13d, 0x19d, 0x1ad };
+
+static const u16 NCT6116_REG_FAN_STEP_UP_TIME[] = {
+ 0x114, 0x124, 0x134, 0x194, 0x1a4 };
+static const u16 NCT6116_REG_FAN_STEP_DOWN_TIME[] = {
+ 0x115, 0x125, 0x135, 0x195, 0x1a5 };
+static const u16 NCT6116_REG_FAN_STOP_OUTPUT[] = {
+ 0x116, 0x126, 0x136, 0x196, 0x1a6 };
+static const u16 NCT6116_REG_FAN_START_OUTPUT[] = {
+ 0x117, 0x127, 0x137, 0x197, 0x1a7 };
+static const u16 NCT6116_REG_FAN_STOP_TIME[] = {
+ 0x118, 0x128, 0x138, 0x198, 0x1a8 };
+static const u16 NCT6116_REG_TOLERANCE_H[] = {
+ 0x112, 0x122, 0x132, 0x192, 0x1a2 };
+
+static const u16 NCT6116_REG_TARGET[] = {
+ 0x111, 0x121, 0x131, 0x191, 0x1a1 };
+
+static const u16 NCT6116_REG_AUTO_TEMP[] = {
+ 0x160, 0x170, 0x180, 0x1d0, 0x1e0 };
+static const u16 NCT6116_REG_AUTO_PWM[] = {
+ 0x164, 0x174, 0x184, 0x1d4, 0x1e4 };
+
+static const s8 NCT6116_ALARM_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
+ 9, -1, -1, -1, -1, -1, -1, /* in8..in14 */
+ -1, /* unused */
+ 32, 33, 34, 35, 36, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, -1, -1, -1, /* temp1..temp6 */
+ 48, -1 /* intrusion0, intrusion1 */
+};
+
+static const s8 NCT6116_BEEP_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
+ 9, 10, 11, 12, -1, -1, -1, /* in8..in14 */
+ 32, /* global beep enable */
+ 24, 25, 26, 27, 28, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, -1, -1, -1, /* temp1..temp6 */
+ 34, -1 /* intrusion0, intrusion1 */
+};
+
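A reduced sketch of how such tables are typically consumed (illustrative, not the driver's code): each entry is a bit index into a wide status word assembled from the chip's alarm/beep registers, with -1 marking a channel the chip lacks.

    #include <stdbool.h>
    #include <stdint.h>

    static bool channel_flag(uint64_t status, const signed char *bits, int ch)
    {
            return bits[ch] >= 0 && (status & (1ULL << bits[ch]));
    }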
static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
{
if (mode == 0 && pwm == 255)
return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
reg == 0x111 || reg == 0x121 || reg == 0x131;
+ case nct6116:
+ return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+ reg == 0x26 || reg == 0x28 || reg == 0xe0 || reg == 0xe2 ||
+ reg == 0xe4 || reg == 0xe6 || reg == 0xe8 || reg == 0x111 ||
+ reg == 0x121 || reg == 0x131 || reg == 0x191 || reg == 0x1a1;
case nct6775:
return (((reg & 0xff00) == 0x100 ||
(reg & 0xff00) == 0x200) &&
data->auto_pwm[i][data->auto_pwm_num] = 0xff;
break;
case nct6106:
+ case nct6116:
case nct6779:
case nct6791:
case nct6792:
case nct6776:
break; /* always enabled, nothing to do */
case nct6106:
+ case nct6116:
case nct6779:
case nct6791:
case nct6792:
fan3pin = !(cr24 & 0x80);
pwm3pin = cr24 & 0x08;
+ } else if (data->kind == nct6116) {
+ int cr1a = superio_inb(sioreg, 0x1a);
+ int cr1b = superio_inb(sioreg, 0x1b);
+ int cr24 = superio_inb(sioreg, 0x24);
+ int cr2a = superio_inb(sioreg, 0x2a);
+ int cr2b = superio_inb(sioreg, 0x2b);
+ int cr2f = superio_inb(sioreg, 0x2f);
+
+ fan3pin = !(cr2b & 0x10);
+ fan4pin = (cr2b & 0x80) || // pin 1(2)
+ (!(cr2f & 0x10) && (cr1a & 0x04)); // pin 65(66)
+ fan5pin = (cr2b & 0x80) || // pin 126(127)
+ (!(cr1b & 0x03) && (cr2a & 0x02)); // pin 94(96)
+
+ pwm3pin = fan3pin && (cr24 & 0x08);
+ pwm4pin = fan4pin;
+ pwm5pin = fan5pin;
} else {
/*
* NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D,
data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
- data->REG_PWM[0] = NCT6106_REG_PWM;
+ data->REG_PWM[0] = NCT6116_REG_PWM;
data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
data->REG_CRITICAL_PWM = NCT6106_REG_CRITICAL_PWM;
data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
data->REG_TEMP_SOURCE = NCT6106_REG_TEMP_SOURCE;
- data->REG_TEMP_SEL = NCT6106_REG_TEMP_SEL;
+ data->REG_TEMP_SEL = NCT6116_REG_TEMP_SEL;
data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
+ break;
+ case nct6116:
+ data->in_num = 9;
+ data->pwm_num = 3;
+ data->auto_pwm_num = 4;
+ data->temp_fixed_num = 3;
+ data->num_temp_alarms = 3;
+ data->num_temp_beeps = 3;
+
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+
+ data->temp_label = nct6776_temp_label;
+ data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
+
+ data->REG_VBAT = NCT6106_REG_VBAT;
+ data->REG_DIODE = NCT6106_REG_DIODE;
+ data->DIODE_MASK = NCT6106_DIODE_MASK;
+ data->REG_VIN = NCT6106_REG_IN;
+ data->REG_IN_MINMAX[0] = NCT6106_REG_IN_MIN;
+ data->REG_IN_MINMAX[1] = NCT6106_REG_IN_MAX;
+ data->REG_TARGET = NCT6116_REG_TARGET;
+ data->REG_FAN = NCT6116_REG_FAN;
+ data->REG_FAN_MODE = NCT6116_REG_FAN_MODE;
+ data->REG_FAN_MIN = NCT6116_REG_FAN_MIN;
+ data->REG_FAN_PULSES = NCT6116_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6116_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6116_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6116_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6116_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6116_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6116_REG_PWM;
+ data->REG_PWM[1] = NCT6116_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6116_REG_FAN_STOP_OUTPUT;
+ data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6106_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM_READ = NCT6106_REG_PWM_READ;
+ data->REG_PWM_MODE = NCT6106_REG_PWM_MODE;
+ data->PWM_MODE_MASK = NCT6106_PWM_MODE_MASK;
+ data->REG_AUTO_TEMP = NCT6116_REG_AUTO_TEMP;
+ data->REG_AUTO_PWM = NCT6116_REG_AUTO_PWM;
+ data->REG_CRITICAL_TEMP = NCT6116_REG_CRITICAL_TEMP;
+ data->REG_CRITICAL_TEMP_TOLERANCE
+ = NCT6116_REG_CRITICAL_TEMP_TOLERANCE;
+ data->REG_CRITICAL_PWM_ENABLE = NCT6116_REG_CRITICAL_PWM_ENABLE;
+ data->CRITICAL_PWM_ENABLE_MASK
+ = NCT6106_CRITICAL_PWM_ENABLE_MASK;
+ data->REG_CRITICAL_PWM = NCT6116_REG_CRITICAL_PWM;
+ data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
+ data->REG_TEMP_SOURCE = NCT6116_REG_TEMP_SOURCE;
+ data->REG_TEMP_SEL = NCT6116_REG_TEMP_SEL;
+ data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6106_REG_WEIGHT_TEMP_BASE;
+ data->REG_ALARM = NCT6106_REG_ALARM;
+ data->ALARM_BITS = NCT6116_ALARM_BITS;
+ data->REG_BEEP = NCT6106_REG_BEEP;
+ data->BEEP_BITS = NCT6116_BEEP_BITS;
+
+ reg_temp = NCT6106_REG_TEMP;
+ reg_temp_mon = NCT6106_REG_TEMP_MON;
+ num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+ reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
+
break;
case nct6775:
data->in_num = 9;
data->have_vid = (cr2a & 0x60) == 0x40;
break;
case nct6106:
+ case nct6116:
case nct6779:
case nct6791:
case nct6792:
NCT6775_REG_CR_FAN_DEBOUNCE);
switch (data->kind) {
case nct6106:
+ case nct6116:
tmp |= 0xe0;
break;
case nct6775:
case SIO_NCT6106_ID:
sio_data->kind = nct6106;
break;
+ case SIO_NCT6116_ID:
+ sio_data->kind = nct6116;
+ break;
case SIO_NCT6775_ID:
sio_data->kind = nct6775;
break;
#define DTS_T_CTRL1_REG 0x27
#define VT_ADC_MD_REG 0x2E
+#define VSEN1_HV_LL_REG 0x02 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_LV_LL_REG 0x03 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_HV_HL_REG 0x00 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_LV_HL_REG 0x01 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define SMI_STS1_REG 0xC1 /* Bank 0; SMI Status Register */
+#define SMI_STS3_REG 0xC3 /* Bank 0; SMI Status Register */
+#define SMI_STS5_REG 0xC5 /* Bank 0; SMI Status Register */
+#define SMI_STS7_REG 0xC7 /* Bank 0; SMI Status Register */
+#define SMI_STS8_REG 0xC8 /* Bank 0; SMI Status Register */
+
#define VSEN1_HV_REG 0x40 /* Bank 0; 2 regs (HV/LV) per sensor */
#define TEMP_CH1_HV_REG 0x42 /* Bank 0; same as VSEN2_HV */
#define LTD_HV_REG 0x62 /* Bank 0; 2 regs in VSEN range */
+#define LTD_HV_HL_REG 0x44 /* Bank 1; 1 reg for LTD */
+#define LTD_LV_HL_REG 0x45 /* Bank 1; 1 reg for LTD */
+#define LTD_HV_LL_REG 0x46 /* Bank 1; 1 reg for LTD */
+#define LTD_LV_LL_REG 0x47 /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_CH_REG 0x05 /* Bank 1; 1 reg per sensor */
+#define TEMP_CH1_W_REG 0x06 /* Bank 1; 1 reg per sensor */
+#define TEMP_CH1_WH_REG 0x07 /* Bank 1; 1 reg per sensor */
+#define TEMP_CH1_C_REG 0x04 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_C_REG 0x90 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_CH_REG 0x91 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_W_REG 0x92 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_WH_REG 0x93 /* Bank 1; 1 reg per sensor */
#define FANIN1_HV_REG 0x80 /* Bank 0; 2 regs (HV/LV) per sensor */
+#define FANIN1_HV_HL_REG 0x60 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define FANIN1_LV_HL_REG 0x61 /* Bank 1; 2 regs (HV/LV) per sensor */
#define T_CPU1_HV_REG 0xA0 /* Bank 0; 2 regs (HV/LV) per sensor */
#define PRTS_REG 0x03 /* Bank 2 */
#define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */
#define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */
+#define ENABLE_TSI BIT(1)
+
static const unsigned short normal_i2c[] = {
0x2d, 0x2e, I2C_CLIENT_END
};
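The SMI status layout used in the reads below packs eight status bits per register, so channel n of a group maps to its base register plus (n >> 3), bit (n & 0x07). A sketch with hypothetical helper names:

static unsigned int smi_status_reg(unsigned int base, int channel)
{
	return base + (channel >> 3);	/* eight status bits per register */
}

static unsigned int smi_status_bit(int channel)
{
	return channel & 0x07;
}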
u8 fan_mode[FANCTL_MAX];
u8 enable_dts;
u8 has_dts;
+ u8 temp_mode; /* 0: TR mode, 1: TD mode */
};
/* Access functions */
rpm = 1350000 / cnt;
*val = rpm;
return 0;
+ case hwmon_fan_min:
+ ret = nct7904_read_reg16(data, BANK_1,
+ FANIN1_HV_HL_REG + channel * 2);
+ if (ret < 0)
+ return ret;
+ cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
+ if (cnt == 0x1fff)
+ rpm = 0;
+ else
+ rpm = 1350000 / cnt;
+ *val = rpm;
+ return 0;
+ case hwmon_fan_alarm:
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS5_REG + (channel >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (channel & 0x07)) & 1;
+ return 0;
default:
return -EOPNOTSUPP;
}
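A sketch of the fan-limit encoding assumed above: the 13-bit count is split across an HV/LV register pair read as one 16-bit value, bits 12:5 in HV and bits 4:0 in LV, and RPM is derived as 1350000 / count, with 0x1fff meaning no valid reading. Illustrative helper only; the zero guard is added here just to keep the sketch safe:

static unsigned long fan_rpm_from_hvlv(unsigned int ret16)
{
	unsigned int cnt = ((ret16 & 0xff00) >> 3) | (ret16 & 0x1f);

	return (cnt == 0x1fff || cnt == 0) ? 0 : 1350000 / cnt;
}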
{
const struct nct7904_data *data = _data;
- if (attr == hwmon_fan_input && data->fanin_mask & (1 << channel))
- return 0444;
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_alarm:
+ if (data->fanin_mask & (1 << channel))
+ return 0444;
+ break;
+ case hwmon_fan_min:
+ if (data->fanin_mask & (1 << channel))
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
volt *= 6; /* 0.006V scale */
*val = volt;
return 0;
+ case hwmon_in_min:
+ ret = nct7904_read_reg16(data, BANK_1,
+ VSEN1_HV_LL_REG + index * 4);
+ if (ret < 0)
+ return ret;
+ volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+ if (index < 14)
+ volt *= 2; /* 0.002V scale */
+ else
+ volt *= 6; /* 0.006V scale */
+ *val = volt;
+ return 0;
+ case hwmon_in_max:
+ ret = nct7904_read_reg16(data, BANK_1,
+ VSEN1_HV_HL_REG + index * 4);
+ if (ret < 0)
+ return ret;
+ volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+ if (index < 14)
+ volt *= 2; /* 0.002V scale */
+ else
+ volt *= 6; /* 0.006V scale */
+ *val = volt;
+ return 0;
+ case hwmon_in_alarm:
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS1_REG + (index >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (index & 0x07)) & 1;
+ return 0;
default:
return -EOPNOTSUPP;
}
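Similarly, a sketch of the 11-bit voltage encoding behind the min/max reads above: bits 10:3 come from the HV register, bits 2:0 from LV, scaled at 2 mV per step for the first fourteen inputs and 6 mV otherwise (illustrative helper, not from the patch):

static long in_mv_from_hvlv(unsigned int ret16, int index)
{
	long volt = ((ret16 & 0xff00) >> 5) | (ret16 & 0x7);

	return volt * (index < 14 ? 2 : 6);	/* millivolts */
}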
const struct nct7904_data *data = _data;
int index = nct7904_chan_to_index[channel];
- if (channel > 0 && attr == hwmon_in_input &&
- (data->vsen_mask & BIT(index)))
- return 0444;
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_alarm:
+ if (channel > 0 && (data->vsen_mask & BIT(index)))
+ return 0444;
+ break;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ if (channel > 0 && (data->vsen_mask & BIT(index)))
+ return 0644;
+ break;
+ default:
+ break;
+ }
return 0;
}
{
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
+ unsigned int reg1, reg2, reg3;
switch (attr) {
case hwmon_temp_input:
temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
*val = sign_extend32(temp, 10) * 125;
return 0;
+ case hwmon_temp_alarm:
+ if (channel == 4) {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS3_REG);
+ if (ret < 0)
+ return ret;
+ *val = (ret >> 1) & 1;
+ } else if (channel < 4) {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS1_REG);
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (((channel * 2) + 1) & 0x07)) & 1;
+ } else {
+ if ((channel - 5) < 4) {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS7_REG +
+ ((channel - 5) >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> ((channel - 5) & 0x07)) & 1;
+ } else {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS8_REG +
+ ((channel - 5) >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (((channel - 5) & 0x07) - 4))
+ & 1;
+ }
+ }
+ return 0;
+ case hwmon_temp_type:
+ if (channel < 5) {
+ if ((data->tcpu_mask >> channel) & 0x01) {
+ if ((data->temp_mode >> channel) & 0x01)
+ *val = 3; /* TD */
+ else
+ *val = 4; /* TR */
+ } else {
+ *val = 0;
+ }
+ } else {
+ if ((data->has_dts >> (channel - 5)) & 0x01) {
+ if (data->enable_dts & ENABLE_TSI)
+ *val = 5; /* TSI */
+ else
+ *val = 6; /* PECI */
+ } else {
+ *val = 0;
+ }
+ }
+ return 0;
+ case hwmon_temp_max:
+ reg1 = LTD_HV_LL_REG;
+ reg2 = TEMP_CH1_W_REG;
+ reg3 = DTS_T_CPU1_W_REG;
+ break;
+ case hwmon_temp_max_hyst:
+ reg1 = LTD_LV_LL_REG;
+ reg2 = TEMP_CH1_WH_REG;
+ reg3 = DTS_T_CPU1_WH_REG;
+ break;
+ case hwmon_temp_crit:
+ reg1 = LTD_HV_HL_REG;
+ reg2 = TEMP_CH1_C_REG;
+ reg3 = DTS_T_CPU1_C_REG;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg1 = LTD_LV_HL_REG;
+ reg2 = TEMP_CH1_CH_REG;
+ reg3 = DTS_T_CPU1_CH_REG;
+ break;
default:
return -EOPNOTSUPP;
}
+
+ if (channel == 4)
+ ret = nct7904_read_reg(data, BANK_1, reg1);
+ else if (channel < 5)
+ ret = nct7904_read_reg(data, BANK_1,
+ reg2 + channel * 8);
+ else
+ ret = nct7904_read_reg(data, BANK_1,
+ reg3 + (channel - 5) * 4);
+
+ if (ret < 0)
+ return ret;
+ *val = ret * 1000;
+ return 0;
}
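The register selection above follows a per-class layout: one register set for LTD on channel 4, an 8-byte stride for the analog temperature channels, and a 4-byte stride for the DTS channels. The same arithmetic as a compact, hypothetical helper:

static unsigned int temp_limit_reg(int channel, unsigned int ltd_reg,
				   unsigned int tr_reg, unsigned int dts_reg)
{
	if (channel == 4)			/* LTD: single register */
		return ltd_reg;
	if (channel < 5)			/* analog channels: stride 8 */
		return tr_reg + channel * 8;
	return dts_reg + (channel - 5) * 4;	/* DTS channels: stride 4 */
}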
static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
{
const struct nct7904_data *data = _data;
- if (attr == hwmon_temp_input) {
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_alarm:
+ case hwmon_temp_type:
if (channel < 5) {
if (data->tcpu_mask & BIT(channel))
return 0444;
if (data->has_dts & BIT(channel - 5))
return 0444;
}
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel < 5) {
+ if (data->tcpu_mask & BIT(channel))
+ return 0644;
+ } else {
+ if (data->has_dts & BIT(channel - 5))
+ return 0644;
+ }
+ break;
+ default:
+ break;
}
return 0;
}
}
+static int nct7904_write_temp(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7904_data *data = dev_get_drvdata(dev);
+ int ret;
+ unsigned int reg1, reg2, reg3;
+
+ val = clamp_val(val / 1000, -128, 127);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg1 = LTD_HV_LL_REG;
+ reg2 = TEMP_CH1_W_REG;
+ reg3 = DTS_T_CPU1_W_REG;
+ break;
+ case hwmon_temp_max_hyst:
+ reg1 = LTD_LV_LL_REG;
+ reg2 = TEMP_CH1_WH_REG;
+ reg3 = DTS_T_CPU1_WH_REG;
+ break;
+ case hwmon_temp_crit:
+ reg1 = LTD_HV_HL_REG;
+ reg2 = TEMP_CH1_C_REG;
+ reg3 = DTS_T_CPU1_C_REG;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg1 = LTD_LV_HL_REG;
+ reg2 = TEMP_CH1_CH_REG;
+ reg3 = DTS_T_CPU1_CH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (channel == 4)
+ ret = nct7904_write_reg(data, BANK_1, reg1, val);
+ else if (channel < 5)
+ ret = nct7904_write_reg(data, BANK_1,
+ reg2 + channel * 8, val);
+ else
+ ret = nct7904_write_reg(data, BANK_1,
+ reg3 + (channel - 5) * 4, val);
+
+ return ret;
+}
+
+static int nct7904_write_fan(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7904_data *data = dev_get_drvdata(dev);
+ int ret;
+ u8 tmp;
+
+ switch (attr) {
+ case hwmon_fan_min:
+ if (val <= 0)
+ return -EINVAL;
+
+ val = clamp_val(DIV_ROUND_CLOSEST(1350000, val), 1, 0x1fff);
+ tmp = (val >> 5) & 0xff;
+ ret = nct7904_write_reg(data, BANK_1,
+ FANIN1_HV_HL_REG + channel * 2, tmp);
+ if (ret < 0)
+ return ret;
+ tmp = val & 0x1f;
+ ret = nct7904_write_reg(data, BANK_1,
+ FANIN1_LV_HL_REG + channel * 2, tmp);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
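Writing a fan limit reverses the read-side encoding; a minimal sketch of the RPM-to-register split performed in the function above (helper is illustrative, not from the patch):

static void fan_rpm_to_hvlv(long rpm, u8 *hv, u8 *lv)
{
	/* caller must reject rpm <= 0, as the patch does */
	unsigned int cnt = clamp_val(DIV_ROUND_CLOSEST(1350000, rpm),
				     1, 0x1fff);	/* 13-bit count */

	*hv = (cnt >> 5) & 0xff;	/* bits 12:5 */
	*lv = cnt & 0x1f;		/* bits 4:0 */
}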
+static int nct7904_write_in(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7904_data *data = dev_get_drvdata(dev);
+ int ret, index, tmp;
+
+ index = nct7904_chan_to_index[channel];
+
+ if (index < 14)
+ val = val / 2; /* 0.002V scale */
+ else
+ val = val / 6; /* 0.006V scale */
+
+ val = clamp_val(val, 0, 0x7ff);
+
+ switch (attr) {
+ case hwmon_in_min:
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_LV_LL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp &= ~0x7;
+ tmp |= val & 0x7;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_LV_LL_REG + index * 4, tmp);
+ if (ret < 0)
+ return ret;
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_HV_LL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp = (val >> 3) & 0xff;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_HV_LL_REG + index * 4, tmp);
+ return ret;
+ case hwmon_in_max:
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_LV_HL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp &= ~0x7;
+ tmp |= val & 0x7;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_LV_HL_REG + index * 4, tmp);
+ if (ret < 0)
+ return ret;
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_HV_HL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp = (val >> 3) & 0xff;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_HV_HL_REG + index * 4, tmp);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
long val)
{
u32 attr, int channel, long val)
{
switch (type) {
+ case hwmon_in:
+ return nct7904_write_in(dev, attr, channel, val);
+ case hwmon_fan:
+ return nct7904_write_fan(dev, attr, channel, val);
case hwmon_pwm:
return nct7904_write_pwm(dev, attr, channel, val);
+ case hwmon_temp:
+ return nct7904_write_temp(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
static const struct hwmon_channel_info *nct7904_info[] = {
HWMON_CHANNEL_INFO(in,
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT),
+ /* dummy, skipped in is_visible */
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM),
HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT),
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
HWMON_CHANNEL_INFO(temp,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT),
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST),
NULL
};
if (ret < 0)
return ret;
+ data->temp_mode = 0;
for (i = 0; i < 4; i++) {
val = (ret & (0x03 << i)) >> (i * 2);
bit = (1 << i);
if (val == 0)
data->tcpu_mask &= ~bit;
+ else if (val == 0x1 || val == 0x2)
+ data->temp_mode |= bit;
}
/* PECI */
if (ret < 0)
return ret;
data->has_dts = ret & 0xF;
- if (data->enable_dts & 0x2) {
+ if (data->enable_dts & ENABLE_TSI) {
ret = nct7904_read_reg(data, BANK_0, DTS_T_CTRL1_REG);
if (ret < 0)
return ret;
spin_lock_init(&data->fan_lock[i]);
data->fan_irq[i] = platform_get_irq(pdev, i);
- if (data->fan_irq[i] < 0) {
- dev_err(dev, "get IRQ fan%d failed\n", i);
+ if (data->fan_irq[i] < 0)
return data->fan_irq[i];
- }
sprintf(name, "NPCM7XX-FAN-MD%d", i);
ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
This driver can also be built as a module. If so, the module will
be called ibm-cffps.
+config SENSORS_INSPUR_IPSPS
+ tristate "INSPUR Power System Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for the INSPUR
+ Power System power supply.
+
+ This driver can also be built as a module. If so, the module will
+ be called inspur-ipsps.
+
config SENSORS_IR35221
tristate "Infineon IR35221"
help
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
+obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/pmbus.h>
#include "pmbus.h"
#define CFFPS_PN_CMD 0x9B
#define CFFPS_SN_CMD 0x9E
#define CFFPS_CCIN_CMD 0xBD
-#define CFFPS_FW_CMD_START 0xFA
-#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_FW_CMD 0xFA
+#define CFFPS1_FW_NUM_BYTES 4
+#define CFFPS2_FW_NUM_WORDS 3
#define CFFPS_SYS_CONFIG_CMD 0xDA
#define CFFPS_INPUT_HISTORY_CMD 0xD6
CFFPS_DEBUGFS_NUM_ENTRIES
};
+enum versions { cffps1, cffps2 };
+
struct ibm_cffps_input_history {
struct mutex update_lock;
unsigned long last_update;
};
struct ibm_cffps {
+ enum versions version;
struct i2c_client *client;
struct ibm_cffps_input_history input_history;
struct ibm_cffps *psu = to_psu(idxp, idx);
char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ pmbus_set_page(psu->client, 0);
+
switch (idx) {
case CFFPS_DEBUGFS_INPUT_HISTORY:
return ibm_cffps_read_input_history(psu, buf, count, ppos);
rc = snprintf(data, 5, "%04X", rc);
goto done;
case CFFPS_DEBUGFS_FW:
- for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
- rc = i2c_smbus_read_byte_data(psu->client,
- CFFPS_FW_CMD_START + i);
- if (rc < 0)
- return rc;
+ switch (psu->version) {
+ case cffps1:
+ for (i = 0; i < CFFPS1_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD +
+ i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
- snprintf(&data[i * 2], 3, "%02X", rc);
- }
+ rc = i * 2;
+ break;
+ case cffps2:
+ for (i = 0; i < CFFPS2_FW_NUM_WORDS; ++i) {
+ rc = i2c_smbus_read_word_data(psu->client,
+ CFFPS_FW_CMD +
+ i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 4], 5, "%04X", rc);
+ }
- rc = i * 2;
+ rc = i * 4;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
goto done;
default:
return -EINVAL;
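The two firmware-version formats differ only in access width: version 1 power supplies expose four byte-wide registers (rendered as eight hex digits), version 2 three word-wide registers (twelve hex digits), both starting at CFFPS_FW_CMD. A sketch, assuming the constants defined above are in scope:

static size_t cffps_fw_strlen(enum versions v)
{
	/* cffps1: 4 bytes * 2 hex chars; cffps2: 3 words * 4 hex chars */
	return v == cffps1 ? CFFPS1_FW_NUM_BYTES * 2 : CFFPS2_FW_NUM_WORDS * 4;
}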
psu->led_state = CFFPS_LED_ON;
}
+ pmbus_set_page(psu->client, 0);
+
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
psu->led_state);
if (rc < 0)
if (led_cdev->brightness == LED_OFF)
return 0;
+ pmbus_set_page(psu->client, 0);
+
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
CFFPS_LED_BLINK);
if (rc < 0)
dev_warn(dev, "failed to register led class: %d\n", rc);
}
-static struct pmbus_driver_info ibm_cffps_info = {
- .pages = 1,
- .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
- PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
- PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
- PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
- PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
- .read_byte_data = ibm_cffps_read_byte_data,
- .read_word_data = ibm_cffps_read_word_data,
+static struct pmbus_driver_info ibm_cffps_info[] = {
+ [cffps1] = {
+ .pages = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_STATUS_FAN12,
+ .read_byte_data = ibm_cffps_read_byte_data,
+ .read_word_data = ibm_cffps_read_word_data,
+ },
+ [cffps2] = {
+ .pages = 2,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_STATUS_FAN12,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
+ .read_byte_data = ibm_cffps_read_byte_data,
+ .read_word_data = ibm_cffps_read_word_data,
+ },
};
static struct pmbus_platform_data ibm_cffps_pdata = {
const struct i2c_device_id *id)
{
int i, rc;
+ enum versions vs;
struct dentry *debugfs;
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
+ const void *md = of_device_get_match_data(&client->dev);
+
+ if (md)
+ vs = (enum versions)md;
+ else if (id)
+ vs = (enum versions)id->driver_data;
+ else
+ vs = cffps1;
client->dev.platform_data = &ibm_cffps_pdata;
- rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
if (rc)
return rc;
if (!psu)
return 0;
+ psu->version = vs;
psu->client = client;
mutex_init(&psu->input_history.update_lock);
psu->input_history.last_update = jiffies - HZ;
}
static const struct i2c_device_id ibm_cffps_id[] = {
- { "ibm_cffps1", 1 },
+ { "ibm_cffps1", cffps1 },
+ { "ibm_cffps2", cffps2 },
{}
};
MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
static const struct of_device_id ibm_cffps_of_match[] = {
- { .compatible = "ibm,cffps1" },
+ {
+ .compatible = "ibm,cffps1",
+ .data = (void *)cffps1
+ },
+ {
+ .compatible = "ibm,cffps2",
+ .data = (void *)cffps2
+ },
{}
};
MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2019 Inspur Corp.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/hwmon-sysfs.h>
+
+#include "pmbus.h"
+
+#define IPSPS_REG_VENDOR_ID 0x99
+#define IPSPS_REG_MODEL 0x9A
+#define IPSPS_REG_FW_VERSION 0x9B
+#define IPSPS_REG_PN 0x9C
+#define IPSPS_REG_SN 0x9E
+#define IPSPS_REG_HW_VERSION 0xB0
+#define IPSPS_REG_MODE 0xFC
+
+#define MODE_ACTIVE 0x55
+#define MODE_STANDBY 0x0E
+#define MODE_REDUNDANCY 0x00
+
+#define MODE_ACTIVE_STRING "active"
+#define MODE_STANDBY_STRING "standby"
+#define MODE_REDUNDANCY_STRING "redundancy"
+
+enum ipsps_index {
+ vendor,
+ model,
+ fw_version,
+ part_number,
+ serial_number,
+ hw_version,
+ mode,
+ num_regs,
+};
+
+static const u8 ipsps_regs[num_regs] = {
+ [vendor] = IPSPS_REG_VENDOR_ID,
+ [model] = IPSPS_REG_MODEL,
+ [fw_version] = IPSPS_REG_FW_VERSION,
+ [part_number] = IPSPS_REG_PN,
+ [serial_number] = IPSPS_REG_SN,
+ [hw_version] = IPSPS_REG_HW_VERSION,
+ [mode] = IPSPS_REG_MODE,
+};
+
+static ssize_t ipsps_string_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ u8 reg;
+ int rc;
+ char *p;
+ char data[I2C_SMBUS_BLOCK_MAX + 1];
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ rc = i2c_smbus_read_block_data(client, reg, data);
+ if (rc < 0)
+ return rc;
+
+ /* filled with printable characters, ending with # */
+ p = memscan(data, '#', rc);
+ *p = '\0';
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", data);
+}
+
+static ssize_t ipsps_fw_version_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ u8 reg;
+ int rc;
+ u8 data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ rc = i2c_smbus_read_block_data(client, reg, data);
+ if (rc < 0)
+ return rc;
+
+ if (rc != 6)
+ return -EPROTO;
+
+ return snprintf(buf, PAGE_SIZE, "%u.%02u%u-%u.%02u\n",
+ data[1], data[2]/* < 100 */, data[3]/*< 10*/,
+ data[4], data[5]/* < 100 */);
+}
+
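A worked example of the format string above (the values are made up): a six-byte block reply whose payload bytes are 1, 2, 3, 4, 5 renders as "1.023-4.05", since data[2] and data[5] are zero-padded to two digits and data[3] is a single digit:

static void fw_version_example(void)
{
	char ver[16];

	snprintf(ver, sizeof(ver), "%u.%02u%u-%u.%02u", 1, 2, 3, 4, 5);
	/* ver now holds "1.023-4.05" */
}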
+static ssize_t ipsps_mode_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u8 reg;
+ int rc;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ rc = i2c_smbus_read_byte_data(client, reg);
+ if (rc < 0)
+ return rc;
+
+ switch (rc) {
+ case MODE_ACTIVE:
+ return snprintf(buf, PAGE_SIZE, "[%s] %s %s\n",
+ MODE_ACTIVE_STRING,
+ MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+ case MODE_STANDBY:
+ return snprintf(buf, PAGE_SIZE, "%s [%s] %s\n",
+ MODE_ACTIVE_STRING,
+ MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+ case MODE_REDUNDANCY:
+ return snprintf(buf, PAGE_SIZE, "%s %s [%s]\n",
+ MODE_ACTIVE_STRING,
+ MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+ default:
+ return snprintf(buf, PAGE_SIZE, "unspecified\n");
+ }
+}
+
+static ssize_t ipsps_mode_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ u8 reg;
+ int rc;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ if (sysfs_streq(MODE_STANDBY_STRING, buf)) {
+ rc = i2c_smbus_write_byte_data(client, reg,
+ MODE_STANDBY);
+ if (rc < 0)
+ return rc;
+ return count;
+ } else if (sysfs_streq(MODE_ACTIVE_STRING, buf)) {
+ rc = i2c_smbus_write_byte_data(client, reg,
+ MODE_ACTIVE);
+ if (rc < 0)
+ return rc;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static SENSOR_DEVICE_ATTR_RO(vendor, ipsps_string, vendor);
+static SENSOR_DEVICE_ATTR_RO(model, ipsps_string, model);
+static SENSOR_DEVICE_ATTR_RO(part_number, ipsps_string, part_number);
+static SENSOR_DEVICE_ATTR_RO(serial_number, ipsps_string, serial_number);
+static SENSOR_DEVICE_ATTR_RO(hw_version, ipsps_string, hw_version);
+static SENSOR_DEVICE_ATTR_RO(fw_version, ipsps_fw_version, fw_version);
+static SENSOR_DEVICE_ATTR_RW(mode, ipsps_mode, mode);
+
+static struct attribute *ipsps_attrs[] = {
+ &sensor_dev_attr_vendor.dev_attr.attr,
+ &sensor_dev_attr_model.dev_attr.attr,
+ &sensor_dev_attr_part_number.dev_attr.attr,
+ &sensor_dev_attr_serial_number.dev_attr.attr,
+ &sensor_dev_attr_hw_version.dev_attr.attr,
+ &sensor_dev_attr_fw_version.dev_attr.attr,
+ &sensor_dev_attr_mode.dev_attr.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(ipsps);
+
+static struct pmbus_driver_info ipsps_info = {
+ .pages = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_POUT | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
+ .groups = ipsps_groups,
+};
+
+static struct pmbus_platform_data ipsps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static int ipsps_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ client->dev.platform_data = &ipsps_pdata;
+ return pmbus_do_probe(client, id, &ipsps_info);
+}
+
+static const struct i2c_device_id ipsps_id[] = {
+ { "ipsps1", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ipsps_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id ipsps_of_match[] = {
+ { .compatible = "inspur,ipsps1" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ipsps_of_match);
+#endif
+
+static struct i2c_driver ipsps_driver = {
+ .driver = {
+ .name = "inspur-ipsps",
+ .of_match_table = of_match_ptr(ipsps_of_match),
+ },
+ .probe = ipsps_probe,
+ .remove = pmbus_do_remove,
+ .id_table = ipsps_id,
+};
+
+module_i2c_driver(ipsps_driver);
+
+MODULE_AUTHOR("John Wang");
+MODULE_DESCRIPTION("PMBus driver for Inspur Power System power supplies");
+MODULE_LICENSE("GPL");
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
-#define MAX37185_NUM_FAN_PAGES 6
-
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/pmbus.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include "pmbus.h"
};
module_platform_driver(rpi_hwmon_driver);
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:raspberrypi-hwmon");
static const unsigned char shtc1_cmd_measure_nonblocking_lpm[] = { 0x60, 0x9c };
/* command for reading the ID register */
static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
-/* constants for reading the ID register */
-#define SHTC1_ID 0x07
-#define SHTC1_ID_REG_MASK 0x1f
+/*
+ * constants for reading the ID register
+ * SHTC1: 0x0007 with mask 0x003f
+ * SHTW1: 0x0007 with mask 0x003f
+ * SHTC3: 0x0807 with mask 0x083f
+ */
+#define SHTC3_ID 0x0807
+#define SHTC3_ID_MASK 0x083f
+#define SHTC1_ID 0x0007
+#define SHTC1_ID_MASK 0x003f
/* delays for non-blocking i2c commands, both in us */
#define SHTC1_NONBLOCKING_WAIT_TIME_HPM 14400
#define SHTC1_NONBLOCKING_WAIT_TIME_LPM 1000
+#define SHTC3_NONBLOCKING_WAIT_TIME_HPM 12100
+#define SHTC3_NONBLOCKING_WAIT_TIME_LPM 800
#define SHTC1_CMD_LENGTH 2
#define SHTC1_RESPONSE_LENGTH 6
+enum shtcx_chips {
+ shtc1,
+ shtc3,
+};
+
struct shtc1_data {
struct i2c_client *client;
struct mutex update_lock;
unsigned int nonblocking_wait_time; /* in us */
struct shtc1_platform_data setup;
+ enum shtcx_chips chip;
int temperature; /* 1000 * temperature in dgr C */
int humidity; /* 1000 * relative humidity in %RH */
data->command = data->setup.blocking_io ?
shtc1_cmd_measure_blocking_hpm :
shtc1_cmd_measure_nonblocking_hpm;
- data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_HPM;
-
+ data->nonblocking_wait_time = (data->chip == shtc1) ?
+ SHTC1_NONBLOCKING_WAIT_TIME_HPM :
+ SHTC3_NONBLOCKING_WAIT_TIME_HPM;
} else {
data->command = data->setup.blocking_io ?
shtc1_cmd_measure_blocking_lpm :
shtc1_cmd_measure_nonblocking_lpm;
- data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_LPM;
+ data->nonblocking_wait_time = (data->chip == shtc1) ?
+ SHTC1_NONBLOCKING_WAIT_TIME_LPM :
+ SHTC3_NONBLOCKING_WAIT_TIME_LPM;
}
}
const struct i2c_device_id *id)
{
int ret;
- char id_reg[2];
+ u16 id_reg;
+ char id_reg_buf[2];
struct shtc1_data *data;
struct device *hwmon_dev;
+ enum shtcx_chips chip = id->driver_data;
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
dev_err(dev, "could not send read_id_reg command: %d\n", ret);
return ret < 0 ? ret : -ENODEV;
}
- ret = i2c_master_recv(client, id_reg, sizeof(id_reg));
- if (ret != sizeof(id_reg)) {
+ ret = i2c_master_recv(client, id_reg_buf, sizeof(id_reg_buf));
+ if (ret != sizeof(id_reg_buf)) {
dev_err(dev, "could not read ID register: %d\n", ret);
return -ENODEV;
}
- if ((id_reg[1] & SHTC1_ID_REG_MASK) != SHTC1_ID) {
- dev_err(dev, "ID register doesn't match\n");
+
+ id_reg = be16_to_cpup((__be16 *)id_reg_buf);
+ if (chip == shtc3) {
+ if ((id_reg & SHTC3_ID_MASK) != SHTC3_ID) {
+ dev_err(dev, "SHTC3 ID register does not match\n");
+ return -ENODEV;
+ }
+ } else if ((id_reg & SHTC1_ID_MASK) != SHTC1_ID) {
+ dev_err(dev, "SHTC1 ID register does not match\n");
return -ENODEV;
}
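The masked-ID comparison above generalizes to both chips; a compact restatement with a hypothetical helper and a made-up example value:

static bool shtcx_id_matches(u16 id_reg, u16 id, u16 mask)
{
	/* e.g. a raw ID of 0x0887: (0x0887 & 0x083f) == 0x0807 == SHTC3_ID */
	return (id_reg & mask) == id;
}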
data->setup.blocking_io = false;
data->setup.high_precision = true;
data->client = client;
+ data->chip = chip;
if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data;
/* device ID table */
static const struct i2c_device_id shtc1_id[] = {
- { "shtc1", 0 },
- { "shtw1", 0 },
+ { "shtc1", shtc1 },
+ { "shtw1", shtc1 },
+ { "shtc3", shtc3 },
{ }
};
MODULE_DEVICE_TABLE(i2c, shtc1_id);
data->client = client;
data->type = id->driver_data;
- data->cmdreg = i2c_new_dummy(adapter, (client->addr & ~SMM665_REGMASK)
+ data->cmdreg = i2c_new_dummy_device(adapter, (client->addr & ~SMM665_REGMASK)
| SMM665_CMDREG_BASE);
- if (!data->cmdreg)
- return -ENOMEM;
+ if (IS_ERR(data->cmdreg))
+ return PTR_ERR(data->cmdreg);
switch (data->type) {
case smm465:
}
for (i = 0; i < num_sc; i++) {
- data->lm75[i] = i2c_new_dummy(adapter, sc_addr[i]);
- if (!data->lm75[i]) {
+ data->lm75[i] = i2c_new_dummy_device(adapter, sc_addr[i]);
+ if (IS_ERR(data->lm75[i])) {
dev_err(&new_client->dev,
"Subclient %d registration at address 0x%x failed.\n",
i, sc_addr[i]);
- err = -ENOMEM;
+ err = PTR_ERR(data->lm75[i]);
if (i == 1)
goto ERROR_SC_3;
goto ERROR_SC_2;
struct i2c_adapter *adapter = client->adapter;
struct w83791d_data *data = i2c_get_clientdata(client);
int address = client->addr;
- int i, id, err;
+ int i, id;
u8 val;
id = i2c_adapter_id(adapter);
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
- err = -ENODEV;
- goto error_sc_0;
+ return -ENODEV;
}
}
w83791d_write(client, W83791D_REG_I2C_SUBADDR,
val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
if (!(val & 0x08))
- data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
+ data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + (val & 0x7));
if (!(val & 0x80)) {
- if ((data->lm75[0] != NULL) &&
+ if (!IS_ERR(data->lm75[0]) &&
((val & 0x7) == ((val >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclient\n",
data->lm75[0]->addr);
- err = -ENODEV;
- goto error_sc_1;
+ return -ENODEV;
}
- data->lm75[1] = i2c_new_dummy(adapter,
- 0x48 + ((val >> 4) & 0x7));
+ data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + ((val >> 4) & 0x7));
}
return 0;
-
-/* Undo inits in case of errors */
-
-error_sc_1:
- i2c_unregister_device(data->lm75[0]);
-error_sc_0:
- return err;
}
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &w83791d_group);
if (err)
- goto error3;
+ return err;
/* Check if pins of fan/pwm 4-5 are in use as GPIO */
has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10;
sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45);
error4:
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
-error3:
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
return err;
}
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
-
return 0;
}
static int
w83792d_detect_subclients(struct i2c_client *new_client)
{
- int i, id, err;
+ int i, id;
int address = new_client->addr;
u8 val;
struct i2c_adapter *adapter = new_client->adapter;
dev_err(&new_client->dev,
"invalid subclient address %d; must be 0x48-0x4f\n",
force_subclients[i]);
- err = -ENODEV;
- goto ERROR_SC_0;
+ return -ENODEV;
}
}
w83792d_write_value(new_client, W83792D_REG_I2C_SUBADDR,
val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
if (!(val & 0x08))
- data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
+ data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
+ 0x48 + (val & 0x7));
if (!(val & 0x80)) {
- if ((data->lm75[0] != NULL) &&
+ if (!IS_ERR(data->lm75[0]) &&
((val & 0x7) == ((val >> 4) & 0x7))) {
dev_err(&new_client->dev,
"duplicate addresses 0x%x, use force_subclient\n",
data->lm75[0]->addr);
- err = -ENODEV;
- goto ERROR_SC_1;
+ return -ENODEV;
}
- data->lm75[1] = i2c_new_dummy(adapter,
- 0x48 + ((val >> 4) & 0x7));
+ data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
+ 0x48 + ((val >> 4) & 0x7));
}
return 0;
-
-/* Undo inits in case of errors */
-
-ERROR_SC_1:
- i2c_unregister_device(data->lm75[0]);
-ERROR_SC_0:
- return err;
}
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
/* Register sysfs hooks */
err = sysfs_create_group(&dev->kobj, &w83792d_group);
if (err)
- goto exit_i2c_unregister;
+ return err;
/*
* Read GPIO enable register to check if pins for fan 4,5 are used as
sysfs_remove_group(&dev->kobj, &w83792d_group);
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
-exit_i2c_unregister:
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
return err;
}
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
-
return 0;
}
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
-
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
static int
w83793_detect_subclients(struct i2c_client *client)
{
- int i, id, err;
+ int i, id;
int address = client->addr;
u8 tmp;
struct i2c_adapter *adapter = client->adapter;
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
- err = -EINVAL;
- goto ERROR_SC_0;
+ return -EINVAL;
}
}
w83793_write_value(client, W83793_REG_I2C_SUBADDR,
tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
if (!(tmp & 0x08))
- data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
+ data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + (tmp & 0x7));
if (!(tmp & 0x80)) {
- if ((data->lm75[0] != NULL)
+ if (!IS_ERR(data->lm75[0])
&& ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclients\n", data->lm75[0]->addr);
- err = -ENODEV;
- goto ERROR_SC_1;
+ return -ENODEV;
}
- data->lm75[1] = i2c_new_dummy(adapter,
- 0x48 + ((tmp >> 4) & 0x7));
+ data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + ((tmp >> 4) & 0x7));
}
return 0;
-
- /* Undo inits in case of errors */
-
-ERROR_SC_1:
- i2c_unregister_device(data->lm75[0]);
-ERROR_SC_0:
- return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
-
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
free_mem:
kfree(data);
exit:
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
*
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Lewisburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+ .driver_data = (kernel_ulong_t)0,
+ },
{
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Tiger Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub PTI output data structures
*
err:
put_device(&src->dev);
- kfree(src);
return err;
}
static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
- u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ u32 val;
+
+ /* We do not support the SMBUS Quick command */
+ val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
if (adap->algo->reg_slave)
val |= I2C_FUNC_SLAVE;
dev->disable_int(dev);
dev->disable(dev);
+ synchronize_irq(dev->irq);
dev->slave = NULL;
pm_runtime_put(dev->dev);
struct completion msg_done;
struct clk *sclk;
struct i2c_client *slave;
+ int irq;
};
static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
writeb(0, priv->base + I2C_OFS_SVA0);
+ /*
+ * Wait for interrupt to finish. New slave irqs cannot happen because we
+ * cleared the slave address and, thus, only extension codes will be
+ * detected which do not use the slave ptr.
+ */
+ synchronize_irq(priv->irq);
priv->slave = NULL;
return 0;
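The teardown ordering here follows a general pattern: quiesce the interrupt source first, wait out any handler that is still running, and only then clear the pointer the handler dereferences. Sketched below; quiesce_slave_irqs() is a hypothetical stand-in for the register write above:

	quiesce_slave_irqs(priv);	/* stop new slave interrupts */
	synchronize_irq(priv->irq);	/* wait for a running handler */
	priv->slave = NULL;		/* now safe: no handler can race */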
{
struct em_i2c_device *priv;
struct resource *r;
- int irq, ret;
+ int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
em_i2c_reset(&priv->adap);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
goto err_clk;
if (ret)
goto err_clk;
- dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+ priv->irq);
return 0;
int i;
status = acpi_get_object_info(obj_handle, &info);
- if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID))
+ if (ACPI_FAILURE(status))
return AE_OK;
+ if (!(info->valid & ACPI_VALID_HID))
+ goto smo88xx_not_found;
+
hid = info->hardware_id.string;
if (!hid)
- return AE_OK;
+ goto smo88xx_not_found;
i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
if (i < 0)
- return AE_OK;
+ goto smo88xx_not_found;
+
+ kfree(info);
*((bool *)return_value) = true;
return AE_CTRL_TERMINATE;
+
+smo88xx_not_found:
+ kfree(info);
+ return AE_OK;
}
static bool is_dell_system_with_lis3lv02d(void)
}
/* Functions for DMA support */
-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
- dma_addr_t phy_addr)
+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+ dma_addr_t phy_addr)
{
struct imx_i2c_dma *dma;
struct dma_slave_config dma_sconfig;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return -ENOMEM;
+ return;
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
- return 0;
+ return;
fail_rx:
dma_release_channel(dma->chan_rx);
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
- /* return successfully if there is no dma support */
- return ret == -ENODEV ? 0 : ret;
}
static void i2c_imx_dma_callback(void *arg)
dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
i2c_imx->adapter.name);
+ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
/* Init DMA config if supported */
- ret = i2c_imx_dma_request(i2c_imx, phy_addr);
- if (ret < 0)
- goto del_adapter;
+ i2c_imx_dma_request(i2c_imx, phy_addr);
- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
-del_adapter:
- i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:
.max_num_msgs = 255,
};
+static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct mtk_i2c_compatible mt2712_compat = {
.regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
};
static const struct mtk_i2c_compatible mt8183_compat = {
+ .quirks = &mt8183_i2c_quirks,
.regs = mt_i2c_regs_v2,
.pmic_i2c = 0,
.dcm = 0,
static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+ return I2C_FUNC_I2C |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ else
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm mtk_i2c_algorithm = {
#define SB800_PIIX4_PORT_IDX_MASK 0x06
#define SB800_PIIX4_PORT_IDX_SHIFT 1
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
/* Find which register is used for port selection */
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
- switch (PIIX4_dev->device) {
- case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+ if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+ (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ PIIX4_dev->revision >= 0x1F)) {
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
- break;
- case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
- default:
+ } else {
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
- break;
}
} else {
if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
enum dma_data_direction dma_direction;
struct reset_control *rstc;
+ int irq;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
WARN_ON(!priv->slave);
+ /* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
+ synchronize_irq(priv->irq);
priv->slave = NULL;
pm_runtime_put(rcar_i2c_priv_to_dev(priv));
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
struct i2c_timings i2c_t;
- int irq, ret;
+ int ret;
/* Otherwise logic will break because some bytes must always use PIO */
BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
pm_runtime_put(dev);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
if (ret < 0) {
- dev_err(dev, "cannot get irq %d\n", irq);
+ dev_err(dev, "cannot get irq %d\n", priv->irq);
goto out_pm_disable;
}
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* i2c-stm32.h
*
*/
void i2c_unregister_device(struct i2c_client *client)
{
- if (!client)
+ if (IS_ERR_OR_NULL(client))
return;
if (client->dev.of_node) {
config TI_ADS1015
tristate "Texas Instruments ADS1015 ADC"
- depends on I2C && !SENSORS_ADS1015
+ depends on I2C
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
if (ret)
return ret;
- regval = ret & MAX9611_TEMP_MASK;
+ regval &= MAX9611_TEMP_MASK;
if ((regval > MAX9611_TEMP_MAX_POS &&
regval < MAX9611_TEMP_MIN_NEG) ||
st->buf[0] = st->integer >> 8;
st->buf[1] = 0x40; /* REG12 default */
st->buf[2] = 0x00;
- st->buf[3] = st->fract2 & 0xFF;
- st->buf[4] = st->fract2 >> 7;
- st->buf[5] = st->fract2 >> 15;
+ st->buf[3] = st->fract1 & 0xFF;
+ st->buf[4] = st->fract1 >> 8;
+ st->buf[5] = st->fract1 >> 16;
st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
- ADF4371_FRAC1WORD(st->fract1 >> 23);
+ ADF4371_FRAC1WORD(st->fract1 >> 24);
st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
st->buf[8] = st->mod2 & 0xFF;
st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
if (ret)
goto err;
- cma_configfs_init();
+ ret = cma_configfs_init();
+ if (ret)
+ goto err_ib;
return 0;
+err_ib:
+ ib_unregister_client(&cma_client);
err:
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
int ret;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
mutex_lock(&port_counter->lock);
if (on) {
ret = __counter_set_mode(&port_counter->mode,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
+ if (!rdma_is_visible_in_pid_ns(&qp->res))
return false;
- /* Ensure that counter belong to right PID */
- if (!rdma_is_kernel_res(&counter->res) &&
- !rdma_is_kernel_res(&qp->res) &&
- (task_pid_vnr(counter->res.task) != current->pid))
+ /* Ensure that counter belongs to the right PID */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
return qp;
err:
- rdma_restrack_put(&qp->res);
+ rdma_restrack_put(res);
return NULL;
}
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
+ if (!dev->port_data[port].port_counter.hstats)
+ return -EOPNOTSUPP;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
if (!names[i])
continue;
- curr = rdma_restrack_count(device, i,
- task_active_pid_ns(current));
+ curr = rdma_restrack_count(device, i);
ret = fill_res_info_entry(msg, names[i], curr);
if (ret)
goto err;
if (fill_nldev_handle(msg, device) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
if ((mode == RDMA_COUNTER_MODE_AUTO) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
nlmsg_end(msg, nlh);
ib_device_put(device);
* rdma_restrack_count() - the current usage of specific object
* @dev: IB device
* @type: actual type of object to operate
- * @ns: PID namespace
*/
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
- struct pid_namespace *ns)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
{
struct rdma_restrack_root *rt = &dev->res[type];
struct rdma_restrack_entry *e;
xa_lock(&rt->xa);
xas_for_each(&xas, e, U32_MAX) {
- if (ns == &init_pid_ns ||
- (!rdma_is_kernel_res(e) &&
- ns == task_active_pid_ns(e->task)))
- cnt++;
+ if (!rdma_is_visible_in_pid_ns(e))
+ continue;
+ cnt++;
}
xa_unlock(&rt->xa);
return cnt;
*/
if (rdma_is_kernel_res(res))
return task_active_pid_ns(current) == &init_pid_ns;
- return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+
+ /* PID 0 means that resource is not found in current namespace */
+ return task_pid_vnr(res->task);
}
int ib_umem_page_count(struct ib_umem *umem)
{
- int i;
- int n;
+ int i, n = 0;
struct scatterlist *sg;
- if (umem->is_odp)
- return ib_umem_num_pages(umem);
-
- n = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
n += sg_dma_len(sg) >> PAGE_SHIFT;
* prevent any further fault handling on this MR.
*/
ib_umem_notifier_start_account(umem_odp);
- umem_odp->dying = 1;
- /* Make sure that the fact the umem is dying is out before we release
- * all pending page faults. */
- smp_wmb();
complete_all(&umem_odp->notifier_completion);
umem_odp->umem.context->invalidate_range(
umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
spin_unlock_irqrestore(&cmdq->lock, flags);
return -EBUSY;
}
+
+ size = req->cmd_size;
+ /* Change cmd_size to the number of 16-byte cmdq units;
+ * note that req->cmd_size is modified in place.
+ */
+ bnxt_qplib_set_cmd_slots(req);
+
memset(resp, 0, sizeof(*resp));
crsqe->resp = (struct creq_qp_event *)resp;
crsqe->resp->cookie = req->cookie;
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req;
- size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
do {
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
do { \
memset(&(req), 0, sizeof((req))); \
(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
- (req).cmd_size = (sizeof((req)) + \
- BNXT_QPLIB_CMDQE_UNITS - 1) / \
- BNXT_QPLIB_CMDQE_UNITS; \
+ (req).cmd_size = sizeof((req)); \
(req).flags = cpu_to_le16(cmd_flags); \
} while (0)
BNXT_QPLIB_CMDQE_UNITS);
}
+/* Convert cmd_size in bytes to the number of CMDQE slots it occupies */
+static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+}
+
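A worked example of the slot arithmetic, assuming BNXT_QPLIB_CMDQE_UNITS stays 16 bytes: a 24-byte request rounds up to two command-queue slots, a 16-byte request to exactly one.

static inline u32 cmdqe_slots_example(u32 bytes)
{
	return (bytes + BNXT_QPLIB_CMDQE_UNITS - 1) / BNXT_QPLIB_CMDQE_UNITS;
	/* cmdqe_slots_example(24) == 2, cmdqe_slots_example(16) == 1 */
}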
#define MAX_CMDQ_IDX(depth) ((depth) - 1)
static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
if (!data)
return -ENOMEM;
copy = min(len, datalen - 1);
- if (copy_from_user(data, buf, copy))
- return -EFAULT;
+ if (copy_from_user(data, buf, copy)) {
+ ret = -EFAULT;
+ goto free_data;
+ }
ret = debugfs_file_get(file->f_path.dentry);
if (unlikely(ret))
- return ret;
+ goto free_data;
ptr = data;
token = ptr;
for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
ret = len;
debugfs_file_put(file->f_path.dentry);
+free_data:
kfree(data);
return ret;
}
return -ENOMEM;
ret = debugfs_file_get(file->f_path.dentry);
if (unlikely(ret))
- return ret;
+ goto free_data;
bit = find_first_bit(fault->opcodes, bitsize);
while (bit < bitsize) {
zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
data[size - 1] = '\n';
data[size] = '\0';
ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
kfree(data);
return ret;
}
hfi1_kern_clear_hw_flow(priv->rcd, qp);
}
-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet *packet, u8 rcv_type,
- u8 opcode)
+static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
{
struct rvt_qp *qp = packet->qp;
- struct hfi1_qp_priv *qpriv = qp->priv;
- u32 ipsn;
- struct ib_other_headers *ohdr = packet->ohdr;
- struct rvt_ack_entry *e;
- struct tid_rdma_request *req;
- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
- u32 i;
if (rcv_type >= RHF_RCV_TYPE_IB)
goto done;
if (rcv_type == RHF_RCV_TYPE_EAGER) {
hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
hfi1_schedule_send(qp);
- goto done_unlock;
- }
-
- /*
- * For TID READ response, error out QP after freeing the tid
- * resources.
- */
- if (opcode == TID_OP(READ_RESP)) {
- ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
- if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
- cmp_psn(ipsn, qp->s_psn) < 0) {
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto done;
- }
- goto done_unlock;
- }
-
- /*
- * Error out the qp for TID RDMA WRITE
- */
- hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
- for (i = 0; i < rvt_max_atomic(rdi); i++) {
- e = &qp->s_ack_queue[i];
- if (e->opcode == TID_OP(WRITE_REQ)) {
- req = ack_to_tid_req(e);
- hfi1_kern_exp_rcv_clear_all(req);
- }
}
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
- goto done;
-done_unlock:
+ /* Since no payload is delivered, just drop the packet */
spin_unlock(&qp->s_lock);
done:
return true;
u32 fpsn;
lockdep_assert_held(&qp->r_lock);
+ spin_lock(&qp->s_lock);
/* If the psn is out of valid range, drop the packet */
if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
cmp_psn(ibpsn, qp->s_psn) > 0)
- return ret;
+ goto s_unlock;
- spin_lock(&qp->s_lock);
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
* requests and implicitly NAK RDMA read and atomic requests issued
wqe = do_rc_completion(qp, wqe, ibp);
if (qp->s_acked == qp->s_tail)
- break;
+ goto s_unlock;
}
+ if (qp->s_acked == qp->s_tail)
+ goto s_unlock;
+
/* Handle the eflags for the request */
if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
goto s_unlock;
if (lnh == HFI1_LRH_GRH)
goto r_unlock;
- if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
+ if (tid_rdma_tid_err(packet, rcv_type))
goto r_unlock;
}
*/
spin_lock(&qp->s_lock);
qpriv = qp->priv;
+ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
+ qpriv->r_tid_tail == qpriv->r_tid_head)
+ goto unlock;
e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ if (e->opcode != TID_OP(WRITE_REQ))
+ goto unlock;
req = ack_to_tid_req(e);
+ if (req->comp_seg == req->cur_seg)
+ goto unlock;
flow = &req->flows[req->clear_tail];
trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
struct rvt_swqe *wqe;
struct tid_rdma_request *req;
struct tid_rdma_flow *flow;
- u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
+ u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
unsigned long flags;
u16 fidx;
ack_kpsn--;
}
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_op_err;
+
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
/* Drop stale ACK/NAK */
- if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
goto ack_op_err;
while (cmp_psn(ack_kpsn,
switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
IB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
+ if (!req->flows)
+ break;
flow = &req->flows[req->acked_tail];
+ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+ if (cmp_psn(psn, flpsn) > 0)
+ break;
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
flow);
req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- kfree(tun_qp->tx_ring);
- tun_qp->tx_ring = NULL;
i = MLX4_NUM_TUNNEL_BUFS;
err:
while (i > 0) {
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
+ kfree(tun_qp->tx_ring);
+ tun_qp->tx_ring = NULL;
kfree(tun_qp->ring);
tun_qp->ring = NULL;
return -ENOMEM;
event_sub->eventfd =
eventfd_ctx_fdget(redirect_fd);
- if (IS_ERR(event_sub)) {
+ if (IS_ERR(event_sub->eventfd)) {
err = PTR_ERR(event_sub->eventfd);
event_sub->eventfd = NULL;
goto err;
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub, *event_sub_tmp;
struct devx_async_event_data *entry, *tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
- mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
/* delete the subscriptions which are related to this FD */
list_for_each_entry_safe(event_sub, event_sub_tmp,
&ev_file->subscribed_events_list, file_list) {
- devx_cleanup_subscription(ev_file->dev, event_sub);
+ devx_cleanup_subscription(dev, event_sub);
if (event_sub->eventfd)
eventfd_ctx_put(event_sub->eventfd);
kfree_rcu(event_sub, rcu);
}
- mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
/* free the pending events allocation */
if (!ev_file->omit_data) {
}
uverbs_close_fd(filp);
- put_device(&ev_file->dev->ib_dev.dev);
+ put_device(&dev->ib_dev.dev);
return 0;
}
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- if (MLX5_CAP_GEN(mdev, pg))
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
}
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
+ mlx5_ib_internal_fill_odp_caps(dev);
+
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
- mlx5_ib_internal_fill_odp_caps(dev);
-
return mlx5_ib_odp_init_one(dev);
}
int entry;
if (umem->is_odp) {
- unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
+ struct ib_umem_odp *odp = to_ib_umem_odp(umem);
+ unsigned int page_shift = odp->page_shift;
- *ncont = ib_umem_page_count(umem);
+ *ncont = ib_umem_odp_num_pages(odp);
*count = *ncont << (page_shift - PAGE_SHIFT);
*shift = page_shift;
if (order)
bool dyn_bfreg);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+
+static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
+ bool do_modify_atomic)
+{
+ if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return false;
+
+ if (do_modify_atomic &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return false;
+
+ return true;
+}
#endif /* MLX5_IB_H */
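The new helper centralizes the UMR capability checks; the registration, rereg and ODP paths below all funnel through it. A toy model of the same truth table, with plain booleans standing in for the MLX5_CAP_GEN() reads:

/* Toy model of the gating in mlx5_ib_can_use_umr(): UMR is refused if
 * entity-size modification is disabled, or if atomic access is requested
 * while the device advertises atomics but forbids UMR updates of
 * atomic-enabled MKeys. Flags here are plain booleans, not MLX5 caps.
 */
#include <stdbool.h>
#include <stdio.h>

static bool can_use_umr(bool umr_modify_entity_size_disabled,
			bool atomic_cap, bool umr_modify_atomic_disabled,
			bool do_modify_atomic)
{
	if (umr_modify_entity_size_disabled)
		return false;
	if (do_modify_atomic && atomic_cap && umr_modify_atomic_disabled)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", can_use_umr(false, true, true, true));  /* 0: atomic UMR blocked */
	printf("%d\n", can_use_umr(false, true, true, false)); /* 1: non-atomic path OK */
	return 0;
}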
if (err < 0)
return ERR_PTR(err);
- use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
- (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
- !MLX5_CAP_GEN(dev->mdev, atomic));
+ use_umr = mlx5_ib_can_use_umr(dev, true);
if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
goto err;
}
- if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+ if (!mlx5_ib_can_use_umr(dev, true) ||
+ (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
/*
* UMR can't be used - MKey needs to be replaced.
*/
memset(caps, 0, sizeof(*caps));
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!MLX5_CAP_GEN(dev->mdev, pg) ||
+ !mlx5_ib_can_use_umr(dev, true))
return;
caps->general_caps = IB_ODP_SUPPORT;
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
MLX5_CAP_GEN(dev->mdev, null_mkey) &&
- MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
return;
u32 flags)
{
int npages = 0, current_seq, page_shift, ret, np;
- bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
- implicit = true;
} else {
odp = odp_mr;
}
out:
if (ret == -EAGAIN) {
- if (implicit || !odp->dying) {
- unsigned long timeout =
- msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(
- &odp->notifier_completion,
- timeout)) {
- mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
- current_seq, odp->notifiers_seq, odp->notifiers_count);
- }
- } else {
- /* The MR is being killed, kill the QP as well. */
- ret = -EFAULT;
+ unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(&odp->notifier_completion,
+ timeout)) {
+ mlx5_ib_warn(
+ dev,
+ "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+ current_seq, odp->notifiers_seq,
+ odp->notifiers_count);
}
}
{
int ret = 0;
- if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
- ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return ret;
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
}
}
- if (!MLX5_CAP_GEN(dev->mdev, pg))
- return ret;
-
ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
return ret;
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return;
mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
MLX5_IB_UMR_OCTOWORD;
}
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
{
u64 result;
MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_SMALL_FENCE |
MLX5_MKEY_MASK_FREE;
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
return cpu_to_be64(result);
}
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, u8 flags)
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
umr->flags = flags;
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->mkey_mask = frwr_mkey_mask();
+ umr->mkey_mask = frwr_mkey_mask(atomic);
}
static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
+ if (!mlx5_ib_can_use_umr(dev, atomic)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Fast update of %s for MR is disabled\n",
+ (MLX5_CAP_GEN(dev->mdev,
+ umr_modify_entity_size_disabled)) ?
+ "entity size" :
+ "atomic access");
+ return -EINVAL;
+ }
+
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
"Invalid IB_SEND_INLINE send flag\n");
if (umr_inline)
flags |= MLX5_UMR_INLINE;
- set_reg_umr_seg(*seg, mr, flags);
+ set_reg_umr_seg(*seg, mr, flags, atomic);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
- depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+ depends on INET && INFINIBAND && LIBCRC32C
select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
};
struct siw_pble {
- u64 addr; /* Address of assigned user buffer */
- u64 size; /* Size of this entry */
- u64 pbl_off; /* Total offset from start of PBL */
+ dma_addr_t addr; /* Address of assigned buffer */
+ unsigned int size; /* Size of this entry */
+ unsigned long pbl_off; /* Total offset from start of PBL */
};
struct siw_pbl {
struct siw_cq {
struct ib_cq base_cq;
spinlock_t lock;
- u64 *notify;
+ struct siw_cq_ctrl *notify;
struct siw_cqe *queue;
u32 cq_put;
u32 cq_get;
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
getname_local(cep->sock, &event.local_addr);
getname_peer(cep->sock, &event.remote_addr);
}
- siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n",
- cep->qp ? qp_id(cep->qp) : -1, id, reason, status);
+ siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
+ cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
return id->event_handler(id, &event);
}
siw_cep_get(new_cep);
new_s->sk->sk_user_data = new_cep;
- siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
-
if (siw_tcp_nagle == false) {
int val = 1;
cep = work->cep;
siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
- cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state);
+ cep->qp ? qp_id(cep->qp) : UINT_MAX,
+ work->type, cep->state);
siw_cep_set_inuse(cep);
}
if (release_cep) {
siw_dbg_cep(cep,
- "release: timer=%s, QP[%u], id 0x%p\n",
+ "release: timer=%s, QP[%u]\n",
cep->mpa_timer ? "y" : "n",
- cep->qp ? qp_id(cep->qp) : -1, cep->cm_id);
+ cep->qp ? qp_id(cep->qp) : UINT_MAX);
siw_cancel_mpatimer(cep);
else
delay = MPAREP_TIMEOUT;
}
- siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n",
- cep->qp ? qp_id(cep->qp) : -1, type, work, delay);
+ siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
+ cep->qp ? qp_id(cep->qp) : -1, type, delay);
queue_delayed_work(siw_cm_wq, &work->work, delay);
}
if (v4)
siw_dbg_qp(qp,
- "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- id, pd_len,
+ "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
+ pd_len,
&((struct sockaddr_in *)(laddr))->sin_addr,
ntohs(((struct sockaddr_in *)(laddr))->sin_port),
&((struct sockaddr_in *)(raddr))->sin_addr,
ntohs(((struct sockaddr_in *)(raddr))->sin_port));
else
siw_dbg_qp(qp,
- "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- id, pd_len,
+ "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
+ pd_len,
&((struct sockaddr_in6 *)(laddr))->sin6_addr,
ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
&((struct sockaddr_in6 *)(raddr))->sin6_addr,
if (rv >= 0) {
rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
if (!rv) {
- siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id,
- qp_id(qp));
+ siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
siw_cep_set_free(cep);
return 0;
}
}
error:
- siw_dbg_qp(qp, "failed: %d\n", rv);
+ siw_dbg(id->device, "failed: %d\n", rv);
if (cep) {
siw_socket_disassoc(s);
} else if (s) {
sock_release(s);
}
- siw_qp_put(qp);
+ if (qp)
+ siw_qp_put(qp);
return rv;
}
siw_cancel_mpatimer(cep);
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
- siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
+ siw_dbg_cep(cep, "out of state\n");
siw_cep_set_free(cep);
siw_cep_put(cep);
up_write(&qp->state_lock);
goto error;
}
- siw_dbg_cep(cep, "id 0x%p\n", id);
+ siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
siw_dbg_cep(cep, "peer allows GSO on TX\n");
params->ird > sdev->attrs.max_ird) {
siw_dbg_cep(
cep,
- "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n",
- id, qp_id(qp), params->ord, sdev->attrs.max_ord,
+ "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
+ qp_id(qp), params->ord, sdev->attrs.max_ord,
params->ird, sdev->attrs.max_ird);
rv = -EINVAL;
up_write(&qp->state_lock);
if (params->private_data_len > max_priv_data) {
siw_dbg_cep(
cep,
- "id 0x%p, [QP %u]: private data length: %d (max %d)\n",
- id, qp_id(qp), params->private_data_len, max_priv_data);
+ "[QP %u]: private data length: %d (max %d)\n",
+ qp_id(qp), params->private_data_len, max_priv_data);
rv = -EINVAL;
up_write(&qp->state_lock);
goto error;
qp_attrs.flags = SIW_MPA_CRC;
qp_attrs.state = SIW_QP_STATE_RTS;
- siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp));
+ siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
/* Associate QP with CEP */
siw_cep_get(cep);
if (rv)
goto error;
- siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n",
- id, qp_id(qp), params->private_data_len);
+ siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
+ qp_id(qp), params->private_data_len);
rv = siw_send_mpareqrep(cep, params->private_data,
params->private_data_len);
siw_cancel_mpatimer(cep);
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
- siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
+ siw_dbg_cep(cep, "out of state\n");
siw_cep_set_free(cep);
siw_cep_put(cep); /* put last reference */
return -ECONNRESET;
}
- siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state,
+ siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
pd_len);
if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
sizeof(s_val));
if (rv) {
- siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv);
+ siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
sizeof(struct sockaddr_in) :
sizeof(struct sockaddr_in6));
if (rv) {
- siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv);
+ siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
}
cep = siw_cep_alloc(sdev);
rv = siw_cm_alloc_work(cep, backlog);
if (rv) {
siw_dbg(id->device,
- "id 0x%p: alloc_work error %d, backlog %d\n", id,
+ "alloc_work error %d, backlog %d\n",
rv, backlog);
goto error;
}
rv = s->ops->listen(s, backlog);
if (rv) {
- siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv);
+ siw_dbg(id->device, "listen error %d\n", rv);
goto error;
}
cep->cm_id = id;
list_del(p);
- siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id,
- cep->state);
+ siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
siw_cep_set_inuse(cep);
struct net_device *dev = to_siw_dev(id->device)->netdev;
int rv = 0, listeners = 0;
- siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog);
+ siw_dbg(id->device, "backlog %d\n", backlog);
/*
* For each attached address of the interface, create a
struct sockaddr_in s_laddr, *s_raddr;
const struct in_ifaddr *ifa;
+ if (!in_dev) {
+ rv = -ENODEV;
+ goto out;
+ }
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
s_raddr = (struct sockaddr_in *)&id->remote_addr;
siw_dbg(id->device,
- "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n",
- id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
+ "laddr %pI4:%d, raddr %pI4:%d\n",
+ &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
&s_raddr->sin_addr, ntohs(s_raddr->sin_port));
rtnl_lock();
struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
*s_raddr = &to_sockaddr_in6(id->remote_addr);
+ if (!in6_dev) {
+ rv = -ENODEV;
+ goto out;
+ }
siw_dbg(id->device,
- "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n",
- id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
+ "laddr %pI6:%d, raddr %pI6:%d\n",
+ &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
&s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
- read_lock_bh(&in6_dev->lock);
+ rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- struct sockaddr_in6 bind_addr;
-
+ if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
+ continue;
if (ipv6_addr_any(&s_laddr->sin6_addr) ||
ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
- bind_addr.sin6_family = AF_INET6;
- bind_addr.sin6_port = s_laddr->sin6_port;
- bind_addr.sin6_flowinfo = 0;
- bind_addr.sin6_addr = ifp->addr;
- bind_addr.sin6_scope_id = dev->ifindex;
+ struct sockaddr_in6 bind_addr = {
+ .sin6_family = AF_INET6,
+ .sin6_port = s_laddr->sin6_port,
+ .sin6_flowinfo = 0,
+ .sin6_addr = ifp->addr,
+ .sin6_scope_id = dev->ifindex };
rv = siw_listen_address(id, backlog,
(struct sockaddr *)&bind_addr,
listeners++;
}
}
- read_unlock_bh(&in6_dev->lock);
-
+ rtnl_unlock();
in6_dev_put(in6_dev);
} else {
- return -EAFNOSUPPORT;
+ rv = -EAFNOSUPPORT;
}
+out:
if (listeners)
rv = 0;
else if (!rv)
rv = -EINVAL;
- siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK");
+ siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
return rv;
}
int siw_destroy_listen(struct iw_cm_id *id)
{
- siw_dbg(id->device, "id 0x%p\n", id);
-
if (!id->provider_data) {
- siw_dbg(id->device, "id 0x%p: no cep(s)\n", id);
+ siw_dbg(id->device, "no cep(s)\n");
return 0;
}
siw_drop_listeners(id);
wc->wc_flags = IB_WC_WITH_INVALIDATE;
}
wc->qp = cqe->base_qp;
- siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n",
+ siw_dbg_cq(cq,
+ "idx %u, type %d, flags %2x, id 0x%pK\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
- cqe->flags, (void *)cqe->id);
+ cqe->flags, (void *)(uintptr_t)cqe->id);
}
WRITE_ONCE(cqe->flags, 0);
cq->cq_get++;
out_err:
siw_cpu_info.num_nodes = 0;
- while (i) {
+ while (--i >= 0)
kfree(siw_cpu_info.tx_valid_cpus[i]);
- siw_cpu_info.tx_valid_cpus[i--] = NULL;
- }
kfree(siw_cpu_info.tx_valid_cpus);
siw_cpu_info.tx_valid_cpus = NULL;
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n",
- (unsigned long long)addr,
- (unsigned long long)(addr + len));
- siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n",
- (unsigned long long)mem->va,
- (unsigned long long)(mem->va + mem->len),
+ siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ (void *)(uintptr_t)addr,
+ (void *)(uintptr_t)(addr + len));
+ siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ (void *)(uintptr_t)mem->va,
+ (void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
return -E_BASE_BOUNDS;
* Optionally, provides remaining len within current element, and
* current PBL index for later resume at same element.
*/
-u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
{
int i = idx ? *idx : 0;
struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
-u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
rv = -EINVAL;
goto out;
}
- wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
wqe->sqe.sge[0].lkey = 0;
wqe->sqe.num_sge = 1;
}
*/
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
- u64 cq_notify;
+ u32 cq_notify;
if (!cq->base_cq.comp_handler)
return false;
- cq_notify = READ_ONCE(*cq->notify);
+ /* Read application shared notification state */
+ cq_notify = READ_ONCE(cq->notify->flags);
if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
((cq_notify & SIW_NOTIFY_SOLICITED) &&
(flags & SIW_WQE_SOLICITED))) {
- /* dis-arm CQ */
- smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+ /*
+ * CQ notification is one-shot: since the
+ * current CQE causes user notification,
+ * the CQ gets disarmed and must be re-armed
+ * by the user for a new notification.
+ */
+ WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
return true;
}
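Arming and disarming form a one-shot handshake: the consumer arms the CQ, the producer notifies once and disarms it. A userspace model under that assumption, with C11 atomics standing in for READ_ONCE/WRITE_ONCE (the flag values are hypothetical, not the siw ABI):

/* Userspace model of the one-shot CQ arming above; illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NOTIFY_NOT  0u
#define NOTIFY_NEXT 1u

static _Atomic unsigned int cq_flags = NOTIFY_NOT;

static void arm_cq(void)            /* consumer side, cf. req_notify_cq */
{
	atomic_store(&cq_flags, NOTIFY_NEXT);
}

static bool notify_now(void)        /* producer side, cf. siw_cq_notify_now */
{
	if (atomic_load(&cq_flags) & NOTIFY_NEXT) {
		atomic_store(&cq_flags, NOTIFY_NOT); /* disarm: one-shot */
		return true;
	}
	return false;
}

int main(void)
{
	arm_cq();
	printf("%d %d\n", notify_now(), notify_now()); /* 1 0: fires once until re-armed */
	return 0;
}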
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
__func__, qp_id(rx_qp(srx)),
- (void *)dest_addr, (void *)umem->fp_addr);
+ (void *)(uintptr_t)dest_addr,
+ (void *)(uintptr_t)umem->fp_addr);
/* siw internal error */
srx->skb_copied += copied;
srx->skb_new -= copied;
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
while (len) {
int bytes;
- u64 buf_addr =
+ dma_addr_t buf_addr =
siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
if (!buf_addr)
break;
mem_p = *mem;
if (mem_p->mem_obj == NULL)
rv = siw_rx_kva(srx,
- (void *)(sge->laddr + frx->sge_off),
- sge_bytes);
+ (void *)(uintptr_t)(sge->laddr + frx->sge_off),
+ sge_bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem,
sge->laddr + frx->sge_off, sge_bytes);
if (mem->mem_obj == NULL)
rv = siw_rx_kva(srx,
- (void *)(srx->ddp_to + srx->fpdu_part_rcvd),
- bytes);
+ (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
+ bytes);
else if (!mem->is_pbl)
rv = siw_rx_umem(srx, mem->umem,
srx->ddp_to + srx->fpdu_part_rcvd, bytes);
bytes = min(srx->fpdu_part_rem, srx->skb_new);
if (mem_p->mem_obj == NULL)
- rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed),
- bytes);
+ rv = siw_rx_kva(srx,
+ (void *)(uintptr_t)(sge->laddr + wqe->processed),
+ bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
bytes);
{
struct siw_pbl *pbl = mem->pbl;
u64 offset = addr - mem->va;
- u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
return virt_to_page(paddr);
/*
* Copy short payload at provided destination payload address
*/
-static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
+static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
{
struct siw_wqe *wqe = &c_tx->wqe_active;
struct siw_sge *sge = &wqe->sqe.sge[0];
return 0;
if (tx_flags(wqe) & SIW_WQE_INLINE) {
- memcpy((void *)paddr, &wqe->sqe.sge[1], bytes);
+ memcpy(paddr, &wqe->sqe.sge[1], bytes);
} else {
struct siw_mem *mem = wqe->mem[0];
if (!mem->mem_obj) {
/* Kernel client using kva */
- memcpy((void *)paddr, (void *)sge->laddr, bytes);
+ memcpy(paddr,
+ (const void *)(uintptr_t)sge->laddr, bytes);
} else if (c_tx->in_syscall) {
- if (copy_from_user((void *)paddr,
- (const void __user *)sge->laddr,
+ if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
bytes))
return -EFAULT;
} else {
buffer = kmap_atomic(p);
if (likely(PAGE_SIZE - off >= bytes)) {
- memcpy((void *)paddr, buffer + off, bytes);
+ memcpy(paddr, buffer + off, bytes);
kunmap_atomic(buffer);
} else {
unsigned long part = bytes - (PAGE_SIZE - off);
- memcpy((void *)paddr, buffer + off, part);
+ memcpy(paddr, buffer + off, part);
kunmap_atomic(buffer);
if (!mem->is_pbl)
return -EFAULT;
buffer = kmap_atomic(p);
- memcpy((void *)(paddr + part), buffer,
+ memcpy(paddr + part, buffer,
bytes - part);
kunmap_atomic(buffer);
}
c_tx->ctrl_len = sizeof(struct iwarp_send);
crc = (char *)&c_tx->pkt.send_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_SEND_REMOTE_INV:
c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
crc = (char *)&c_tx->pkt.send_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_WRITE:
c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
crc = (char *)&c_tx->pkt.write_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_READ_RESPONSE:
c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
crc = (char *)&c_tx->pkt.write_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
default:
#define MAX_TRAILER (MPA_CRC_SIZE + 4)
-static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
+static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
{
- if (hdr_len) {
- ++pages;
- --num_maps;
- }
- while (num_maps-- > 0) {
- kunmap(*pages);
- pages++;
+ while (kmap_mask) {
+ if (kmap_mask & BIT(0))
+ kunmap(*pp);
+ pp++;
+ kmap_mask >>= 1;
}
}
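The rewrite above replaces positional bookkeeping with a bitmask: BIT(seg) records that segment seg was kmapped, and teardown walks the mask so only mapped entries are unmapped. A standalone sketch; unmap_one() is a hypothetical stand-in for kunmap():

/* Standalone sketch of the kmap_mask bookkeeping above. */
#include <stdio.h>

#define BIT(n) (1UL << (n))

static void unmap_one(int seg) { printf("unmap seg %d\n", seg); }

static void unmap_pages(unsigned long kmap_mask)
{
	int seg = 0;

	while (kmap_mask) {
		if (kmap_mask & BIT(0))
			unmap_one(seg);
		seg++;
		kmap_mask >>= 1;
	}
}

int main(void)
{
	unsigned long mask = 0;

	mask |= BIT(1);	/* segments 1 and 3 were mapped */
	mask |= BIT(3);
	unmap_pages(mask); /* prints seg 1 and seg 3 only */
	return 0;
}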
unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
pbl_idx = c_tx->pbl_idx;
+ unsigned long kmap_mask = 0L;
if (c_tx->state == SIW_SEND_HDR) {
if (c_tx->use_sendpage) {
if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
mem = wqe->mem[sge_idx];
- if (!mem->mem_obj)
- is_kva = 1;
+ is_kva = mem->mem_obj == NULL ? 1 : 0;
} else {
is_kva = 1;
}
* tx from kernel virtual address: either inline data
* or memory region with assigned kernel buffer
*/
- iov[seg].iov_base = (void *)(sge->laddr + sge_off);
+ iov[seg].iov_base =
+ (void *)(uintptr_t)(sge->laddr + sge_off);
iov[seg].iov_len = sge_len;
if (do_crc)
p = siw_get_upage(mem->umem,
sge->laddr + sge_off);
if (unlikely(!p)) {
- if (hdr_len)
- seg--;
- if (!c_tx->use_sendpage && seg) {
- siw_unmap_pages(page_array,
- hdr_len, seg);
- }
+ siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
rv = -EFAULT;
goto done_crc;
if (!c_tx->use_sendpage) {
iov[seg].iov_base = kmap(p) + fp_off;
iov[seg].iov_len = plen;
+
+ /* Remember for later kunmap() */
+ kmap_mask |= BIT(seg);
+
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
page_address(p) + fp_off,
plen);
} else {
- u64 pa = ((sge->laddr + sge_off) & PAGE_MASK);
+ u64 va = sge->laddr + sge_off;
- page_array[seg] = virt_to_page(pa);
+ page_array[seg] = virt_to_page(va & PAGE_MASK);
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(sge->laddr + sge_off),
+ (void *)(uintptr_t)va,
plen);
}
if (++seg > (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
- if (!is_kva && !c_tx->use_sendpage) {
- siw_unmap_pages(page_array, hdr_len,
- seg - 1);
- }
+ siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
rv = -EMSGSIZE;
goto done_crc;
} else {
rv = kernel_sendmsg(s, &msg, iov, seg + 1,
hdr_len + data_len + trl_len);
- if (!is_kva)
- siw_unmap_pages(page_array, hdr_len, seg);
+ siw_unmap_pages(page_array, kmap_mask);
}
if (rv < (int)hdr_len) {
/* Not even complete hdr pushed or negative rv */
rv = -EINVAL;
goto tx_error;
}
- wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].laddr =
+ (u64)(uintptr_t)&wqe->sqe.sge[1];
}
}
wqe->wr_status = SIW_WR_INPROGRESS;
static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
- struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr;
+ struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
struct siw_device *sdev = to_siw_dev(pd->device);
struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
int rv = 0;
mem->stag = sqe->rkey;
mem->perms = sqe->access;
- siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n",
- mem->va, base_mr->iova);
+ siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
mem->va = base_mr->iova;
mem->stag_valid = 1;
out:
*/
qp->srq = to_siw_srq(attrs->srq);
qp->attrs.rq_size = 0;
- siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n",
- qp->qp_num, qp->srq);
+ siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
} else if (num_rqe) {
if (qp->kernel_verbs)
qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
base_ucontext);
struct siw_qp_attrs qp_attrs;
- siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep);
+ siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
/*
* Mark QP as in process of destruction to prevent from
void *kbuf = &sqe->sge[1];
int num_sge = core_wr->num_sge, bytes = 0;
- sqe->sge[0].laddr = (u64)kbuf;
+ sqe->sge[0].laddr = (uintptr_t)kbuf;
sqe->sge[0].lkey = 0;
while (num_sge--) {
break;
case IB_WR_REG_MR:
- sqe->base_mr = (uint64_t)reg_wr(wr)->mr;
+ sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
sqe->rkey = reg_wr(wr)->key;
sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
sqe->opcode = SIW_OP_REG_MR;
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
- sqe->opcode, sqe->flags, (void *)sqe->id);
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ sqe->opcode, sqe->flags,
+ (void *)(uintptr_t)sqe->id);
if (unlikely(rv < 0))
break;
spin_lock_init(&cq->lock);
- cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+ cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
if (udata) {
struct siw_uresp_create_cq uresp = {};
siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- /* CQ event for next solicited completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+ /*
+ * Enable CQ event for the next solicited completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
else
- /* CQ event for any signalled completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+ /*
+ * Enable CQ event for any signalled completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
if (flags & IB_CQ_REPORT_MISSED_EVENTS)
return cq->cq_put - cq->cq_get;
unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
int rv;
- siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n",
- (unsigned long long)start, (unsigned long long)rnic_va,
+ siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
struct siw_mem *mem = mr->mem;
struct siw_pbl *pbl = mem->pbl;
struct siw_pble *pble;
- u64 pbl_size;
+ unsigned long pbl_size;
int i, rv;
if (!pbl) {
pbl_size += sg_dma_len(slp);
}
siw_dbg_mem(mem,
- "sge[%d], size %llu, addr 0x%016llx, total %llu\n",
- i, pble->size, pble->addr, pbl_size);
+ "sge[%d], size %u, addr 0x%p, total %lu\n",
+ i, pble->size, (void *)(uintptr_t)pble->addr,
+ pbl_size);
}
rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
if (rv > 0) {
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%016llx, %u SLE to %u entries\n",
- mem->len, mem->va, num_sle, pbl->num_buf);
+ "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ mem->len, (void *)(uintptr_t)mem->va, num_sle,
+ pbl->num_buf);
}
return rv;
}
}
spin_lock_init(&srq->lock);
- siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq);
+ siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
return 0;
if (unlikely(!srq->kernel_verbs)) {
siw_dbg_pd(base_srq->pd,
- "[SRQ 0x%p]: no kernel post_recv for mapped srq\n",
- srq);
+ "[SRQ]: no kernel post_recv for mapped srq\n");
rv = -EINVAL;
goto out;
}
}
if (unlikely(wr->num_sge > srq->max_sge)) {
siw_dbg_pd(base_srq->pd,
- "[SRQ 0x%p]: too many sge's: %d\n", srq,
- wr->num_sge);
+ "[SRQ]: too many sge's: %d\n", wr->num_sge);
rv = -EINVAL;
break;
}
spin_unlock_irqrestore(&srq->lock, flags);
out:
if (unlikely(rv < 0)) {
- siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv);
+ siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
*bad_wr = wr;
}
return rv;
static void hv_kbd_on_channel_callback(void *context)
{
+ struct vmpacket_descriptor *desc;
struct hv_device *hv_dev = context;
- void *buffer;
- int bufferlen = 0x100; /* Start with sensible size */
u32 bytes_recvd;
u64 req_id;
- int error;
- buffer = kmalloc(bufferlen, GFP_ATOMIC);
- if (!buffer)
- return;
-
- while (1) {
- error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
- &bytes_recvd, &req_id);
- switch (error) {
- case 0:
- if (bytes_recvd == 0) {
- kfree(buffer);
- return;
- }
-
- hv_kbd_handle_received_packet(hv_dev, buffer,
- bytes_recvd, req_id);
- break;
+ foreach_vmbus_pkt(desc, hv_dev->channel) {
+ bytes_recvd = desc->len8 * 8;
+ req_id = desc->trans_id;
- case -ENOBUFS:
- kfree(buffer);
- /* Handle large packet */
- bufferlen = bytes_recvd;
- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (!buffer)
- return;
- break;
- }
+ hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
+ req_id);
}
}
iommu_completion_wait(iommu);
}
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ dom_id, 1);
+ iommu_queue_command(iommu, &cmd);
+
+ iommu_completion_wait(iommu);
+}
+
static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
struct iommu_cmd cmd;
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
gfp_t gfp)
{
+ unsigned long flags;
u64 *pte;
- if (domain->mode == PAGE_MODE_6_LEVEL)
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
- return false;
+ goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
- return false;
+ goto out;
*pte = PM_LEVEL_PDE(domain->mode,
iommu_virt_to_phys(domain->pt_root));
domain->mode += 1;
domain->updated = true;
- return true;
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return;
}
static u64 *alloc_pte(struct protection_domain *domain,
{
u64 pte_root = 0;
u64 flags = 0;
+ u32 old_domid;
if (domain->mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->pt_root);
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
+ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
amd_iommu_dev_table[devid].data[1] = flags;
amd_iommu_dev_table[devid].data[0] = pte_root;
+
+ /*
+ * A kdump kernel might be replacing a domain ID that was copied from
+ * the previous kernel; if so, it needs to flush the translation cache
+ * entries for the old domain ID that is being overwritten.
+ */
+ if (old_domid) {
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ amd_iommu_flush_tlb_domid(iommu, old_domid);
+ }
}
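The flush-on-overwrite logic reads naturally as: capture the old domain ID, install the new DTE, then flush the old ID if one was present. A minimal model under that reading; flush() and the stale ID are hypothetical:

/* Minimal model of the old-domain-ID flush above. */
#include <stdio.h>

static void flush(unsigned int domid) { printf("flush domid %u\n", domid); }

static unsigned int dte_domid = 42;	/* pretend: stale entry from an old kernel */

static void set_dte(unsigned int new_domid)
{
	unsigned int old = dte_domid;

	dte_domid = new_domid;
	if (old)		/* stale translations may be cached for `old` */
		flush(old);
}

int main(void)
{
	set_dte(7);	/* flushes domid 42 */
	return 0;
}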
static void clear_dte_entry(u16 devid)
ste_live = true;
break;
case STRTAB_STE_0_CFG_ABORT:
- if (disable_bypass)
- break;
+ BUG_ON(!disable_bypass);
+ break;
default:
BUG(); /* STE corruption */
}
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- size_t iova_off = 0;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
- iova_off = iova_offset(&cookie->iovad, phys);
- size = iova_align(&cookie->iovad, size + iova_off);
- }
+ size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
- (cur_len + s_length <= max_len)) {
+ (max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
{
bool coherent = dev_is_dma_coherent(dev);
size_t alloc_size = PAGE_ALIGN(size);
+ int node = dev_to_node(dev);
struct page *page = NULL;
void *cpu_addr;
page = dma_alloc_contiguous(dev, alloc_size, gfp);
+ if (!page)
+ page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (!page)
return NULL;
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
VM_USERMAP, prot, __builtin_return_address(0));
unsigned long pfn, off = vma->vm_pgoff;
int ret;
- vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
- if (iova == DMA_MAPPING_ERROR)
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
goto out_free_page;
+ if (iommu_map(domain, iova, msi_addr, size, prot))
+ goto out_free_iova;
+
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
+out_free_iova:
+ iommu_dma_free_iova(cookie, iova, size);
out_free_page:
kfree(msi_page);
return NULL;
tbl_wlk.ctx_entry = context;
m->private = &tbl_wlk;
- if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+ if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
pasid_dir_size = get_pasid_dir_size(context);
pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+ struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
static bool device_is_rmrr_locked(struct device *dev);
return ret;
}
+struct domain_context_mapping_data {
+ struct dmar_domain *domain;
+ struct intel_iommu *iommu;
+ struct pasid_table *table;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+ u16 alias, void *opaque)
+{
+ struct domain_context_mapping_data *data = opaque;
+
+ return domain_context_mapping_one(data->domain, data->iommu,
+ data->table, PCI_BUS_NUM(alias),
+ alias & 0xff);
+}
+
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
+ struct domain_context_mapping_data data;
struct pasid_table *table;
struct intel_iommu *iommu;
u8 bus, devfn;
return -ENODEV;
table = intel_pasid_get_table(dev);
- return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+
+ if (!dev_is_pci(dev))
+ return domain_context_mapping_one(domain, iommu, table,
+ bus, devfn);
+
+ data.domain = domain;
+ data.iommu = iommu;
+ data.table = table;
+
+ return pci_for_each_dma_alias(to_pci_dev(dev),
+ &domain_context_mapping_cb, &data);
}
static int domain_context_mapped_cb(struct pci_dev *pdev,
dmar_domain = to_dmar_domain(domain);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
}
+ dmar_remove_one_dev_info(dev);
get_private_domain_for_dev(dev);
}
return ret;
}
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+ struct intel_iommu *iommu = opaque;
+
+ domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+ return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices. If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them may leave
+ * the others unable to operate.
+ */
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+{
+ if (!iommu || !dev || !dev_is_pci(dev))
+ return;
+
+ pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+}
+
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
struct dmar_domain *domain;
PASID_RID2PASID);
iommu_disable_dev_iotlb(info);
- domain_context_clear_one(iommu, info->bus, info->devfn);
+ domain_context_clear(iommu, info->dev);
intel_pasid_free_table(info->dev);
}
/* free the private domain */
if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
domain_exit(info->domain);
free_devinfo_mem(info);
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- __dmar_remove_one_dev_info(info);
+ if (info)
+ __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
ret = iommu_request_dm_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
domain_add_dev_info(si_domain, dev);
dev_info(dev,
if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
ret = iommu_request_dma_domain_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
if (!iommu)
return;
+ dmar_remove_one_dev_info(dev);
+
iommu_group_remove_device(dev);
iommu_device_unlink(&iommu->iommu, dev);
}
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih, int gl)
+ unsigned long address, unsigned long pages, int ih)
{
struct qi_desc desc;
- if (pages == -1) {
- /* For global kernel pages we have to flush them in *all* PASIDs
- * because that's the only option the hardware gives us. Despite
- * the fact that they are actually only accessible through one. */
- if (gl)
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
- QI_EIOTLB_TYPE;
- else
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
+ /*
+ * Do PASID granu IOTLB invalidation if page selective capability is
+ * not available.
+ */
+ if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+ QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
desc.qw1 = 0;
} else {
int mask = ilog2(__roundup_pow_of_two(pages));
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = QI_EIOTLB_ADDR(address) |
- QI_EIOTLB_GL(gl) |
QI_EIOTLB_IH(ih) |
QI_EIOTLB_AM(mask);
}
}
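The page-selective descriptor encodes the invalidation span as a power-of-two address mask: mask = ilog2(roundup_pow_of_two(pages)). A standalone sketch of that computation, with plain C stand-ins for the kernel helpers:

/* Sketch of the address-mask computation consumed by QI_EIOTLB_AM(). */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	/* 3 pages round up to 4, so AM = 2 (a 4-page aligned span) */
	printf("%u\n", ilog2(roundup_pow_of_two(3)));
	return 0;
}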
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
- unsigned long pages, int ih, int gl)
+ unsigned long pages, int ih)
{
struct intel_svm_dev *sdev;
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+ intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
rcu_read_unlock();
}
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
intel_flush_svm_range(svm, start,
- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
}
rcu_read_unlock();
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
if (!cdev->ap.applid)
return -ENODEV;
+ if (count < CAPIMSG_BASELEN)
+ return -EINVAL;
+
skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+ if (count < CAPI_DATA_B3_REQ_LEN ||
+ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);
unsigned long freed;
c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (!dm_bufio_trylock(c))
+ if (sc->gfp_mask & __GFP_FS)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
return SHRINK_STOP;
freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
unsigned long long badblock_count;
spinlock_t dust_lock;
unsigned int blksz;
+ int sect_per_block_shift;
unsigned int sect_per_block;
sector_t start;
bool fail_read_on_bb:1;
unsigned long flags;
spin_lock_irqsave(&dd->dust_lock, flags);
- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ bblock = dust_rb_search(&dd->badblocklist, block);
if (bblock == NULL) {
if (!dd->quiet_mode) {
}
spin_lock_irqsave(&dd->dust_lock, flags);
- bblock->bb = block * dd->sect_per_block;
+ bblock->bb = block;
if (!dust_rb_insert(&dd->badblocklist, bblock)) {
if (!dd->quiet_mode) {
DMERR("%s: block %llu already in badblocklist",
unsigned long flags;
spin_lock_irqsave(&dd->dust_lock, flags);
- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ bblock = dust_rb_search(&dd->badblocklist, block);
if (bblock != NULL)
DMINFO("%s: block %llu found in badblocklist", __func__, block);
else
int ret = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
+ thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
ret = __dust_map_read(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
unsigned long flags;
if (fail_read_on_bb) {
+ thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
__dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
dd->blksz = blksz;
dd->start = tmp;
+ dd->sect_per_block_shift = __ffs(sect_per_block);
+
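Since sect_per_block is a power of two, __ffs() yields its log2 and the division in the map path becomes a right shift. A quick standalone check of that equivalence (__builtin_ctz stands in for __ffs; the block and sector sizes are assumptions):

/* Sketch of the shift optimisation behind sect_per_block_shift. */
#include <stdio.h>

int main(void)
{
	unsigned int sect_per_block = 8;            /* assumed: 4 KiB blocks, 512 B sectors */
	int shift = __builtin_ctz(sect_per_block);  /* 3 */
	unsigned long long sector = 1000;

	printf("%llu %llu\n", sector >> shift, sector / sect_per_block); /* both 125 */
	return 0;
}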
/*
* Whether to fail a read on a "bad" block.
* Defaults to false; enabled later by message.
queue_work(ic->wait_wq, &dio->work);
return;
}
+ if (journal_read_pos != NOT_FOUND)
+ dio->range.n_sectors = ic->sectors_per_block;
wait_and_add_new_range(ic, &dio->range);
+ /*
+ * wait_and_add_new_range drops the spinlock, so the journal
+ * may have been changed arbitrarily. We need to recheck.
+ * To simplify the code, we restrict I/O size to just one block.
+ */
+ if (journal_read_pos != NOT_FOUND) {
+ sector_t next_sector;
+ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ if (unlikely(new_pos != journal_read_pos)) {
+ remove_range_unlocked(ic, &dio->range);
+ goto retry;
+ }
+ }
}
spin_unlock_irq(&ic->endio_wait.lock);
* no point in continuing.
*/
if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
- job->master_job->write_err)
+ job->master_job->write_err) {
+ job->write_err = job->master_job->write_err;
return -EIO;
+ }
io_job_start(job->kc->throttle);
else
job->read_err = 1;
push(&kc->complete_jobs, job);
+ wake(kc);
break;
}
*/
r = rs_prepare_reshape(rs);
if (r)
- return r;
+ goto bad;
/* Reshaping ain't recovery, so disable recovery */
rs_setup_recovery(rs, MaxSector);
}
EXPORT_SYMBOL(dm_table_event);
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
unsigned int l, n = 0, k = 0;
sector_t *node;
+ if (unlikely(sector >= dm_table_get_size(t)))
+ return &t->targets[t->num_targets];
+
for (l = 0; l < t->depth; l++) {
n = get_child(n, k);
node = get_node(t, l, n);
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
* (1) Super block (1 block)
* (2) Chunk mapping table (nr_map_blocks)
* (3) Bitmap blocks (nr_bitmap_blocks)
- * All metadata blocks are stored in conventional zones, starting from the
+ * All metadata blocks are stored in conventional zones, starting from
* the first conventional zone found on disk.
*/
struct dmz_super {
* Lock/unlock metadata access. This is a "read" lock on a semaphore
* that prevents metadata flush from running while metadata are being
* modified. The actual metadata write mutual exclusion is achieved with
- * the map lock and zone styate management (active and reclaim state are
+ * the map lock and zone state management (active and reclaim state are
* mutually exclusive).
*/
void dmz_lock_metadata(struct dmz_metadata *zmd)
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return ERR_PTR(-EIO);
+
/* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
dmz_free_mblock(zmd, mblk);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
spin_lock(&zmd->mblk_lock);
if (!mblk) {
/* Cache miss: read the block from disk */
mblk = dmz_get_mblock_slow(zmd, mblk_no);
- if (!mblk)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(mblk))
+ return mblk;
}
/* Wait for on-going read I/O and check for error */
/*
* Issue a metadata block write BIO.
*/
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
- unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+ unsigned int set)
{
sector_t block = zmd->sb[set].block + mblk->no;
struct bio *bio;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return -EIO;
+
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
set_bit(DMZ_META_ERROR, &mblk->state);
- return;
+ return -ENOMEM;
}
set_bit(DMZ_META_WRITING, &mblk->state);
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
+
+ return 0;
}
/*
struct bio *bio;
int ret;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return -EIO;
+
bio = bio_alloc(GFP_NOIO, 1);
if (!bio)
return -ENOMEM;
{
struct dmz_mblock *mblk;
struct blk_plug plug;
- int ret = 0;
+ int ret = 0, nr_mblks_submitted = 0;
/* Issue writes */
blk_start_plug(&plug);
- list_for_each_entry(mblk, write_list, link)
- dmz_write_mblock(zmd, mblk, set);
+ list_for_each_entry(mblk, write_list, link) {
+ ret = dmz_write_mblock(zmd, mblk, set);
+ if (ret)
+ break;
+ nr_mblks_submitted++;
+ }
blk_finish_plug(&plug);
/* Wait for completion */
list_for_each_entry(mblk, write_list, link) {
+ if (!nr_mblks_submitted)
+ break;
wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
ret = -EIO;
}
+ nr_mblks_submitted--;
}
/* Flush drive cache (this will also sync data) */
*/
dmz_lock_flush(zmd);
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ ret = -EIO;
+ goto out;
+ }
+
/* Get dirty blocks */
spin_lock(&zmd->mblk_lock);
list_splice_init(&zmd->mblk_dirty_list, &write_list);
struct dm_zone *zone;
if (list_empty(&zmd->map_rnd_list))
- return NULL;
+ return ERR_PTR(-EBUSY);
list_for_each_entry(zone, &zmd->map_rnd_list, link) {
if (dmz_is_buf(zone))
return dzone;
}
- return NULL;
+ return ERR_PTR(-EBUSY);
}
/*
struct dm_zone *zone;
if (list_empty(&zmd->map_seq_list))
- return NULL;
+ return ERR_PTR(-EBUSY);
list_for_each_entry(zone, &zmd->map_seq_list, link) {
if (!zone->bzone)
return zone;
}
- return NULL;
+ return ERR_PTR(-EBUSY);
}
/*
if (op != REQ_OP_WRITE)
goto out;
- /* Alloate a random zone */
+ /* Allocate a random zone */
dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!dzone) {
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
dmz_wait_for_free_zones(zmd);
goto again;
}
if (bzone)
goto out;
- /* Alloate a random zone */
+ /* Allocate a random zone */
bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!bzone) {
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ bzone = ERR_PTR(-EIO);
+ goto out;
+ }
dmz_wait_for_free_zones(zmd);
goto again;
}
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
/*
* Number of seconds of target BIO inactivity to consider the target idle.
*/
-#define DMZ_IDLE_PERIOD (10UL * HZ)
+#define DMZ_IDLE_PERIOD (10UL * HZ)
/*
* Percentage of unmapped (free) random zones below which reclaim starts
set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
while (block < end_block) {
+ if (dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, src_zone, &block);
if (ret <= 0)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
/*
* Find a candidate zone for reclaim and process it.
*/
-static void dmz_reclaim(struct dmz_reclaim *zrc)
+static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
struct dmz_metadata *zmd = zrc->metadata;
struct dm_zone *dzone;
/* Get a data zone */
dzone = dmz_get_zone_for_reclaim(zmd);
- if (!dzone)
- return;
+ if (IS_ERR(dzone))
+ return PTR_ERR(dzone);
start = jiffies;
out:
if (ret) {
dmz_unlock_zone_reclaim(dzone);
- return;
+ return ret;
}
- (void) dmz_flush_metadata(zrc->metadata);
+ ret = dmz_flush_metadata(zrc->metadata);
+ if (ret) {
+ dmz_dev_debug(zrc->dev,
+ "Metadata flush for zone %u failed, err %d\n",
+ dmz_id(zmd, rzone), ret);
+ return ret;
+ }
dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+ return 0;
}
/*
return false;
/*
- * If the percentage of unmappped random zones is low,
+ * If the percentage of unmapped random zones is low,
* reclaim even if the target is busy.
*/
return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
struct dmz_metadata *zmd = zrc->metadata;
unsigned int nr_rnd, nr_unmap_rnd;
unsigned int p_unmap_rnd;
+ int ret;
+
+ if (dmz_bdev_is_dying(zrc->dev))
+ return;
if (!dmz_should_reclaim(zrc)) {
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
(dmz_target_idle(zrc) ? "Idle" : "Busy"),
p_unmap_rnd, nr_unmap_rnd, nr_rnd);
- dmz_reclaim(zrc);
+ ret = dmz_do_reclaim(zrc);
+ if (ret) {
+ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
+ if (ret == -EIO)
+ /*
+ * The LLD might be performing an error handling sequence
+ * on the underlying device. To avoid interfering, do not
+ * schedule the next reclaim run immediately.
+ */
+ return;
+ }
dmz_schedule_reclaim(zrc);
}
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
refcount_inc(&bioctx->ref);
generic_make_request(clone);
+ if (clone->bi_status == BLK_STS_IOERR)
+ return -EIO;
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
/* Get the buffer zone. One will be allocated if needed */
bzone = dmz_get_chunk_buffer(zmd, zone);
- if (!bzone)
- return -ENOSPC;
+ if (IS_ERR(bzone))
+ return PTR_ERR(bzone);
if (dmz_is_readonly(bzone))
return -EROFS;
dmz_lock_metadata(zmd);
+ if (dmz->dev->flags & DMZ_BDEV_DYING) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
+ if (ret)
+ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
/* Process queued flush requests */
while (1) {
* Get a chunk work and start it to process a new BIO.
* If the BIO chunk has no work yet, create one.
*/
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
struct dm_chunk_work *cw;
+ int ret = 0;
mutex_lock(&dmz->chunk_lock);
/* Get the BIO chunk work. If one is not active yet, create one */
cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
if (!cw) {
- int ret;
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
- if (!cw)
+ if (unlikely(!cw)) {
+ ret = -ENOMEM;
goto out;
+ }
INIT_WORK(&cw->work, dmz_chunk_work);
refcount_set(&cw->refcount, 0);
ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
if (unlikely(ret)) {
kfree(cw);
- cw = NULL;
goto out;
}
}
bio_list_add(&cw->bio_list, bio);
dmz_get_chunk_work(cw);
+ dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
mutex_unlock(&dmz->chunk_lock);
+ return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+ disk = dmz_dev->bdev->bd_disk;
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ } else if (disk->fops->check_events) {
+ if (disk->fops->check_events(disk, 0) &
+ DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+ }
+ }
+
+ return dmz_dev->flags & DMZ_BDEV_DYING;
}
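
A standalone sketch (plain C, hypothetical names) of the latch-once pattern dmz_bdev_is_dying() implements: the relatively expensive backing-device probes run only until the first failure, after which the sticky flag answers every later call:

#include <stdbool.h>
#include <stdio.h>

#define DEV_DYING (1u << 0)

struct dev {
	unsigned int flags;
};

/* stand-in for blk_queue_dying()/check_events(); assumed to report failure */
static bool backend_unhealthy(void)
{
	return true;
}

static bool dev_is_dying(struct dev *dev)
{
	if (!(dev->flags & DEV_DYING) && backend_unhealthy())
		dev->flags |= DEV_DYING;	/* latched, never cleared */

	return dev->flags & DEV_DYING;
}

int main(void)
{
	struct dev d = { 0 };

	printf("dying: %d\n", dev_is_dying(&d));	/* probes and latches */
	printf("dying: %d\n", dev_is_dying(&d));	/* answered from the flag */
	return 0;
}
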
/*
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
+ int ret;
+
+ if (dmz_bdev_is_dying(dmz->dev))
+ return DM_MAPIO_KILL;
dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
bio_op(bio), (unsigned long long)sector, nr_sectors,
dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
/* Now ready to handle this BIO */
- dmz_reclaim_bio_acc(dmz->reclaim);
- dmz_queue_chunk_work(dmz, bio);
+ ret = dmz_queue_chunk_work(dmz, bio);
+ if (ret) {
+ dmz_dev_debug(dmz->dev,
+ "BIO op %d, can't process chunk %llu, err %i\n",
+ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+ ret);
+ return DM_MAPIO_REQUEUE;
+ }
return DM_MAPIO_SUBMITTED;
}
{
struct dmz_target *dmz = ti->private;
+ if (dmz_bdev_is_dying(dmz->dev))
+ return -ENODEV;
+
*bdev = dmz->dev->bdev;
return 0;
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
unsigned int nr_zones;
+ unsigned int flags;
+
sector_t zone_nr_sectors;
unsigned int zone_nr_sectors_shift;
(dev)->zone_nr_sectors_shift)
#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+/* Device flags. */
+#define DMZ_BDEV_DYING (1 << 0)
+
/*
* Zone descriptor.
*/
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
+/*
+ * Functions defined in dm-zoned-target.c
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+
#endif /* DM_ZONED_H */
new_parent = shadow_current(s);
+ pn = dm_block_data(new_parent);
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+
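+ /*
+ * The left child must be fully initialized from the parent's data
+ * before new_block() is called for the right child: if that second
+ * allocation fails, the left block is unlocked and may be written
+ * back, and a still-zeroed left block would corrupt the btree.
+ */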
+ /* create & init the left block */
r = new_block(s->info, &left);
if (r < 0)
return r;
+ ln = dm_block_data(left);
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+ /* create & init the right block */
r = new_block(s->info, &right);
if (r < 0) {
unlock_block(s->info, left);
return r;
}
- pn = dm_block_data(new_parent);
- ln = dm_block_data(left);
rn = dm_block_data(right);
-
- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
- ln->header.flags = pn->header.flags;
- ln->header.nr_entries = cpu_to_le32(nr_left);
- ln->header.max_entries = pn->header.max_entries;
- ln->header.value_size = pn->header.value_size;
-
rn->header.flags = pn->header.flags;
rn->header.nr_entries = cpu_to_le32(nr_right);
rn->header.max_entries = pn->header.max_entries;
rn->header.value_size = pn->header.value_size;
-
- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
- sizeof(__le64) : s->info->value_type.size;
- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
nr_right * size);
}
if (smm->recursion_count == 1)
- apply_bops(smm);
+ r = apply_bops(smm);
smm->recursion_count--;
*/
pixsize = vout->bpp * vout->vrfb_bpp;
- dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
+ dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
xt->src_start = vout->buf_phy_addr[vb->i];
xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
return 0;
}
-static int rk8xx_suspend(struct device *dev)
+static int __maybe_unused rk8xx_suspend(struct device *dev)
{
struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
int ret = 0;
return ret;
}
-static int rk8xx_resume(struct device *dev)
+static int __maybe_unused rk8xx_resume(struct device *dev)
{
struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
int ret = 0;
return ret;
}
-SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
+static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
static struct i2c_driver rk808_i2c_driver = {
.driver = {
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
+ depends on HAS_IOMEM
help
This option enables support for the Xilinx SDFEC (Soft Decision
Forward Error Correction) driver. This enables a char driver
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
- goto free_ctx;
+ kfree(hdev->kernel_ctx);
+ goto mmu_fini;
}
rc = hl_cb_pool_init(hdev);
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
-free_ctx:
- kfree(hdev->kernel_ctx);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
}
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
{
- /* Not needed in Goya */
+ /* The QMANs are on the SRAM, so we need to copy to IO space */
+ memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
}
static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
int rc;
dev_dbg(hdev->dev, "DMA packet details:\n");
- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->dst_addr));
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
struct packet_lin_dma *user_dma_pkt)
{
dev_dbg(hdev->dev, "DMA packet details:\n");
- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->dst_addr));
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
/*
* WA for HW-23.
dev_dbg(hdev->dev, "WREG32 packet details:\n");
dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
- dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+ dev_dbg(hdev->dev, "value == 0x%x\n",
+ le32_to_cpu(wreg_pkt->value));
if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
- void *user_pkt;
+ struct goya_packet *user_pkt;
- user_pkt = (void *) (uintptr_t)
+ user_pkt = (struct goya_packet *) (uintptr_t)
(parser->user_cb->kernel_address + cb_parsed_length);
- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
* need to validate here as well because patch_cb() is
* not called in MMU path while this function is called
*/
- rc = goya_validate_wreg32(hdev, parser, user_pkt);
+ rc = goya_validate_wreg32(hdev,
+ parser, (struct packet_wreg32 *) user_pkt);
break;
case PACKET_WREG_BULK:
case PACKET_LIN_DMA:
if (is_mmu)
rc = goya_validate_dma_pkt_mmu(hdev, parser,
- user_pkt);
+ (struct packet_lin_dma *) user_pkt);
else
rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
- user_pkt);
+ (struct packet_lin_dma *) user_pkt);
break;
case PACKET_MSG_LONG:
enum packet_id pkt_id;
u16 pkt_size;
u32 new_pkt_size = 0;
- void *user_pkt, *kernel_pkt;
+ struct goya_packet *user_pkt, *kernel_pkt;
- user_pkt = (void *) (uintptr_t)
+ user_pkt = (struct goya_packet *) (uintptr_t)
(parser->user_cb->kernel_address + cb_parsed_length);
- kernel_pkt = (void *) (uintptr_t)
+ kernel_pkt = (struct goya_packet *) (uintptr_t)
(parser->patched_cb->kernel_address +
cb_patched_cur_length);
- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
switch (pkt_id) {
case PACKET_LIN_DMA:
- rc = goya_patch_dma_packet(hdev, parser, user_pkt,
- kernel_pkt, &new_pkt_size);
+ rc = goya_patch_dma_packet(hdev, parser,
+ (struct packet_lin_dma *) user_pkt,
+ (struct packet_lin_dma *) kernel_pkt,
+ &new_pkt_size);
cb_patched_cur_length += new_pkt_size;
break;
case PACKET_WREG_32:
memcpy(kernel_pkt, user_pkt, pkt_size);
cb_patched_cur_length += pkt_size;
- rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
+ rc = goya_validate_wreg32(hdev, parser,
+ (struct packet_wreg32 *) kernel_pkt);
break;
case PACKET_WREG_BULK:
size_t total_pkt_size;
long result;
int rc;
+ int irq_num_entries, irq_arr_index;
+ __le32 *goya_irq_arr;
total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
irq_arr_size;
if (!pkt)
return -ENOMEM;
- pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
- memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+ irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
+ pkt->length = cpu_to_le32(irq_num_entries);
+
+ /* We must perform any necessary endianness conversion on the irq
+ * array being passed to the Goya hardware
+ */
+ for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
+ irq_arr_index < irq_num_entries ; irq_arr_index++)
+ goya_irq_arr[irq_arr_index] =
+ cpu_to_le32(irq_arr[irq_arr_index]);
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
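
The loop above hands the IRQ numbers to the device in explicit little-endian form rather than memcpy()ing CPU-order values. A self-contained demo of the same per-element conversion, with glibc's htole32() playing the role of cpu_to_le32():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t irqs[3] = { 0x100, 0x200, 0x300 };	/* CPU byte order */
	uint32_t wire[3];				/* device (LE) order */

	for (unsigned int i = 0; i < 3; i++)
		wire[i] = htole32(irqs[i]);

	printf("wire[0] = 0x%08x\n", (unsigned int)wire[0]);
	return 0;
}
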
.resume = goya_resume,
.cb_mmap = goya_cb_mmap,
.ring_doorbell = goya_ring_doorbell,
- .flush_pq_write = goya_flush_pq_write,
+ .pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
.asic_dma_free_coherent = goya_dma_free_coherent,
.get_int_queue_base = goya_get_int_queue_base,
void goya_late_fini(struct hl_device *hdev);
void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
void goya_update_eq_ci(struct hl_device *hdev, u32 val);
void goya_restore_phase_topology(struct hl_device *hdev);
int goya_context_switch(struct hl_device *hdev, u32 asid);
* @resume: handles IP specific H/W or SW changes for resume.
* @cb_mmap: maps a CB.
* @ring_doorbell: increment PI on a given QMAN.
- * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
+ * @pqe_write: Write the PQ entry to the PQ. This is an ASIC-specific
+ * function because the PQs are located in different memory areas
+ * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
+ * writing the PQE must match the destination memory area
+ * properties.
* @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
* dma_alloc_coherent(). This is ASIC function because
* its implementation is not trivial when the driver
int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u64 kaddress, phys_addr_t paddress, u32 size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
- void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
+ void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
+ struct hl_bd *bd);
void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_bd bd;
- u64 *pi, *pbd = (u64 *) &bd;
+ __le64 *pi;
bd.ctl = 0;
- bd.len = __cpu_to_le32(job->job_cb_size);
- bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
+ bd.len = cpu_to_le32(job->job_cb_size);
+ bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
- pi = (u64 *) (uintptr_t) (q->kernel_address +
+ pi = (__le64 *) (uintptr_t) (q->kernel_address +
((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
- pi[0] = pbd[0];
- pi[1] = pbd[1];
-
q->pi++;
q->pi &= ((q->int_queue_len << 1) - 1);
- /* Flush PQ entry write. Relevant only for specific ASICs */
- hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
+ hdev->asic_funcs->pqe_write(hdev, pi, &bd);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
#define GOYA_PKT_CTL_MB_SHIFT 31
#define GOYA_PKT_CTL_MB_MASK 0x80000000
+/* All packets have, at least, an 8-byte header, which contains
+ * the packet type. The kernel driver uses the packet header for packet
+ * validation and to perform any required preparation before
+ * sending them off to the hardware.
+ */
+struct goya_packet {
+ __le64 header;
+ /* The rest of the packet data follows. Use the corresponding
+ * packet_XXX struct to dereference the data, based on packet type
+ */
+ u8 contents[0];
+};
+
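
How a parser extracts the packet ID from that little-endian header, sketched as standalone C. The field position used here (the top byte) is a demo assumption, not Goya's actual PACKET_HEADER_PACKET_ID layout:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PKT_ID_SHIFT 56
#define PKT_ID_MASK (0xffull << PKT_ID_SHIFT)

struct pkt {
	uint64_t header;	/* little-endian on the wire */
	uint8_t contents[];	/* packet_XXX payload follows */
};

static unsigned int pkt_id(const struct pkt *p)
{
	return (le64toh(p->header) & PKT_ID_MASK) >> PKT_ID_SHIFT;
}

int main(void)
{
	struct pkt p = { .header = htole64(0x05ull << PKT_ID_SHIFT) };

	printf("packet id %u\n", pkt_id(&p));
	return 0;
}
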
struct packet_nop {
__le32 reserved;
__le32 ctl;
struct hl_cs_job *job;
bool shadow_index_valid;
u16 shadow_index;
- u32 *cq_entry;
- u32 *cq_base;
+ struct hl_cq_entry *cq_entry, *cq_base;
if (hdev->disabled) {
dev_dbg(hdev->dev,
return IRQ_HANDLED;
}
- cq_base = (u32 *) (uintptr_t) cq->kernel_address;
+ cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
while (1) {
- bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
+ bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
+ CQ_ENTRY_READY_MASK)
>> CQ_ENTRY_READY_SHIFT);
if (!entry_ready)
break;
- cq_entry = (u32 *) &cq_base[cq->ci];
+ cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
- /*
- * Make sure we read CQ entry contents after we've
+ /* Make sure we read CQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
- shadow_index_valid =
- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+ shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
+ CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
- shadow_index = (u16)
- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
+ shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
+ CQ_ENTRY_SHADOW_INDEX_MASK)
>> CQ_ENTRY_SHADOW_INDEX_SHIFT);
queue = &hdev->kernel_queues[cq->hw_queue_id];
queue_work(hdev->cq_wq, &job->finish_work);
}
- /*
- * Update ci of the context's queue. There is no
+ /* Update ci of the context's queue. There is no
* need to protect it with spinlock because this update is
* done only inside IRQ and there is a different IRQ per
* queue
queue->ci = hl_queue_inc_ptr(queue->ci);
/* Clear CQ entry ready bit */
- cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
+ cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
+ ~CQ_ENTRY_READY_MASK);
cq->ci = hl_cq_inc_ptr(cq->ci);
dev_dbg(hdev->dev,
"page list 0x%p of asid %d is still alive\n",
phys_pg_list, ctx->asid);
+ atomic64_sub(phys_pg_list->total_size,
+ &hdev->dram_used_mem);
free_phys_pg_pack(hdev, phys_pg_list);
idr_remove(&vm->phys_pg_pack_handles, i);
}
* recurse past the end of THREAD_SIZE by default.
*/
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
void lkdtm_EXHAUST_STACK(void)
{
- pr_info("Calling function with %d frame size to depth %d ...\n",
+ pr_info("Calling function with %lu frame size to depth %d ...\n",
REC_STACK_SIZE, recur_count);
recursive_loop(recur_count);
pr_info("FAIL: survived without exhausting stack?!\n");
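
The two hunks above go together: CONFIG_FRAME_WARN expands to a bare decimal literal (an int), so REC_STACK_SIZE needed the _AC(..., UL) wrapper to become unsigned long before the pr_info() format could honestly say %lu. A standalone demo of the paste (FRAME_WARN stands in for the Kconfig symbol; __AC/_AC are copied from include/uapi/linux/const.h):

#include <stdio.h>

#define __AC(X, Y) (X##Y)
#define _AC(X, Y) __AC(X, Y)

#define FRAME_WARN 1024	/* stand-in for CONFIG_FRAME_WARN */
#define REC_STACK_SIZE (_AC(FRAME_WARN, UL) / 2)

int main(void)
{
	printf("%lu\n", REC_STACK_SIZE);	/* 512, now genuinely unsigned long */
	return 0;
}

The indirection through __AC() matters: arguments of a macro that uses ## are not expanded first, so the paste has to happen one level down, after FRAME_WARN has already become 1024.
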
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
+
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
}
if (page) {
- vmballoon_mark_page_offline(page, ctl->page_size);
/* Success. Add the page to the list and continue. */
list_add(&page->lru, &ctl->pages);
continue;
list_for_each_entry_safe(page, tmp, page_list, lru) {
list_del(&page->lru);
- vmballoon_mark_page_online(page, page_size);
__free_pages(page, vmballoon_page_order(page_size));
}
enum vmballoon_page_size_type page_size)
{
unsigned long flags;
+ struct page *page;
if (page_size == VMW_BALLOON_4K_PAGE) {
balloon_page_list_enqueue(&b->b_dev_info, pages);
* for the balloon compaction mechanism.
*/
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+
+ list_for_each_entry(page, pages, lru) {
+ vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
+ }
+
list_splice_init(pages, &b->huge_pages);
__count_vm_events(BALLOON_INFLATE, *n_pages *
vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
/* 2MB pages */
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
+ vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
+
list_move(&page->lru, pages);
if (++i == n_req_pages)
break;
entry = container_of(resource, struct dbell_entry, resource);
if (entry->run_delayed) {
- schedule_work(&entry->work);
+ if (!schedule_work(&entry->work))
+ vmci_resource_put(resource);
} else {
entry->notify_cb(entry->client_data);
vmci_resource_put(resource);
atomic_read(&dbell->active) == 1) {
if (dbell->run_delayed) {
vmci_resource_get(&dbell->resource);
- schedule_work(&dbell->work);
+ if (!schedule_work(&dbell->work))
+ vmci_resource_put(&dbell->resource);
} else {
dbell->notify_cb(dbell->client_data);
}
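
Both hunks above fix the same leak: a reference is taken on behalf of the queued work, but schedule_work() returns false when the item is already pending, meaning no new execution (and no matching put) will ever happen. A compact single-threaded model of the accounting (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct resource {
	int refcount;
	bool work_pending;
};

/* models schedule_work(): false means the item was already queued */
static bool queue_work_once(struct resource *r)
{
	if (r->work_pending)
		return false;
	r->work_pending = true;
	return true;
}

static void resource_put(struct resource *r)
{
	r->refcount--;
}

static void notify(struct resource *r)
{
	r->refcount++;			/* reference owned by the work item */
	if (!queue_work_once(r))
		resource_put(r);	/* nothing new queued: drop it again */
}

int main(void)
{
	struct resource r = { .refcount = 1 };

	notify(&r);	/* queues work; the reference travels with it */
	notify(&r);	/* already pending; the extra reference is dropped */
	printf("refcount %d\n", r.refcount);	/* 2: base + queued work */
	return 0;
}
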
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
- err = mmc_wait_for_cmd(host, &cmd, 0);
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
goto out;
goto err;
}
+ /*
+ * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
+ * these bits as invalid, and especially also bit 7.
+ */
+ ocr &= ~0x7FFF;
+
rocr = mmc_select_voltage(host, ocr);
/*
struct dma_chan *terminate_chan = NULL;
struct mmc_request *mrq;
- cancel_delayed_work_sync(&host->timeout_work);
+ cancel_delayed_work(&host->timeout_work);
mrq = host->mrq;
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- pm_runtime_enable(&pdev->dev);
-
ret = renesas_sdhi_clk_enable(host);
if (ret)
goto efree;
efree:
tmio_mmc_host_free(host);
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
- pm_runtime_disable(&pdev->dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_cdns_hs400_enhanced_strobe;
+ sdhci_enable_v4_mode(host);
sdhci_get_of_property(pdev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
+ /* HS200 is broken at this moment */
+ host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+
ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
mmc_hostname(host->mmc));
host->flags &= ~SDHCI_SIGNALING_330;
host->flags |= SDHCI_SIGNALING_180;
- host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
host->mmc->caps2 |= MMC_CAP2_NO_SD;
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
pci_write_config_dword(chip->pdev,
const struct sdhci_pci_fixes sdhci_o2 = {
.probe = sdhci_pci_o2_probe,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
.probe_slot = sdhci_pci_o2_probe_slot,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_o2_resume,
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
u32 div, val, mask;
- div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
- sdhci_enable_clk(host, clk);
+ div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+ div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ sdhci_enable_clk(host, div);
/* enable auto gate sdhc_enable_auto_gate */
val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
return 1 << 31;
}
+static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
+{
+ return 0;
+}
+
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
.hw_reset = sdhci_sprd_hw_reset,
.get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
+ .get_ro = sdhci_sprd_get_ro,
};
static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MISSING_CAPS,
.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
- SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
+ SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_sprd_ops,
};
sdhci_enable_v4_mode(host);
+ /*
+ * Supply the existing CAPS, but clear the UHS-I modes. This
+ * will allow these modes to be specified only by device
+ * tree properties through mmc_of_parse().
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;
}
}
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
+{
+ /*
+ * Write-enable shall be assumed if GPIO is missing in a board's
+ * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
+ * Tegra.
+ */
+ return mmc_gpio_get_ro(host->mmc);
+}
+
static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
};
static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
};
static const struct sdhci_ops tegra114_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra_sdhci_writew,
.write_l = tegra_sdhci_writel,
};
static const struct sdhci_ops tegra210_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra210_sdhci_writew,
.write_l = tegra_sdhci_writel,
};
static const struct sdhci_ops tegra186_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
host->mmc->f_max = pdata->hclk;
host->mmc->f_min = pdata->hclk / 512;
- pm_runtime_enable(&pdev->dev);
-
ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
tmio_mmc_host_remove(host);
host_free:
tmio_mmc_host_free(host);
- pm_runtime_disable(&pdev->dev);
cell_disable:
if (cell->disable)
cell->disable(pdev);
if (cell->disable)
cell->disable(pdev);
- pm_runtime_disable(&pdev->dev);
-
return 0;
}
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
+ bool runtime_synced;
bool sdio_irq_enabled;
/* Mandatory callback */
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-/**
- * tmio_mmc_host_probe() - Common probe for all implementations
- * @_host: Host to probe
- *
- * Perform tasks common to all implementations probe functions.
- *
- * The caller should have called pm_runtime_enable() prior to calling
- * the common probe function.
- */
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
ret = mmc_add_host(mmc);
if (ret)
goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
+ pm_runtime_put(&pdev->dev);
return 0;
remove_host:
+ pm_runtime_put_noidle(&pdev->dev);
tmio_mmc_host_remove(_host);
return ret;
}
struct platform_device *pdev = host->pdev;
struct mmc_host *mmc = host->mmc;
+ pm_runtime_get_sync(&pdev->dev);
+
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
- if (!host->native_hotplug)
- pm_runtime_get_sync(&pdev->dev);
-
dev_pm_qos_hide_latency_limit(&pdev->dev);
mmc_remove_host(mmc);
tmio_mmc_release_dma(host);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (host->native_hotplug)
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
+ if (!host->runtime_synced) {
+ host->runtime_synced = true;
+ return 0;
+ }
+
tmio_mmc_clk_enable(host);
tmio_mmc_hw_reset(host->mmc);
host->clk_disable = uniphier_sd_clk_disable;
host->set_clock = uniphier_sd_set_clock;
- pm_runtime_enable(&pdev->dev);
ret = uniphier_sd_clk_enable(host);
if (ret)
goto free_host;
free_host:
tmio_mmc_host_free(host);
- pm_runtime_disable(&pdev->dev);
return ret;
}
tmio_mmc_host_remove(host);
uniphier_sd_clk_disable(host);
- pm_runtime_disable(&pdev->dev);
return 0;
}
menuconfig MTD_HYPERBUS
tristate "HyperBus support"
+ depends on HAS_IOMEM
select MTD_CFI
select MTD_MAP_BANK_WIDTH_2
select MTD_CFI_AMDSTD
default:
printk(KERN_WARNING "SA1100 flash: unknown base address "
"0x%08lx, assuming CS0\n", phys);
+ /* Fall through */
case SA1100_CS0_PHYS:
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
default:
/* Kept only for backward compatibility purpose. */
params->quad_enable = spansion_quad_enable;
- if (nor->clear_sr_bp)
- nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
break;
}
int err;
if (nor->clear_sr_bp) {
+ if (nor->quad_enable == spansion_quad_enable)
+ nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
+
err = nor->clear_sr_bp(nor);
if (err) {
dev_err(nor->dev,
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_GSO_UDP_L4;
bond_dev->mpls_features = mpls_features;
bond_dev->gso_max_segs = gso_max_segs;
unsigned long *supported,
struct phylink_link_state *state)
{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
if (!phy_interface_mode_is_rgmii(state->interface) &&
state->interface != PHY_INTERFACE_MODE_INTERNAL &&
state->interface != PHY_INTERFACE_MODE_MOCA) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- dev_err(ds->dev,
- "Unsupported interface: %d\n", state->interface);
+ if (port != core_readl(priv, CORE_IMP0_PRT_ID))
+ dev_err(ds->dev,
+ "Unsupported interface: %d for port %d\n",
+ state->interface, port);
return;
}
u32 id_mode_dis = 0, port_mode;
u32 reg, offset;
+ if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ return;
+
if (priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
{ .compatible = "microchip,ksz9897" },
{ .compatible = "microchip,ksz9893" },
{ .compatible = "microchip,ksz9563" },
+ { .compatible = "microchip,ksz8563" },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
{ \
+ .name = #width, \
.val_bits = (width), \
.reg_stride = (width) / 8, \
.reg_bits = (regbits) + (regalign), \
{
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
- u16 rx_vid, tx_vid;
int i;
- rx_vid = dsa_8021q_rx_vid(ds, port);
- tx_vid = dsa_8021q_tx_vid(ds, port);
-
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
ret = xgbe_platform_init();
if (ret)
- return ret;
+ goto err_platform_init;
ret = xgbe_pci_init();
if (ret)
- return ret;
+ goto err_pci_init;
return 0;
+
+err_pci_init:
+ xgbe_platform_exit();
+err_platform_init:
+ unregister_netdevice_notifier(&xgbe_netdev_notifier);
+ return ret;
}
static void __exit xgbe_mod_exit(void)
if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
break;
}
- if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ if (rule && rule->type == aq_rx_filter_vlan &&
+ be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
struct ethtool_rxnfc cmd;
cmd.fs.location = rule->aq_fsp.location;
return err;
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
+ if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
!(aq_nic->packet_filter & IFF_PROMISC));
aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
if (err < 0)
goto err_exit;
+ err = aq_filters_vlans_update(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
self->aq_nic_cfg.link_irq_vec);
err = request_threaded_irq(irqvec, NULL,
aq_linkstate_threaded_isr,
- IRQF_SHARED,
+ IRQF_SHARED | IRQF_ONESHOT,
self->ndev->name, self);
if (err < 0)
goto err_exit;
}
}
+err_exit:
if (!was_tx_cleaned)
work_done = budget;
1U << self->aq_ring_param.vec_idx);
}
}
-err_exit:
+
return work_done;
}
/* if VF indicate to PF this function is going down (PF will delete sp
* elements and clear initializations
*/
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_clear_vlan_info(bp);
bnx2x_vfpf_close_vf(bp);
- else if (unload_mode != UNLOAD_RECOVERY)
+ } else if (unload_mode != UNLOAD_RECOVERY) {
/* if this is a normal/close unload need to clean up chip*/
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
- else {
+ } else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
/**
* bnx2x_sp_event - handle ramrods completion.
*
return rc;
}
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+
+ bp->vlan_cnt = 0;
+}
+
static int bnx2x_del_all_vlans(struct bnx2x *bp)
{
struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
unsigned long ramrod_flags = 0, vlan_flags = 0;
- struct bnx2x_vlan_entry *vlan;
int rc;
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
if (rc)
return rc;
- /* Mark that hw forgot all entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
+ bnx2x_clear_vlan_info(bp);
return 0;
}
if (bnapi->events & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bnapi->events & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
bnapi->events = 0;
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
+ bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
int i, rc = 0;
u32 type;
if (rc)
goto err_out;
bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ /* If we have agg rings, post agg buffers first. */
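+ /* (Ordering rationale: once the rx producer doorbell is written,
+ * the NIC may place packets that need aggregation buffers, so the
+ * agg ring has to be populated and doorbelled first.)
+ */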
+ if (!agg_rings)
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
}
}
- if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (agg_rings) {
type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
ring->fw_ring_id);
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
bnxt_hwrm_vnic_set_rss(bp, i, false);
}
-static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
- bool irq_re_init)
+static void bnxt_clear_vnic(struct bnxt *bp)
{
- if (bp->vnic_info) {
- bnxt_hwrm_clear_vnic_filter(bp);
+ if (!bp->vnic_info)
+ return;
+
+ bnxt_hwrm_clear_vnic_filter(bp);
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
/* clear all RSS setting before free vnic ctx */
bnxt_hwrm_clear_vnic_rss(bp);
bnxt_hwrm_vnic_ctx_free(bp);
- /* before free the vnic, undo the vnic tpa settings */
- if (bp->flags & BNXT_FLAG_TPA)
- bnxt_set_tpa(bp, false);
- bnxt_hwrm_vnic_free(bp);
}
+ /* before freeing the vnic, undo the vnic tpa settings */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ bnxt_hwrm_vnic_free(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ bnxt_hwrm_vnic_ctx_free(bp);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+ bool irq_re_init)
+{
+ bnxt_clear_vnic(bp);
bnxt_hwrm_ring_free(bp, close_path);
bnxt_hwrm_ring_grp_free(bp);
if (irq_re_init) {
if (idx)
req->dimensions = cpu_to_le16(1);
- if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
memcpy(data_addr, buf, bytesize);
-
- rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message_silent(bp, msg, msg_len,
+ HWRM_CMD_TIMEOUT);
+ }
if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
memcpy(buf, data_addr, bytesize);
mutex_lock(&bp->hwrm_cmd_lock);
hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
- if (hwrm_err)
- goto flash_pkg_exit;
-
- if (resp->error_code) {
+ if (hwrm_err) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
- if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ if (resp->error_code && error_code ==
+ NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
hwrm_err = _hwrm_send_message(bp, &install,
sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
- if (hwrm_err)
- goto flash_pkg_exit;
}
+ if (hwrm_err)
+ goto flash_pkg_exit;
}
if (resp->result) {
static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
- flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+ flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
}
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
goto free_node;
bnxt_tc_set_src_fid(bp, flow, src_fid);
-
- if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
- bnxt_tc_set_flow_dir(bp, flow, src_fid);
+ bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -EOPNOTSUPP;
* 2. 15th bit of flow_handle must specify the flow
* direction (TX/RX).
*/
- if (flow_node->flow.dir == BNXT_DIR_RX)
+ if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
else
__be16 inner_vlan_tci;
__be16 ether_type;
u8 num_vlans;
+ u8 dir;
+#define BNXT_DIR_RX 1
+#define BNXT_DIR_TX 0
};
struct bnxt_tc_l3_key {
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
- u8 dir;
-#define BNXT_DIR_RX 1
-#define BNXT_DIR_TX 0
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
.set_coalesce = bcmgenet_set_coalesce,
.get_link_ksettings = bcmgenet_get_link_ksettings,
.set_link_ksettings = bcmgenet_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* Power down the unimac, based on mode. */
{ .compatible = "cdns,emac", .data = &emac_config },
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
- { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
+ { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#include "cavium_ptp.h"
-#define DRV_NAME "Cavium PTP Driver"
+#define DRV_NAME "cavium_ptp"
#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
}
oct->num_iqs++;
- if (oct->fn_list.enable_io_queues(oct))
+ if (oct->fn_list.enable_io_queues(oct)) {
+ octeon_delete_instr_queue(oct, iq_no);
return 1;
+ }
return 0;
}
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
- if (err)
+ if (err) {
+ kvfree(t);
return err;
+ }
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
kvfree(t);
};
/**
- * nps_reg_set - Sets ENET register with provided value.
+ * nps_enet_reg_set - Sets ENET register with provided value.
* @priv: Pointer to EZchip ENET private data structure.
* @reg: Register offset from base address.
* @value: Value to set in register.
}
/**
- * nps_reg_get - Gets value of specified ENET register.
+ * nps_enet_reg_get - Gets value of specified ENET register.
* @priv: Pointer to EZchip ENET private data structure.
* @reg: Register offset from base address.
*
n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
if (n != 1) {
err = -EPERM;
- goto err_irq;
+ goto err_irq_vectors;
}
ptp_qoriq->irq = pci_irq_vector(pdev, 0);
err_no_clock:
free_irq(ptp_qoriq->irq, ptp_qoriq);
err_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_vectors:
iounmap(base);
err_ioremap:
kfree(ptp_qoriq);
enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
+ pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
pci_release_mem_regions(pdev);
u64_stats_fetch_begin(&priv->tx[ring].statss);
s->tx_packets += priv->tx[ring].pkt_done;
s->tx_bytes += priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
}
}
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
- unsigned int *mcastFilterSize_p;
+ __be32 *mcastFilterSize_p;
long ret;
unsigned long ret_attr;
return -EINVAL;
}
- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
- VETH_MCAST_FILTER_SIZE, NULL);
+ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE,
+ NULL);
if (!mcastFilterSize_p) {
dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
"attribute\n");
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->mcastFilterSize = *mcastFilterSize_p;
+ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
(u64)tx_buff->indir_dma,
(u64)num_entries);
+ dma_unmap_single(dev, tx_buff->indir_dma,
+ sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
} else {
tx_buff->num_entries = num_entries;
lpar_rc = send_subcrq(adapter, handle_array[queue_num],
rwi = get_next_rwi(adapter);
while (rwi) {
+ if (adapter->state == VNIC_REMOVING ||
+ adapter->state == VNIC_REMOVED) {
+ kfree(rwi);
+ rc = EBUSY;
+ break;
+ }
+
if (adapter->force_reset_recovery) {
adapter->force_reset_recovery = false;
rc = do_hard_reset(adapter, rwi, reset_state);
union sub_crq *next;
int index;
int i, j;
- u8 *first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
txbuff->data_dma[j] = 0;
}
- /* if sub_crq was sent indirectly */
- first = &txbuff->indir_arr[0].generic.first;
- if (*first == IBMVNIC_CRQ_CMD) {
- dma_unmap_single(dev, txbuff->indir_dma,
- sizeof(txbuff->indir_arr),
- DMA_TO_DEVICE);
- *first = 0;
- }
if (txbuff->last_frag) {
dev_kfree_skb_any(txbuff->skb);
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock.h>
+#include <net/xfrm.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
/* 16K ints/sec to 9.2K ints/sec */
avg_wire_size *= 15;
avg_wire_size += 11452;
- } else if (avg_wire_size <= 1980) {
+ } else if (avg_wire_size < 1968) {
/* 9.2K ints/sec to 8K ints/sec */
avg_wire_size *= 5;
avg_wire_size += 22420;
case IXGBE_LINK_SPEED_2_5GB_FULL:
case IXGBE_LINK_SPEED_1GB_FULL:
case IXGBE_LINK_SPEED_10_FULL:
+ if (avg_wire_size > 8064)
+ avg_wire_size = 8064;
itr += DIV_ROUND_UP(avg_wire_size,
IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
IXGBE_ITR_ADAPTIVE_MIN_INC;
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- rtnl_lock();
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
unregister_netdev(adapter->netdev);
- rtnl_unlock();
- }
ixgbe_service_event_complete(adapter);
return;
}
#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_IPSEC
- if (secpath_exists(skb) &&
+ if (xfrm_offload(skb) &&
!ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
- u32 i = tx_ring->next_to_clean, xsk_frames = 0;
- unsigned int budget = q_vector->tx.work_limit;
struct xdp_umem *umem = tx_ring->xsk_umem;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
- bool xmit_done;
+ u32 xsk_frames = 0;
- tx_bi = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC(tx_ring, i);
- i -= tx_ring->count;
+ tx_bi = &tx_ring->tx_buffer_info[ntc];
+ tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
- do {
+ while (ntc != ntu) {
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
tx_bi++;
tx_desc++;
- i++;
- if (unlikely(!i)) {
- i -= tx_ring->count;
+ ntc++;
+ if (unlikely(ntc == tx_ring->count)) {
+ ntc = 0;
tx_bi = tx_ring->tx_buffer_info;
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
/* issue prefetch for next Tx descriptor */
prefetch(tx_desc);
+ }
- /* update budget accounting */
- budget--;
- } while (likely(budget));
-
- i += tx_ring->count;
- tx_ring->next_to_clean = i;
+ tx_ring->next_to_clean = ntc;
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes;
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
- xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
- return budget > 0 && xmit_done;
+ return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
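
A standalone sketch of the reworked completion loop above: instead of counting down a budget with a negative running index, cleaning simply walks next_to_clean (ntc) toward next_to_use (ntu) with explicit wraparound. Ring size and descriptor layout are demo assumptions:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct desc {
	bool done;	/* models the IXGBE_TXD_STAT_DD writeback bit */
};

static unsigned int clean_ring(struct desc *ring,
			       unsigned int ntc, unsigned int ntu)
{
	while (ntc != ntu) {
		if (!ring[ntc].done)
			break;		/* hardware has not finished this one */

		ring[ntc].done = false;
		if (++ntc == RING_SIZE)
			ntc = 0;	/* wrap */
	}
	return ntc;			/* new next_to_clean */
}

int main(void)
{
	struct desc ring[RING_SIZE] = { { true }, { true }, { false } };

	printf("cleaned up to %u\n", clean_ring(ring, 0, 5));
	return 0;
}
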
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
+#include <net/xfrm.h>
#include "ixgbevf.h"
first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC
- if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
DMI_MATCH(DMI_BOARD_NAME, "P6T"),
},
},
+ {
+ .ident = "ASUS P6X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+ },
+ },
{}
};
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
- goto rss_err;
+ goto qp_alloc_err;
}
rss_map->indir_qp->event = mlx4_en_sqp_event;
MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
kfree(rss_map->indir_qp);
rss_map->indir_qp = NULL;
rss_err:
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_dev_port(dev, i, &port_cap)) {
mlx4_err(dev,
- "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+ "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
} else if ((dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_DEFAULT) &&
(port_cap.dmfs_optimized_state ==
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ union {
+ struct {
+ struct mlx5_wqe_eth_seg eth;
+ struct mlx5_wqe_data_seg data[0];
+ };
+ u8 tls_progress_params_ctx[0];
+ };
};
struct mlx5e_rx_wqe_ll {
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
u8 state;
int err;
- if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- return 0;
-
err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
if (err) {
netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
sq->sqn, err);
- return err;
+ goto out;
}
- if (state != MLX5_SQC_STATE_ERR) {
- netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
- return -EINVAL;
- }
+ if (state != MLX5_SQC_STATE_ERR)
+ goto out;
mlx5e_tx_disable_queue(sq->txq);
err = mlx5e_wait_for_sq_flush(sq);
if (err)
- return err;
+ goto out;
/* At this point, no new packets will arrive from the stack as TXQ is
* marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
err = mlx5e_sq_to_ready(sq, state);
if (err)
- return err;
+ goto out;
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
mlx5e_activate_txqsq(sq);
return 0;
+out:
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ return err;
}
static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
{
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
+
+ spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
+ spin_unlock(&c->xskicosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
#include "accel/tls.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
- (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
+ (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
+ MLX5_ST_SZ_BYTES(tls_static_params))
#define MLX5E_KTLS_STATIC_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
- (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
+ (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
+ MLX5_ST_SZ_BYTES(tls_progress_params))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
- cseg->imm = cpu_to_be32(priv_tx->tisn);
+ cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
- MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
+ MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
MLX5_SET(tls_progress_params, ctx, record_tracker_state,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
MLX5_SET(tls_progress_params, ctx, auth_state,
PROGRESS_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
- fill_progress_params_ctx(wqe->data, priv_tx);
+ fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
u16 pi, u8 num_wqebbs,
- skb_frag_t *resync_dump_frag)
+ skb_frag_t *resync_dump_frag,
+ u32 num_bytes)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
wi->skb = NULL;
wi->num_wqebbs = num_wqebbs;
wi->resync_dump_frag = resync_dump_frag;
+ wi->num_bytes = num_bytes;
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
+struct mlx5e_dump_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_data_seg data;
+};
+
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb_frag_t *frag, u32 tisn, bool first)
{
struct mlx5_wqe_ctrl_seg *cseg;
- struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
- struct mlx5e_tx_wqe *wqe;
+ struct mlx5e_dump_wqe *wqe;
dma_addr_t dma_addr = 0;
- u16 ds_cnt, ds_cnt_inl;
u8 num_wqebbs;
- u16 pi, ihs;
+ u16 ds_cnt;
int fsz;
-
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- ds_cnt += 1; /* one frag */
+ u16 pi;
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
cseg = &wqe->ctrl;
- eseg = &wqe->eth;
- dseg = wqe->data;
+ dseg = &wqe->data;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- cseg->imm = cpu_to_be32(tisn);
+ cseg->tisn = cpu_to_be32(tisn << 8);
cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
- dseg += ds_cnt_inl;
-
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
DMA_TO_DEVICE);
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
- tx_fill_wi(sq, pi, num_wqebbs, frag);
+ tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
sq->pc += num_wqebbs;
WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- tx_fill_wi(sq, pi, 1, NULL);
+ tx_fill_wi(sq, pi, 1, NULL, 0);
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
priv_tx->expected_seq = seq + datalen;
cseg = &(*wqe)->ctrl;
- cseg->imm = cpu_to_be32(priv_tx->tisn);
+ cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
return &arfs_t->rules_hash[bucket_idx];
}
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) ?
- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
u8 ip_proto, __be16 etype)
{
arfs_may_expire_flow(priv);
}
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->dest;
- return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->source;
- return ((struct udphdr *)transport_header)->source;
-}
-
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
struct arfs_table *arfs_t,
- const struct sk_buff *skb,
+ const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
struct arfs_rule *rule;
INIT_WORK(&rule->arfs_work, arfs_handle_work);
tuple = &rule->tuple;
- tuple->etype = skb->protocol;
+ tuple->etype = fk->basic.n_proto;
+ tuple->ip_proto = fk->basic.ip_proto;
if (tuple->etype == htons(ETH_P_IP)) {
- tuple->src_ipv4 = ip_hdr(skb)->saddr;
- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ tuple->src_ipv4 = fk->addrs.v4addrs.src;
+ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
} else {
- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
sizeof(struct in6_addr));
- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
sizeof(struct in6_addr));
}
- tuple->ip_proto = arfs_get_ip_proto(skb);
- tuple->src_port = arfs_get_src_port(skb);
- tuple->dst_port = arfs_get_dst_port(skb);
+ tuple->src_port = fk->ports.src;
+ tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
return rule;
}
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
- const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
- if (tuple->etype == htons(ETH_P_IP) &&
- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
- return true;
- if (tuple->etype == htons(ETH_P_IPV6) &&
- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
- sizeof(struct in6_addr))) &&
- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
- sizeof(struct in6_addr))))
- return true;
+ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+ return false;
+ if (tuple->etype != fk->basic.n_proto)
+ return false;
+ if (tuple->etype == htons(ETH_P_IP))
+ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+ if (tuple->etype == htons(ETH_P_IPV6))
+ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
return false;
}
static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
- const struct sk_buff *skb)
+ const struct flow_keys *fk)
{
struct arfs_rule *arfs_rule;
struct hlist_head *head;
- __be16 src_port = arfs_get_src_port(skb);
- __be16 dst_port = arfs_get_dst_port(skb);
- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
hlist_for_each_entry(arfs_rule, head, hlist) {
- if (arfs_rule->tuple.src_port == src_port &&
- arfs_rule->tuple.dst_port == dst_port &&
- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ if (arfs_cmp(&arfs_rule->tuple, fk))
return arfs_rule;
- }
}
return NULL;
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
+ struct flow_keys fk;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
if (!arfs_t)
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
- arfs_rule = arfs_find_rule(arfs_t, skb);
+ arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index) {
spin_unlock_bh(&arfs->arfs_lock);
}
arfs_rule->rxq = rxq_index;
} else {
- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
- rxq_index, flow_id);
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
if (!arfs_rule) {
spin_unlock_bh(&arfs->arfs_lock);
return -ENOMEM;
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
link_modes = link_modes & eproto.cap;
if (!link_modes) {
netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EOPNOTSUPP;
+
if (pauseparam->autoneg)
return -EINVAL;
return 0;
}
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->netdev;
+ const struct firmware *fw;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&fw, flash->data, &dev->dev);
+ if (err)
+ return err;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ err = mlx5_firmware_flash(mdev, fw, NULL);
+ release_firmware(fw);
+
+ rtnl_lock();
+ dev_put(dev);
+ return err;
+}
+
+static int mlx5e_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
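
Firmware flashing can take tens of seconds, so the handler above pins the
netdev and drops RTNL for the duration instead of stalling every other RTNL
user. The generic shape of that pattern, sketched:

	/* Sketch: run slow work without RTNL while keeping the netdev
	 * pinned so it cannot be freed underneath us.
	 */
	dev_hold(dev);		/* take a reference */
	rtnl_unlock();		/* let other RTNL users make progress */

	/* ... long-running work, e.g. the firmware flash itself ... */

	rtnl_lock();		/* the ethtool core expects RTNL on return */
	dev_put(dev);		/* drop the reference */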
static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
+ .flash_device = mlx5e_flash_device,
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
- clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
- u8 *match_level, u8 *tunnel_match_level)
+ u8 *inner_match_level, u8 *outer_match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
u8 ip_proto = 0;
+ u8 *match_level;
- *match_level = MLX5_MATCH_NONE;
+ match_level = outer_match_level;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_META) |
}
if (mlx5e_get_tc_tun(filter_dev)) {
- if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+ if (parse_tunnel_attr(priv, spec, f, filter_dev,
+ outer_match_level))
return -EOPNOTSUPP;
- /* In decap flow, header pointers should point to the inner
+ /* At this point, header pointers should point to the inner
 * headers, outer headers were already set by parse_tunnel_attr
*/
+ match_level = inner_match_level;
headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
spec);
headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
struct flow_cls_offload *f,
struct net_device *filter_dev)
{
+ u8 inner_match_level, outer_match_level, non_tunnel_match_level;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
struct mlx5_eswitch_rep *rep;
int err;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
+ inner_match_level = MLX5_MATCH_NONE;
+ outer_match_level = MLX5_MATCH_NONE;
+
+ err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
+ &outer_match_level);
+ non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
+ outer_match_level : inner_match_level;
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < match_level)) {
+ esw->offloads.inline_mode < non_tunnel_match_level)) {
NL_SET_ERR_MSG_MOD(extack,
"Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
- match_level, esw->offloads.inline_mode);
+ non_tunnel_match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- flow->esw_attr->match_level = match_level;
- flow->esw_attr->tunnel_match_level = tunnel_match_level;
+ flow->esw_attr->inner_match_level = inner_match_level;
+ flow->esw_attr->outer_match_level = outer_match_level;
} else {
- flow->nic_attr->match_level = match_level;
+ flow->nic_attr->match_level = non_tunnel_match_level;
}
return err;
esw_attr->parse_attr = parse_attr;
esw_attr->chain = f->common.chain_index;
- esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+ esw_attr->prio = f->common.prio;
esw_attr->in_rep = in_rep;
esw_attr->in_mdev = in_mdev;
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
- u8 match_level;
- u8 tunnel_match_level;
+ u8 inner_match_level;
+ u8 outer_match_level;
struct mlx5_fc *counter;
u32 chain;
u16 prio;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
- if (attr->tunnel_match_level != MLX5_MATCH_NONE)
- spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
- if (attr->match_level != MLX5_MATCH_NONE)
- spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- } else if (attr->match_level != MLX5_MATCH_NONE) {
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
- }
+ if (attr->inner_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
- if (attr->match_level != MLX5_MATCH_NONE)
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
data_size = crdump_size - offset;
else
data_size = MLX5_CR_DUMP_CHUNK_SIZE;
- err = devlink_fmsg_binary_put(fmsg, cr_data, data_size);
+ err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
+ data_size);
if (err)
goto free_data;
}
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
+ fatal_error = check_fatal_sensors(dev);
+
+ if (fatal_error && !health->fatal_error) {
+ mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+ dev->priv.health.fatal_error = fatal_error;
+ print_health_info(dev);
+ mlx5_trigger_health_work(dev);
+ goto out;
+ }
+
count = ioread32be(health->health_counter);
if (count == health->prev)
++health->miss_counter;
if (health->synd && health->synd != prev_synd)
queue_work(health->wq, &health->report_work);
- fatal_error = check_fatal_sensors(dev);
-
- if (fatal_error && !health->fatal_error) {
- mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
- dev->priv.health.fatal_error = fatal_error;
- print_health_info(dev);
- mlx5_trigger_health_work(dev);
- }
-
out:
mod_timer(&health->timer, get_next_poll_jiffies());
}
return mlx5e_ethtool_get_ts_info(priv, info);
}
+static int mlx5i_flash_device(struct net_device *netdev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_1X = 1 << 0,
MLX5_PTYS_WIDTH_2X = 1 << 1,
.get_ethtool_stats = mlx5i_get_ethtool_stats,
.get_ringparam = mlx5i_get_ringparam,
.set_ringparam = mlx5i_set_ringparam,
+ .flash_device = mlx5i_flash_device,
.get_channels = mlx5i_get_channels,
.set_channels = mlx5i_set_channels,
.get_coalesce = mlx5i_get_coalesce,
case 128:
general_obj_key_size =
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
+ key_p += sz_bytes;
break;
case 256:
general_obj_key_size =
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority)
{
- rulei->priority = priority >> 16;
+ rulei->priority = priority;
}
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_ptp_state {
struct mlxsw_sp *mlxsw_sp;
- struct rhashtable unmatched_ht;
+ struct rhltable unmatched_ht;
spinlock_t unmatched_lock; /* protects the HT */
struct delayed_work ht_gc_dw;
u32 gc_cycle;
struct mlxsw_sp1_ptp_unmatched {
struct mlxsw_sp1_ptp_key key;
- struct rhash_head ht_node;
+ struct rhlist_head ht_node;
struct rcu_head rcu;
struct sk_buff *skb;
u64 timestamp;
/* Returns 0 on successful insertion, or a negative error code on memory
 * allocation or insertion failure.
 */
-static struct mlxsw_sp1_ptp_unmatched *
+static int
mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb,
int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
struct mlxsw_sp1_ptp_unmatched *unmatched;
- struct mlxsw_sp1_ptp_unmatched *conflict;
+ int err;
unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
if (!unmatched)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
unmatched->key = key;
unmatched->skb = skb;
unmatched->timestamp = timestamp;
unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
- conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht,
- &unmatched->ht_node,
- mlxsw_sp1_ptp_unmatched_ht_params);
- if (conflict)
+ err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ if (err)
kfree(unmatched);
- return conflict;
+ return err;
}
static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp1_ptp_key key)
+ struct mlxsw_sp1_ptp_key key, int *p_length)
{
- return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
- mlxsw_sp1_ptp_unmatched_ht_params);
+ struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
+ struct rhlist_head *tmp, *list;
+ int length = 0;
+
+ list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
+ last = unmatched;
+ length++;
+ }
+
+ *p_length = length;
+ return last;
}
static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
- return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht,
- &unmatched->ht_node,
- mlxsw_sp1_ptp_unmatched_ht_params);
+ return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+ &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
}
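
The rhashtable-to-rhltable conversion above allows several unmatched entries
to share one key instead of treating a second insert as a conflict. A compact
sketch of the rhltable API in use here, with a hypothetical struct item:

	/* Sketch: duplicate-tolerant hash list; struct item is made up. */
	struct item {
		u32 key;
		struct rhlist_head node;
	};

	static const struct rhashtable_params item_ht_params = {
		.key_len     = sizeof(u32),
		.key_offset  = offsetof(struct item, key),
		.head_offset = offsetof(struct item, node),
	};

	/* init:   rhltable_init(&ht, &item_ht_params);
	 * insert: rhltable_insert(&ht, &it->node, item_ht_params);
	 * remove: rhltable_remove(&ht, &it->node, item_ht_params);
	 */
	static struct item *last_for_key(struct rhltable *ht, u32 key)
	{
		struct rhlist_head *tmp, *list;
		struct item *it, *last = NULL;

		list = rhltable_lookup(ht, &key, item_ht_params);
		rhl_for_each_entry_rcu(it, tmp, list, node) /* all dups */
			last = it;
		return last;	/* caller must hold rcu_read_lock() */
	}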
/* This function is called in the following scenarios:
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb, u64 timestamp)
{
- struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict;
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ int length;
int err;
rcu_read_lock();
- unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
-
spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
- if (unmatched) {
- /* There was an unmatched entry when we looked, but it may have
- * been removed before we took the lock.
- */
- err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
- if (err)
- unmatched = NULL;
- }
-
- if (!unmatched) {
- /* We have no unmatched entry, but one may have been added after
- * we looked, but before we took the lock.
- */
- unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
- skb, timestamp);
- if (IS_ERR(unmatched)) {
- if (skb)
- mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
- key.local_port,
- key.ingress, NULL);
- unmatched = NULL;
- } else if (unmatched) {
- /* Save just told us, under lock, that the entry is
- * there, so this has to work.
- */
- err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
- unmatched);
- WARN_ON_ONCE(err);
- }
- }
-
- /* If unmatched is non-NULL here, it comes either from the lookup, or
- * from the save attempt above. In either case the entry was removed
- * from the hash table. If unmatched is NULL, a new unmatched entry was
- * added to the hash table, and there was no conflict.
- */
-
+ unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
if (skb && unmatched && unmatched->timestamp) {
unmatched->skb = skb;
} else if (timestamp && unmatched && unmatched->skb) {
unmatched->timestamp = timestamp;
- } else if (unmatched) {
- /* unmatched holds an older entry of the same type: either an
- * skb if we are handling skb, or a timestamp if we are handling
- * timestamp. We can't match that up, so save what we have.
+ } else {
+ /* Either there is no entry to match, or the one that is there
+ * is incompatible.
*/
- conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
- skb, timestamp);
- if (IS_ERR(conflict)) {
- if (skb)
- mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
- key.local_port,
- key.ingress, NULL);
- } else {
- /* Above, we removed an object with this key from the
- * hash table, under lock, so conflict can not be a
- * valid pointer.
- */
- WARN_ON_ONCE(conflict);
- }
+ if (length < 100)
+ err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+ skb, timestamp);
+ else
+ err = -E2BIG;
+ if (err && skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port,
+ key.ingress, NULL);
+ unmatched = NULL;
+ }
+
+ if (unmatched) {
+ err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
+ WARN_ON_ONCE(err);
}
spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
local_bh_disable();
spin_lock(&ptp_state->unmatched_lock);
- err = rhashtable_remove_fast(&ptp_state->unmatched_ht,
- &unmatched->ht_node,
- mlxsw_sp1_ptp_unmatched_ht_params);
+ err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
spin_unlock(&ptp_state->unmatched_lock);
if (err)
ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
gc_cycle = ptp_state->gc_cycle++;
- rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter);
+ rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
rhashtable_walk_start(&iter);
while ((obj = rhashtable_walk_next(&iter))) {
if (IS_ERR(obj))
spin_lock_init(&ptp_state->unmatched_lock);
- err = rhashtable_init(&ptp_state->unmatched_ht,
- &mlxsw_sp1_ptp_unmatched_ht_params);
+ err = rhltable_init(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_ht_params);
if (err)
goto err_hashtable_init;
err_mtptpt1_set:
mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
err_mtptpt_set:
- rhashtable_destroy(&ptp_state->unmatched_ht);
+ rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
return ERR_PTR(err);
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
- rhashtable_free_and_destroy(&ptp_state->unmatched_ht,
- &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
+ rhltable_free_and_destroy(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
}
break;
case OCELOT_ACL_ACTION_TRAP:
VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x0);
+ VCAP_ACT_SET(MASK_MODE, 0x1);
VCAP_ACT_SET(POLICE_ENA, 0x0);
VCAP_ACT_SET(POLICE_IDX, 0x0);
VCAP_ACT_SET(CPU_QU_NUM, 0x0);
struct ocelot_port *port;
};
-static u16 get_prio(u32 prio)
-{
- /* prio starts from 0x1000 while the ids starts from 0 */
- return prio >> 16;
-}
-
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
struct ocelot_ace_rule *rule)
{
}
finished_key_parsing:
- ocelot_rule->prio = get_prio(f->common.prio);
+ ocelot_rule->prio = f->common.prio;
ocelot_rule->id = f->cookie;
return ocelot_flower_parse_action(f, ocelot_rule);
}
struct ocelot_ace_rule rule;
int ret;
- rule.prio = get_prio(f->common.prio);
+ rule.prio = f->common.prio;
rule.port = port_block->port;
rule.id = f->cookie;
struct ocelot_ace_rule rule;
int ret;
- rule.prio = get_prio(f->common.prio);
+ rule.prio = f->common.prio;
rule.port = port_block->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_stats_update(&rule);
* setup (if available). */
status = myri10ge_request_irq(mgp);
if (status != 0)
- goto abort_with_firmware;
+ goto abort_with_slices;
myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
if (!laddr) {
- printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
- dev_kfree_skb(skb);
- return NETDEV_TX_BUSY;
+ pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
bool clr_gpr, lmem_step step)
{
s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
- bool first = true, last;
+ bool first = true, narrow_ld, last;
bool needs_inc = false;
swreg stack_off_reg;
u8 prev_gpr = 255;
needs_inc = true;
}
+
+ narrow_ld = clr_gpr && size < 8;
+
if (lm3) {
+ unsigned int nop_cnt;
+
emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
- /* For size < 4 one slot will be filled by zeroing of upper. */
- wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+ /* For size < 4 one slot will be filled by zeroing of the upper
+ * half; be careful, as that zeroing could be eliminated by the
+ * zext optimization.
+ */
+ nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
+ wrp_nops(nfp_prog, nop_cnt);
}
- if (clr_gpr && size < 8)
+ if (narrow_ld)
wrp_zext(nfp_prog, meta, gpr);
while (size) {
type = cmsg_hdr->type;
switch (type) {
- case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
- nfp_flower_cmsg_portreify_rx(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
struct nfp_flower_priv *priv = app->priv;
struct sk_buff_head *skb_head;
- if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
- type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+ if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
skb_head = &priv->cmsg_skbs_high;
else
skb_head = &priv->cmsg_skbs_low;
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+ /* Handle REIFY acks outside wq to prevent RTNL conflict. */
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ dev_consume_skb_any(skb);
} else {
nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}
struct nfp_flower_priv *priv = app->priv;
struct flow_block_cb *block_cb;
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
- !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
- nfp_flower_internal_port_can_offload(app, netdev)))
+ if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !nfp_flower_internal_port_can_offload(app, netdev)) ||
+ (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
case FLOW_BLOCK_BIND:
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (cb_priv &&
+ flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+ cb_priv,
+ &nfp_block_cb_list))
+ return -EBUSY;
+
cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
if (!cb_priv)
return -ENOMEM;
return -EOPNOTSUPP;
}
- if (flow->common.prio != (1 << 16)) {
+ if (flow->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
return -EOPNOTSUPP;
}
flow.daddr = *(__be32 *)n->primary_key;
- /* Only concerned with route changes for representors. */
- if (!nfp_netdev_is_nfp_repr(n->dev))
- return NOTIFY_DONE;
-
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
+ if (!nfp_netdev_is_nfp_repr(n->dev) &&
+ !nfp_flower_internal_port_can_offload(app, n->dev))
+ return NOTIFY_DONE;
+
/* Only concerned with changes to routes already added to NFP. */
if (!nfp_tun_has_route(app, flow.daddr))
return NOTIFY_DONE;
struct nv_skb_map *next_tx_ctx;
};
+struct nv_txrx_stats {
+ u64 stat_rx_packets;
+ u64 stat_rx_bytes; /* not always available in HW */
+ u64 stat_rx_missed_errors;
+ u64 stat_rx_dropped;
+ u64 stat_tx_packets; /* not always available in HW */
+ u64 stat_tx_bytes;
+ u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+ __this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+ __this_cpu_add(np->txrx_stats->member, (count))
+
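
Turning the global u64 counters into per-CPU ones means hot-path increments
no longer bounce a shared cache line between CPUs; readers then sum every
CPU's copy. The lifecycle, sketched with illustrative names:

	/* Sketch: per-CPU counter lifecycle (struct pkt_stats is made up). */
	struct pkt_stats { u64 packets; };

	static struct pkt_stats __percpu *stats;

	static int sketch_probe(void)
	{
		stats = alloc_percpu(struct pkt_stats); /* zeroed copies */
		return stats ? 0 : -ENOMEM;
	}

	static void sketch_hot_path(void)
	{
		/* __this_cpu_* assumes the caller cannot migrate,
		 * e.g. softirq context as in this driver.
		 */
		__this_cpu_inc(stats->packets);
	}

	static u64 sketch_read_total(void)
	{
		u64 total = 0;
		int cpu;

		for_each_online_cpu(cpu) /* sum every CPU's copy */
			total += per_cpu_ptr(stats, cpu)->packets;
		return total;
	}

	/* teardown: free_percpu(stats); */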
/*
* SMP locking:
* All hardware access under netdev_priv(dev)->lock, except the performance
/* RX software stats */
struct u64_stats_sync swstats_rx_syncp;
- u64 stat_rx_packets;
- u64 stat_rx_bytes; /* not always available in HW */
- u64 stat_rx_missed_errors;
- u64 stat_rx_dropped;
+ struct nv_txrx_stats __percpu *txrx_stats;
/* media detection workaround.
 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
/* TX software stats */
struct u64_stats_sync swstats_tx_syncp;
- u64 stat_tx_packets; /* not always available in HW */
- u64 stat_tx_bytes;
- u64 stat_tx_dropped;
/* msi/msi-x fields */
u32 msi_flags;
}
}
+static void nv_get_stats(int cpu, struct fe_priv *np,
+ struct rtnl_link_stats64 *storage)
+{
+ struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+ unsigned int syncp_start;
+ u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+ u64 tx_packets, tx_bytes, tx_dropped;
+
+ do {
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+ rx_packets = src->stat_rx_packets;
+ rx_bytes = src->stat_rx_bytes;
+ rx_dropped = src->stat_rx_dropped;
+ rx_missed_errors = src->stat_rx_missed_errors;
+ } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+ storage->rx_packets += rx_packets;
+ storage->rx_bytes += rx_bytes;
+ storage->rx_dropped += rx_dropped;
+ storage->rx_missed_errors += rx_missed_errors;
+
+ do {
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+ tx_packets = src->stat_tx_packets;
+ tx_bytes = src->stat_tx_bytes;
+ tx_dropped = src->stat_tx_dropped;
+ } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+ storage->tx_packets += tx_packets;
+ storage->tx_bytes += tx_bytes;
+ storage->tx_dropped += tx_dropped;
+}
+
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
__releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
- unsigned int syncp_start;
+ int cpu;
/*
* Note: because HW stats are not always available and for
*/
/* software stats */
- do {
- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
- storage->rx_packets = np->stat_rx_packets;
- storage->rx_bytes = np->stat_rx_bytes;
- storage->rx_dropped = np->stat_rx_dropped;
- storage->rx_missed_errors = np->stat_rx_missed_errors;
- } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
- do {
- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
- storage->tx_packets = np->stat_tx_packets;
- storage->tx_bytes = np->stat_tx_bytes;
- storage->tx_dropped = np->stat_tx_dropped;
- } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+ for_each_online_cpu(cpu)
+ nv_get_stats(cpu, np, storage);
/* If the nic supports hw counters then retrieve latest values */
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_dropped++;
+ nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_dropped++;
+ nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
}
if (nv_release_txskb(np, &np->tx_skb[i])) {
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
}
np->tx_skb[i].dma = 0;
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
+ unsigned int len;
+
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_packets++;
- np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ nv_txrx_stats_inc(stat_tx_packets);
+ len = np->get_tx_ctx->skb->len;
+ nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
+ unsigned int len;
+
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_packets++;
- np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ nv_txrx_stats_inc(stat_tx_packets);
+ len = np->get_tx_ctx->skb->len;
+ nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
nv_legacybackoff_reseed(dev);
}
} else {
+ unsigned int len;
+
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_packets++;
- np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ nv_txrx_stats_inc(stat_tx_packets);
+ len = np->get_tx_ctx->skb->len;
+ nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
}
}
+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+ if (flags & NV_RX_MISSEDFRAME) {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ nv_txrx_stats_inc(stat_rx_missed_errors);
+ u64_stats_update_end(&np->swstats_rx_syncp);
+ }
+}
+
static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
}
/* the rest are hard errors */
else {
- if (flags & NV_RX_MISSEDFRAME) {
- u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_missed_errors++;
- u64_stats_update_end(&np->swstats_rx_syncp);
- }
+ rx_missing_handler(flags, np);
dev_kfree_skb(skb);
goto next_pkt;
}
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_packets++;
- np->stat_rx_bytes += len;
+ nv_txrx_stats_inc(stat_rx_packets);
+ nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
}
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_packets++;
- np->stat_rx_bytes += len;
+ nv_txrx_stats_inc(stat_rx_packets);
+ nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
SET_NETDEV_DEV(dev, &pci_dev->dev);
u64_stats_init(&np->swstats_rx_syncp);
u64_stats_init(&np->swstats_tx_syncp);
+ np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+ if (!np->txrx_stats) {
+ pr_err("np->txrx_stats, alloc memory error.\n");
+ err = -ENOMEM;
+ goto out_alloc_percpu;
+ }
timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
out_disable:
pci_disable_device(pci_dev);
out_free:
+ free_percpu(np->txrx_stats);
+out_alloc_percpu:
free_netdev(dev);
out:
return err;
static void nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ free_percpu(np->txrx_stats);
unregister_netdev(dev);
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ goto err4;
}
}
return 0;
+err4:
+ qed_ll2_dealloc_if(cdev);
err3:
qed_hw_stop(cdev);
err2:
skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb)
skb_copy_to_linear_data(skb, data, pkt_size);
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
return skb;
}
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
*
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
kfree(ts_skb);
if (tag == tfa_tag) {
skb_tstamp_tx(skb, &shhwtstamps);
+ dev_consume_skb_any(skb);
break;
+ } else {
+ dev_kfree_skb_any(skb);
}
}
ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
}
goto unmap;
}
- ts_skb->skb = skb;
+ ts_skb->skb = skb_get(skb);
ts_skb->tag = priv->ts_skb_tag++;
priv->ts_skb_tag &= 0x3ff;
list_add_tail(&ts_skb->list, &priv->ts_skb_list);
/* Clear the timestamp list */
list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
kfree(ts_skb);
}
printk(KERN_ERR "Sgiseeq: Cannot register net device, "
"aborting.\n");
err = -ENODEV;
- goto err_out_free_page;
+ goto err_out_free_attrs;
}
printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
return 0;
-err_out_free_page:
- free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
err_out_free_dev:
free_netdev(dev);
int ret;
struct device *dev = &bsp_priv->pdev->dev;
- if (!ldo) {
- dev_err(dev, "no regulator found\n");
- return -1;
- }
+ if (!ldo)
+ return 0;
if (enable) {
ret = regulator_enable(ldo);
int ret;
u32 reg, val;
- regmap_field_read(gmac->regmap_field, &val);
+ ret = regmap_field_read(gmac->regmap_field, &val);
+ if (ret) {
+ dev_err(priv->device, "Fail to read from regmap field.\n");
+ return ret;
+ }
+
reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,
struct stmmac_tc_entry *entry, *frag = NULL;
struct tc_u32_sel *sel = cls->knode.sel;
u32 off, data, mask, real_off, rem;
- u32 prio = cls->common.prio;
+ u32 prio = cls->common.prio << 16;
int ret;
/* Only 1 match per entry */
if (!cpsw)
return -ENOMEM;
+ platform_set_drvdata(pdev, cpsw);
cpsw->dev = dev;
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
goto clean_cpts;
}
- platform_set_drvdata(pdev, cpsw);
priv = netdev_priv(ndev);
priv->cpsw = cpsw;
priv->ndev = ndev;
pci_unmap_single(lp->pci_dev,
lp->rx_skbs[cur_bd].skb_dma,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
memmove(skb->data, skb->data - NET_IP_ALIGN,
pkt_len);
data = skb_put(skb, pkt_len);
static void tsi108_stat_carry(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
u32 carry1, carry2;
- spin_lock_irq(&data->misclock);
+ spin_lock_irqsave(&data->misclock, flags);
carry1 = TSI_READ(TSI108_STAT_CARRY1);
carry2 = TSI_READ(TSI108_STAT_CARRY2);
TSI108_STAT_TXPAUSEDROP_CARRY,
&data->tx_pause_drop);
- spin_unlock_irq(&data->misclock);
+ spin_unlock_irqrestore(&data->misclock, flags);
}
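
spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on
unlock, which is wrong whenever the caller might already run with them
disabled; the irqsave variant used above restores whatever state it found:

	/* Sketch: irqsave/irqrestore preserve the caller's IRQ state. */
	unsigned long flags;

	spin_lock_irqsave(&lock, flags); /* safe from any context */
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&lock, flags); /* restore, don't force-enable */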
/* Read a stat counter atomically with respect to carries.
sp->dev->stats.rx_bytes += count;
- if ((skb = dev_alloc_skb(count)) == NULL)
+ if ((skb = dev_alloc_skb(count + 1)) == NULL)
goto out_mem;
- ptr = skb_put(skb, count);
+ ptr = skb_put(skb, count + 1);
*ptr++ = cmd; /* KISS command */
memcpy(ptr, sp->cooked_buf + 1, count);
struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+ struct netvsc_device *nvdev;
struct netvsc_vf_pcpu_stats vf_tot;
int i;
+ rcu_read_lock();
+
+ nvdev = rcu_dereference(ndev_ctx->nvdev);
if (!nvdev)
- return;
+ goto out;
netdev_stats_to_stats64(t, &net->stats);
t->rx_packets += packets;
t->multicast += multicast;
}
+out:
+ rcu_read_unlock();
}
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
err = hwsim_subscribe_all_others(phy);
if (err < 0) {
mutex_unlock(&hwsim_phys_lock);
- goto err_reg;
+ goto err_subscribe;
}
}
list_add_tail(&phy->list, &hwsim_phys);
return idx;
+err_subscribe:
+ ieee802154_unregister_hw(phy->hw);
err_reg:
kfree(pib);
err_pib:
return 0;
platform_drv:
- genl_unregister_family(&hwsim_genl_family);
-platform_dev:
platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+ genl_unregister_family(&hwsim_genl_family);
return rc;
}
debugfs_remove_recursive(nsim_dev_port->ddir);
}
+static struct net *nsim_devlink_net(struct devlink *devlink)
+{
+ return &init_net;
+}
+
static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
}
static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB_RULES, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
}
static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
}
static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB_RULES, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
static int nsim_dev_resources_register(struct devlink *devlink)
{
- struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct devlink_resource_size_params params = {
.size_max = (u64)-1,
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY
};
+ struct net *net = nsim_devlink_net(devlink);
int err;
u64 n;
goto out;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV4_FIB,
NSIM_RESOURCE_IPV4, &params);
return err;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB_RULES, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV4, &params);
goto out;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6, &params);
return err;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB_RULES, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_IPV6, &params);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV4_FIB,
nsim_dev_ipv4_fib_resource_occ_get,
- nsim_dev);
+ net);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV4_FIB_RULES,
nsim_dev_ipv4_fib_rules_res_occ_get,
- nsim_dev);
+ net);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV6_FIB,
nsim_dev_ipv6_fib_resource_occ_get,
- nsim_dev);
+ net);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV6_FIB_RULES,
nsim_dev_ipv6_fib_rules_res_occ_get,
- nsim_dev);
+ net);
out:
return err;
}
static int nsim_dev_reload(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
- struct nsim_dev *nsim_dev = devlink_priv(devlink);
enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
};
+ struct net *net = nsim_devlink_net(devlink);
int i;
for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
err = devlink_resource_size_get(devlink, res_ids[i], &val);
if (!err) {
- err = nsim_fib_set_max(nsim_dev->fib_data,
- res_ids[i], val, extack);
+ err = nsim_fib_set_max(net, res_ids[i], val, extack);
if (err)
return err;
}
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
- nsim_dev->fib_data = nsim_fib_create();
- if (IS_ERR(nsim_dev->fib_data)) {
- err = PTR_ERR(nsim_dev->fib_data);
- goto err_devlink_free;
- }
-
err = nsim_dev_resources_register(devlink);
if (err)
- goto err_fib_destroy;
+ goto err_devlink_free;
err = devlink_register(devlink, &nsim_bus_dev->dev);
if (err)
devlink_unregister(devlink);
err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
-err_fib_destroy:
- nsim_fib_destroy(nsim_dev->fib_data);
err_devlink_free:
devlink_free(devlink);
return ERR_PTR(err);
nsim_dev_debugfs_exit(nsim_dev);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
- nsim_fib_destroy(nsim_dev->fib_data);
mutex_destroy(&nsim_dev->port_list_lock);
devlink_free(devlink);
}
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
+#include <net/netns/generic.h>
#include "netdevsim.h"
};
struct nsim_fib_data {
- struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, bool max)
+static unsigned int nsim_fib_net_id;
+
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
{
+ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack)
{
+ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
int err = 0;
return err;
}
-static int nsim_fib_rule_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
{
+ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
return err;
}
-static int nsim_fib_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct fib_notifier_info *info, bool add)
{
+ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
- fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(data, info,
- event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(data, info,
- event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
break;
}
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
- fib_nb);
+ struct nsim_fib_data *data;
+ struct net *net;
+
+ rcu_read_lock();
+ for_each_net_rcu(net) {
+ data = net_generic(net, nsim_fib_net_id);
+
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+ }
+ rcu_read_unlock();
}
-struct nsim_fib_data *nsim_fib_create(void)
-{
- struct nsim_fib_data *data;
- int err;
+static struct notifier_block nsim_fib_nb = {
+ .notifier_call = nsim_fib_event_nb,
+};
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
+/* Initialize per network namespace state */
+static int __net_init nsim_fib_netns_init(struct net *net)
+{
+ struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
data->ipv4.fib.max = (u64)-1;
data->ipv4.rules.max = (u64)-1;
data->ipv6.fib.max = (u64)-1;
data->ipv6.rules.max = (u64)-1;
- data->fib_nb.notifier_call = nsim_fib_event_nb;
- err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
- if (err) {
- pr_err("Failed to register fib notifier\n");
- goto err_out;
- }
+ return 0;
+}
- return data;
+static struct pernet_operations nsim_fib_net_ops = {
+ .init = nsim_fib_netns_init,
+ .id = &nsim_fib_net_id,
+ .size = sizeof(struct nsim_fib_data),
+};
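
Because nsim_fib_net_ops sets both .id and .size, the netns core allocates a
zeroed struct nsim_fib_data for every namespace before calling .init and
frees it automatically on teardown, so no .exit hook or manual kzalloc/kfree
is needed. Any code with a struct net can then reach the state:

	/* Sketch: fetch the per-namespace blob the core allocated. */
	static struct nsim_fib_data *sketch_fib_data(struct net *net)
	{
		return net_generic(net, nsim_fib_net_id);
	}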
-err_out:
- kfree(data);
- return ERR_PTR(err);
+void nsim_fib_exit(void)
+{
+ unregister_pernet_subsys(&nsim_fib_net_ops);
+ unregister_fib_notifier(&nsim_fib_nb);
}
-void nsim_fib_destroy(struct nsim_fib_data *data)
+int nsim_fib_init(void)
{
- unregister_fib_notifier(&data->fib_nb);
- kfree(data);
+ int err;
+
+ err = register_pernet_subsys(&nsim_fib_net_ops);
+ if (err < 0) {
+ pr_err("Failed to register pernet subsystem\n");
+ goto err_out;
+ }
+
+ err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+ if (err < 0) {
+ pr_err("Failed to register fib notifier\n");
+ goto err_out;
+ }
+
+err_out:
+ return err;
}
if (err)
goto err_dev_exit;
- err = rtnl_link_register(&nsim_link_ops);
+ err = nsim_fib_init();
if (err)
goto err_bus_exit;
+ err = rtnl_link_register(&nsim_link_ops);
+ if (err)
+ goto err_fib_exit;
+
return 0;
+err_fib_exit:
+ nsim_fib_exit();
err_bus_exit:
nsim_bus_exit();
err_dev_exit:
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
+ nsim_fib_exit();
nsim_bus_exit();
nsim_dev_exit();
}
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
unsigned int port_index);
-struct nsim_fib_data *nsim_fib_create(void);
-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, u64 val,
+int nsim_fib_init(void);
+void nsim_fib_exit(void);
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack);
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
* value before reset.
- *
- * So let's first disable the RX and TX delays in PHY and enable
- * them based on the mode selected (this also takes care of RGMII
- * mode where we expect delays to be disabled)
*/
-
- ret = at803x_disable_rx_delay(phydev);
- if (ret < 0)
- return ret;
- ret = at803x_disable_tx_delay(phydev);
- if (ret < 0)
- return ret;
-
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- /* If RGMII_ID or RGMII_RXID are specified enable RX delay,
- * otherwise keep it disabled
- */
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
ret = at803x_enable_rx_delay(phydev);
- if (ret < 0)
- return ret;
- }
+ else
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- /* If RGMII_ID or RGMII_TXID are specified enable TX delay,
- * otherwise keep it disabled
- */
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
ret = at803x_enable_tx_delay(phydev);
- }
+ else
+ ret = at803x_disable_tx_delay(phydev);
return ret;
}
int val, devad;
bool link = true;
+ if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ /* Autoneg is being started, therefore disregard current
+ * link status and report link as down.
+ */
+ if (val & MDIO_AN_CTRL1_RESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+ }
+
while (mmd_mask && link) {
devad = __ffs(mmd_mask);
mmd_mask &= ~BIT(devad);
}
EXPORT_SYMBOL_GPL(genphy_c45_read_status);
+/**
+ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we force a configuration.
+ */
+int genphy_c45_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_pma_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
+
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev)
* allowed to call genphy_config_aneg()
*/
if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
- return -EOPNOTSUPP;
+ return genphy_c45_config_aneg(phydev);
return genphy_config_aneg(phydev);
}
*/
int genphy_update_link(struct phy_device *phydev)
{
- int status;
+ int status = 0, bmcr;
+
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ /* Autoneg is being started, therefore disregard BMSR value and
+ * report link as down.
+ */
+ if (bmcr & BMCR_ANRESTART)
+ goto done;
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
* Local device Link partner
* Pause AsymDir Pause AsymDir Result
* 1 X 1 X TX+RX
- * 0 1 1 1 RX
- * 1 1 0 1 TX
+ * 0 1 1 1 TX
+ * 1 1 0 1 RX
*/
static void phylink_resolve_flow(struct phylink *pl,
struct phylink_link_state *state)
new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
else if (pause & MLO_PAUSE_ASYM)
new_pause = state->pause & MLO_PAUSE_SYM ?
- MLO_PAUSE_RX : MLO_PAUSE_TX;
+ MLO_PAUSE_TX : MLO_PAUSE_RX;
} else {
new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
}
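
The corrected rows follow IEEE 802.3 flow-control resolution: under
asymmetric pause, the side whose partner advertises Pause is the one allowed
to transmit pause frames. The full resolution, sketched as a truth table in
code:

	/* Sketch: 802.3 Annex 28B pause resolution. */
	static void resolve_pause(bool lcl_pause, bool lcl_asym,
				  bool prt_pause, bool prt_asym,
				  bool *tx, bool *rx)
	{
		*tx = *rx = false;
		if (lcl_pause && prt_pause) {
			*tx = *rx = true;	/* symmetric pause */
		} else if (lcl_asym && prt_asym) {
			if (prt_pause)
				*tx = true;	/* we may pause the partner */
			else if (lcl_pause)
				*rx = true;	/* the partner may pause us */
		}
	}

The three rows of the comment above map to (1,x,1,x) -> TX+RX,
(0,1,1,1) -> TX and (1,1,0,1) -> RX.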
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_GSO_UDP_L4;
team->dev->hard_header_len = max_hard_header_len;
}
static int tun_attach(struct tun_struct *tun, struct file *file,
- bool skip_filter, bool napi, bool napi_frags)
+ bool skip_filter, bool napi, bool napi_frags,
+ bool publish_tun)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
* initialized tfile; otherwise we risk using half-initialized
* object.
*/
- rcu_assign_pointer(tfile->tun, tun);
+ if (publish_tun)
+ rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
tun_set_real_num_queues(tun);
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS);
+ ifr->ifr_flags & IFF_NAPI_FRAGS, true);
if (err < 0)
return err;
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS);
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
if (err < 0)
goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
+ /* free_netdev() won't check the refcnt; to avoid a race
+ * with dev_put() we need to publish tun after registration.
+ */
+ rcu_assign_pointer(tfile->tun, tun);
}
netif_carrier_on(tun->dev);
if (ret < 0)
goto unlock;
ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
- tun->flags & IFF_NAPI_FRAGS);
+ tun->flags & IFF_NAPI_FRAGS, true);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
goto bad_desc;
}
skip:
- if (rndis && header.usb_cdc_acm_descriptor &&
+ /* Communication class functions with bmCapabilities are not
+ * RNDIS. But some Wireless class RNDIS functions use
+ * bmCapabilities for their own purpose. The failsafe is
+ * therefore applied only to Communication class RNDIS
+ * functions. The rndis test is redundant, but a cheap
+ * optimization.
+ */
+ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+ header.usb_cdc_acm_descriptor &&
header.usb_cdc_acm_descriptor->bmCapabilities) {
dev_dbg(&intf->dev,
"ACM capabilities %02x, not really RNDIS?\n",
}
if (!timeout) {
dev_err(&udev->dev, "firmware not ready in time\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto err;
}
/* enable ethernet mode (?) */
status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
usb_buf, 24);
if (status != 0)
- return status;
+ goto out;
memcpy(usb_buf, init_msg_2, 12);
status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
usb_buf, 28);
if (status != 0)
- return status;
+ goto out;
memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
kfree(usb_buf);
return status;
}
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out3;
+ goto out4;
}
usb_set_intfdata(intf, dev);
ret = lan78xx_phy_init(dev);
if (ret < 0)
- goto out4;
+ goto out5;
return 0;
-out4:
+out5:
unregister_netdev(netdev);
+out4:
+ usb_free_urb(dev->urb_intr);
out3:
lan78xx_unbind(dev, intf);
out2:
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
+ if (ret < 0)
+ memset(data, 0xff, size);
+ else
+ memcpy(data, tmp, size);
- memcpy(data, tmp, size);
kfree(tmp);
return ret;
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
- if (!test_bit(RTL8152_UNPLUG, &tp->flags))
- napi_disable(&tp->napi);
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
return 0;
out1:
- netif_napi_del(&tp->napi);
usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
if (tp) {
rtl_set_unplug(tp);
- netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
}
}
- if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+ if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
- lmc_trace(dev, "lmc_runnin_reset_out");
+ lmc_trace(dev, "lmc_running_reset_out");
}
}
result = i2400m_barker_db_add(barker);
if (result < 0)
- goto error_add;
+ goto error_parse_add;
}
kfree(options_orig);
}
return 0;
+error_parse_add:
error_parse:
+ kfree(options_orig);
error_add:
kfree(i2400m_barker_db);
return result;
"%d\n", result);
result = 0;
error_cmd:
+ kfree(cmd);
kfree_skb(ack_skb);
error_msg_to_dev:
error_alloc:
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
+extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
cpu_to_le32(vif->bss_conf.use_short_slot ?
MAC_FLG_SHORT_SLOT : 0);
- cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+ cmd->filter_flags = 0;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
/* We need the dtim_period to set the MAC as associated */
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 ap_sta_id = mvmvif->ap_sta_id;
u32 dtim_offs;
/*
dtim_offs);
ctxt_sta->is_assoc = cpu_to_le32(1);
+
+ /*
+ * allow multicast data frames only as long as the station is
+ * authorized, i.e., GTK keys are already installed (if needed)
+ */
+ if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+ if (!IS_ERR_OR_NULL(sta)) {
+ struct iwl_mvm_sta *mvmsta =
+ iwl_mvm_sta_from_mac80211(sta);
+
+ if (mvmsta->sta_state ==
+ IEEE80211_STA_AUTHORIZED)
+ cmd.filter_flags |=
+ cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+ }
+
+ rcu_read_unlock();
+ }
} else {
ctxt_sta->is_assoc = cpu_to_le32(0);
MAC_FILTER_IN_CONTROL_AND_MGMT |
MAC_FILTER_IN_BEACON |
MAC_FILTER_IN_PROBE_REQUEST |
- MAC_FILTER_IN_CRC32);
+ MAC_FILTER_IN_CRC32 |
+ MAC_FILTER_ACCEPT_GRP);
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
/* Allocate sniffer station */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
- MAC_FILTER_IN_PROBE_REQUEST);
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_ACCEPT_GRP);
/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ /*
+ * Now that the station is authorized, i.e., keys were already
+ * installed, we need to indicate to the FW that multicast data
+ * frames can be forwarded to the driver
+ */
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
+ /* Multicast data frames are no longer allowed */
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
/* disable beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
WARN_ON(ret &&
iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
+
+ /* same thing for QuZ... */
+ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+ else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+ }
+
#endif
pci_set_drvdata(pdev, iwl_trans);
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- ((trans->cfg != &iwl_ax200_cfg_cc &&
- trans->cfg != &killer1650x_2ax_cfg &&
- trans->cfg != &killer1650w_2ax_cfg &&
- trans->cfg != &iwl_ax201_cfg_quz_hr) ||
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
u16 len = byte_cnt;
__le16 bc_ent;
- if (trans_pcie->bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ /* Starting from 22560, the HW expects bytes */
+ WARN_ON(trans_pcie->bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
- else
+ } else {
+ /* Until 22560, the HW expects DW */
+ WARN_ON(!trans_pcie->bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
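To make the two byte-count encodings concrete, a worked example with
illustrative values:

    u16 len = 200;                          /* TFD byte count */
    u8 num_fetch_chunks = 2;

    /* 22560 and later: byte units, 14-bit length field */
    u16 new_ent = len | (num_fetch_chunks << 14);       /* 0x80c8 */

    /* earlier families: dword units, 12-bit length field */
    u16 dws = DIV_ROUND_UP(len, 4);                     /* 50 DWs */
    u16 old_ent = dws | (num_fetch_chunks << 12);       /* 0x2032 */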
}
/*
}
vs_ie = (struct ieee_types_header *)vendor_ie;
+ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+ IEEE_MAX_IE_SIZE)
+ return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+ return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
- if (rate_ie)
+ if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+ return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+ }
return;
}
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
+ if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+ return;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
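All three hunks apply the same rule: validate an element's claimed
length against both the source data and the destination capacity before
memcpy(). A hedged, generic sketch (all names are illustrative):

    static int copy_ie(u8 *dst, size_t dst_room,
                       const u8 *ie, size_t src_room)
    {
            u8 len;

            if (src_room < 2)               /* need ID + length octets */
                    return -EINVAL;
            len = ie[1];
            if ((size_t)len + 2 > src_room) /* claims more than we have */
                    return -EINVAL;
            if (len > dst_room)             /* would overflow destination */
                    return -EINVAL;

            memcpy(dst, ie + 2, len);
            return len;
    }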
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}
+ if (is_mt7630(dev)) {
+ dev->mt76.cap.has_5ghz = false;
+ dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+ }
+
if (!mt76x02_field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;
mt76x0e_stop_hw(dev);
}
+static int
+mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (is_mt7630(dev))
+ return -EOPNOTSUPP;
+
+ return mt76x02_set_key(hw, cmd, vif, sta, key);
+}
+
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
- .set_key = mt76x02_set_key,
+ .set_key = mt76x0e_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x02_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,
.release_buffered_frames = mt76_release_buffered_frames,
};
-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
{
int err;
- mt76x0_chip_onoff(dev, true, true);
+ mt76x0_chip_onoff(dev, true, reset);
if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
if (err < 0)
goto out_err;
- err = mt76x0u_init_hardware(dev);
+ err = mt76x0u_init_hardware(dev, true);
if (err < 0)
goto out_err;
if (ret < 0)
goto err;
- ret = mt76x0u_init_hardware(dev);
+ ret = mt76x0u_init_hardware(dev, false);
if (ret)
goto err;
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
- rt2800_register_multiread(rt2x00dev, offset,
- &iveiv_entry, sizeof(iveiv_entry));
- if ((crypto->cipher == CIPHER_TKIP) ||
- (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
- (crypto->cipher == CIPHER_AES))
- iveiv_entry.iv[3] |= 0x20;
- iveiv_entry.iv[3] |= key->keyidx << 6;
+ if (crypto->cmd == SET_KEY) {
+ rt2800_register_multiread(rt2x00dev, offset,
+ &iveiv_entry, sizeof(iveiv_entry));
+ if ((crypto->cipher == CIPHER_TKIP) ||
+ (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+ (crypto->cipher == CIPHER_AES))
+ iveiv_entry.iv[3] |= 0x20;
+ iveiv_entry.iv[3] |= key->keyidx << 6;
+ } else {
+ memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ }
+
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
/* Turn on tertiary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
- rf->channel > 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
- rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
/* fall-through */
case 2:
/* Turn on secondary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
- rf->channel > 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
- rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
/* fall-through */
case 1:
/* Turn on primary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
- rf->channel > 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
- rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
break;
}
rt2800_delete_wcid_attr(rt2x00dev, i);
}
+ /*
+ * Clear encryption initialization vectors on start, but keep them
+ * for watchdog reset. Otherwise we will have wrong IVs and not be
+ * able to keep connections after reset.
+ */
+ if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+ for (i = 0; i < 256; i++)
+ rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+
/*
* Clear all beacons
*/
DEVICE_STATE_ENABLED_RADIO,
DEVICE_STATE_SCANNING,
DEVICE_STATE_FLUSHING,
+ DEVICE_STATE_RESET,
/*
* Driver configuration
int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
{
- int retval;
+ int retval = 0;
if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
/*
* This is a special case for ieee80211_restart_hw(); otherwise
* mac80211 never calls start() twice in a row without stop().
*/
+ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
rt2x00lib_stop(rt2x00dev);
}
*/
retval = rt2x00lib_load_firmware(rt2x00dev);
if (retval)
- return retval;
+ goto out;
/*
* Initialize the device.
*/
retval = rt2x00lib_initialize(rt2x00dev);
if (retval)
- return retval;
+ goto out;
rt2x00dev->intf_ap_count = 0;
rt2x00dev->intf_sta_count = 0;
/* Enable the radio */
retval = rt2x00lib_enable_radio(rt2x00dev);
if (retval)
- return retval;
+ goto out;
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
- return 0;
+out:
+ clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+ return retval;
}
void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
kfree(rsi_dev->tx_buffer);
fail_eps:
- kfree(rsi_dev);
return status;
}
skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
kfree_skb(nskb);
break;
&echo_response);
if (result) {
dev_err(&st95context->spicontext.spidev->dev,
- "err: echo response receieve error = 0x%x\n", result);
+ "err: echo response receive error = 0x%x\n", result);
return result;
}
resource_size_t start, size;
struct nd_region *nd_region;
unsigned long npfns, align;
+ u32 end_trunc;
struct nd_pfn_sb *pfn_sb;
phys_addr_t offset;
const char *sig;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+ end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
return -ENXIO;
}
- npfns = PHYS_PFN(size - offset);
+ npfns = PHYS_PFN(size - offset - end_trunc);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
pfn_sb->version_minor = cpu_to_le16(3);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
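A worked example of the end_trunc arithmetic above, with illustrative
numbers:

    phys_addr_t start = 0x8000000;      /* 128 MiB */
    resource_size_t size = 0x310000;    /* namespace ends at 0x8310000 */
    unsigned long align = 0x200000;     /* 2 MiB subsection */

    u32 end_trunc = start + size - ALIGN_DOWN(start + size, align);
    /* ALIGN_DOWN(0x8310000, 0x200000) == 0x8200000, so end_trunc ==
     * 0x110000: the unaligned tail is excluded from npfns. */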
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
nvme_update_formats(ctrl);
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ revalidate_disk(ns->head->disk);
}
#endif
}
.vid = 0x1179,
.mn = "THNSF5256GPUK TOSHIBA",
.quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * This LiteON CL1-3D*-Q11 firmware version has a race
+ * condition associated with actions related to suspend to idle.
+ * LiteON has resolved the problem in future firmware.
+ */
+ .vid = 0x14a4,
+ .fr = "22301111",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
if (ret) {
dev_err(ctrl->device,
"failed to register subsystem device.\n");
+ put_device(&subsys->dev);
goto out_unlock;
}
ida_init(&subsys->ns_ida);
nvme_put_subsystem(subsys);
out_unlock:
mutex_unlock(&nvme_subsystems_lock);
- put_device(&subsys->dev);
return ret;
}
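The fix above follows the driver-core refcount rule: once a device has
been initialized, a failed device_register() must be unwound with
put_device(), which frees the object through its release callback, and
the common exit path must not drop that reference a second time. A
sketch mirroring the hunk:

    ret = device_register(&subsys->dev);
    if (ret) {
            put_device(&subsys->dev);   /* ->release frees the object */
            goto out_unlock;
    }
    /* from here on, teardown uses put_device(), never kfree() */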
goto out_free;
}
+ if (!(ctrl->ops->flags & NVME_F_FABRICS))
+ ctrl->cntlid = le16_to_cpu(id->cntlid);
+
if (!ctrl->identified) {
int i;
goto out_free;
}
} else {
- ctrl->cntlid = le16_to_cpu(id->cntlid);
ctrl->hmpre = le32_to_cpu(id->hmpre);
ctrl->hmmin = le32_to_cpu(id->hmmin);
ctrl->hmminds = le32_to_cpu(id->hmminds);
struct nvme_ns *ns, *next;
LIST_HEAD(ns_list);
+ /*
+ * make sure to requeue I/O to all namespaces as these
+ * might result from the scan itself and must complete
+ * for the scan_work to make progress
+ */
+ nvme_mpath_clear_ctrl_paths(ctrl);
+
/* prevent racing with ns scanning */
flush_work(&ctrl->scan_work);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_freeze_queue_start(h->disk->queue);
+}
+
/*
* If multipathing is enabled we need to always use the subsystem instance
* number for numbering our devices to avoid conflicts between subsystems that
[NVME_ANA_CHANGE] = "change",
};
-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
+ bool changed = false;
int node;
if (!head)
- return;
+ goto out;
for_each_node(node) {
- if (ns == rcu_access_pointer(head->current_path[node]))
+ if (ns == rcu_access_pointer(head->current_path[node])) {
rcu_assign_pointer(head->current_path[node], NULL);
+ changed = true;
+ }
}
+out:
+ return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->scan_lock);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ if (nvme_mpath_clear_current_path(ns))
+ kblockd_schedule_work(&ns->head->requeue_work);
+ mutex_unlock(&ctrl->scan_lock);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
return ns;
}
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ switch (ns->ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ /* fallthru */
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
struct bio *bio)
{
disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
ret = direct_make_request(bio);
- } else if (!list_empty_careful(&head->list)) {
- dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+ } else if (nvme_available_path(head)) {
+ dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
spin_lock_irq(&head->requeue_lock);
bio_list_add(&head->requeue_list, bio);
spin_unlock_irq(&head->requeue_lock);
} else {
- dev_warn_ratelimited(dev, "no path - failing I/O\n");
+ dev_warn_ratelimited(dev, "no available path - failing I/O\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
srcu_read_unlock(&head->srcu, srcu_idx);
}
+ synchronize_srcu(&ns->head->srcu);
kblockd_schedule_work(&ns->head->requeue_work);
}
* Broken Write Zeroes.
*/
NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
+
+ /*
+ * Force simple suspend/resume path.
+ */
+ NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
};
/*
return ctrl->ana_log_buf != NULL;
}
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ return false;
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
{
struct nvme_dev *dev = data;
- nvme_reset_ctrl_sync(&dev->ctrl);
+ flush_work(&dev->ctrl.reset_work);
flush_work(&dev->ctrl.scan_work);
nvme_put_ctrl(&dev->ctrl);
}
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+ nvme_reset_ctrl(&dev->ctrl);
nvme_get_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
struct nvme_ctrl *ctrl = &ndev->ctrl;
- if (pm_resume_via_firmware() || !ctrl->npss ||
+ if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
nvme_reset_ctrl(ctrl);
return 0;
struct nvme_ctrl *ctrl = &ndev->ctrl;
int ret = -EBUSY;
+ ndev->last_ps = U32_MAX;
+
/*
* The platform does not remove power for a kernel managed suspend so
* use host managed nvme power settings for lowest idle power if
* shutdown. But if the firmware is involved after the suspend or the
* device does not support any non-default power states, shut down the
* device fully.
+ *
+ * If ASPM is not enabled for the device, shut down the device and allow
+ * the PCI bus layer to put it into D3 in order to take the PCIe link
+ * down, so as to allow the platform to achieve its minimum low-power
+ * state (which may not be possible if the link is up).
*/
- if (pm_suspend_via_firmware() || !ctrl->npss) {
+ if (pm_suspend_via_firmware() || !ctrl->npss ||
+ !pcie_aspm_enabled(pdev) ||
+ (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
nvme_dev_disable(ndev, true);
return 0;
}
ctrl->state != NVME_CTRL_ADMIN_ONLY)
goto unfreeze;
- ndev->last_ps = 0;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
if (ret < 0)
goto unfreeze;
return ret;
}
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
return;
-
- rdma_disconnect(queue->cm_id);
- ib_drain_qp(queue->qp);
+ __nvme_rdma_stop_queue(queue);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
- if (!ret)
+ if (!ret) {
set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
- else
+ } else {
+ __nvme_rdma_stop_queue(queue);
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
+ }
return ret;
}
found:
list_del(&p->entry);
+ nvmet_port_del_ctrls(port, subsys);
nvmet_port_disc_changed(port, subsys);
if (list_empty(&port->subsystems))
u16 status;
switch (errno) {
+ case 0:
+ status = NVME_SC_SUCCESS;
+ break;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->port == port)
+ ctrl->ops->delete_ctrl(ctrl);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
int nvmet_enable_port(struct nvmet_port *port)
{
const struct nvmet_fabrics_ops *ops;
mutex_lock(&nvme_loop_ports_mutex);
list_del_init(&port->entry);
mutex_unlock(&nvme_loop_ports_mutex);
+
+ /*
+ * Ensure any ctrls that are in the process of being
+ * deleted are in fact deleted before we return
+ * and free the port. This is to prevent active
+ * ctrls from using a port after it's freed.
+ */
+ flush_workqueue(nvme_delete_wq);
}
static const struct nvmet_fabrics_ops nvme_loop_ops = {
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+ struct nvmet_subsys *subsys);
+
int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);
* of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
*
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
for_each_child_of_node(local_fixups, child) {
for_each_child_of_node(overlay, overlay_child)
- if (!node_name_cmp(child, overlay_child))
+ if (!node_name_cmp(child, overlay_child)) {
+ of_node_put(overlay_child);
break;
+ }
- if (!overlay_child)
+ if (!overlay_child) {
+ of_node_put(child);
return -EINVAL;
+ }
err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
NULL, 0644);
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+ bool ret;
+
+ if (!bridge)
+ return false;
+
+ mutex_lock(&aspm_lock);
+ ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+ mutex_unlock(&aspm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
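A hedged usage sketch of the new helper from a driver's suspend path;
everything except pcie_aspm_enabled() is hypothetical:

    static int foo_suspend(struct pci_dev *pdev)
    {
            /* With ASPM active the link can idle on its own, so a
             * shallow, link-up suspend is safe; otherwise power the
             * device down fully so the bus can drop it to D3. */
            if (pcie_aspm_enabled(pdev))
                    return foo_light_suspend(pdev);
            return foo_full_shutdown(pdev);
    }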
#ifdef CONFIG_PCIEASPM_DEBUG
static ssize_t link_state_show(struct device *dev,
struct device_attribute *attr,
*/
if (ioread32(map + 0x2240c) & 0x2) {
pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
- ret = pci_reset_function(pdev);
+ ret = pci_reset_bus(pdev);
if (ret < 0)
pci_err(pdev, "Failed to reset GPU: %d\n", ret);
}
if (IS_ERR(map))
return map;
} else
- map = ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENODEV);
ctx->maps[ASPEED_IP_LPC] = map;
dev_dbg(ctx->dev, "Acquired LPC regmap");
return ERR_PTR(-EINVAL);
}
+static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
+ const struct aspeed_sig_expr *expr,
+ bool enabled)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < expr->ndescs; i++) {
+ const struct aspeed_sig_desc *desc = &expr->descs[i];
+ struct regmap *map;
+
+ map = aspeed_g5_acquire_regmap(ctx, desc->ip);
+ if (IS_ERR(map)) {
+ dev_err(ctx->dev,
+ "Failed to acquire regmap for IP block %d\n",
+ desc->ip);
+ return PTR_ERR(map);
+ }
+
+ ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return 1;
+}
+
/**
* Configure a pin's signal by applying an expression's descriptor state for
* all descriptors in the expression.
}
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
+ .eval = aspeed_g5_sig_expr_eval,
.set = aspeed_g5_sig_expr_set,
};
* neither the enabled nor disabled state. Thus we must explicitly test for
* either condition as required.
*/
-int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
+int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr, bool enabled)
{
- int i;
int ret;
+ int i;
+
+ if (ctx->ops->eval)
+ return ctx->ops->eval(ctx, expr, enabled);
for (i = 0; i < expr->ndescs; i++) {
const struct aspeed_sig_desc *desc = &expr->descs[i];
struct aspeed_pinmux_data;
struct aspeed_pinmux_ops {
+ int (*eval)(struct aspeed_pinmux_data *ctx,
+ const struct aspeed_sig_expr *expr, bool enabled);
int (*set)(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr, bool enabled);
};
int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
struct regmap *map);
-int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
- const struct aspeed_sig_expr *expr,
- bool enabled);
+int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
+ const struct aspeed_sig_expr *expr, bool enabled);
static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr,
*/
static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
*/
static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
USB_CH_IP_CUR_LVL_1P5;
break;
}
+ /* Else, fall through */
case USB_STAT_HM_IDGND:
dev_err(di->dev, "USB Type - Charging not allowed\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RAS) += ras.o debugfs.o
+obj-$(CONFIG_RAS) += ras.o
+obj-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_RAS_CEC) += cec.o
*/
#include <linux/mm.h>
#include <linux/gfp.h>
+#include <linux/ras.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/debugfs.h>
+#include <linux/ras.h>
+#include "debugfs.h"
struct dentry *ras_debugfs_dir;
reg = ACT8945A_DCDC3_CTRL;
break;
case ACT8945A_ID_LDO1:
- reg = ACT8945A_LDO1_SUS;
+ reg = ACT8945A_LDO1_CTRL;
break;
case ACT8945A_ID_LDO2:
- reg = ACT8945A_LDO2_SUS;
+ reg = ACT8945A_LDO2_CTRL;
break;
case ACT8945A_ID_LDO3:
- reg = ACT8945A_LDO3_SUS;
+ reg = ACT8945A_LDO3_CTRL;
break;
case ACT8945A_ID_LDO4:
- reg = ACT8945A_LDO4_SUS;
+ reg = ACT8945A_LDO4_CTRL;
break;
default:
return -EINVAL;
ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
"enable-gpios", 0,
gflags, "gpio-en-ldo");
- if (ena_gpiod) {
+ if (!IS_ERR(ena_gpiod)) {
config->ena_gpiod = ena_gpiod;
devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
}
GPIOD_OUT_HIGH
| GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"slg51000-cs");
- if (cs_gpiod) {
+ if (!IS_ERR(cs_gpiod)) {
dev_info(dev, "Found chip selector property\n");
chip->cs_gpiod = cs_gpiod;
}
2500, 2750,
};
+/* 600mV to 1450mV in 12.5 mV steps */
+static const struct regulator_linear_range VDD1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
+};
+
+/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
+static const struct regulator_linear_range VDD2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
+ REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
+};
+
static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
}
static const struct regulator_ops twl4030smps_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+
.set_voltage = twl4030smps_set_voltage,
.get_voltage = twl4030smps_get_voltage,
};
}, \
}
-#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
+ n_volt) \
static const struct twlreg_info TWL4030_INFO_##label = { \
.base = offset, \
.id = num, \
.owner = THIS_MODULE, \
.enable_time = turnon_delay, \
.of_map_mode = twl4030reg_map_mode, \
+ .n_voltages = n_volt, \
+ .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
+ .linear_ranges = label ## _ranges, \
}, \
}
TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
-TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
-TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
+TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
+TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
/* VUSBCP is managed *only* by the USB subchip */
TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
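The ranges above encode the usual linear-range arithmetic,
uV = min_uV + selector * step_uV. A quick check against the VDD1 table:

    unsigned int sel = 68;
    unsigned int uv = 600000 + sel * 12500;     /* 1450000 uV = 1.45 V */
    /* VDD2's extra one-entry range maps selector 69 to 1500000 uV,
     * matching the "everything above = 1500mV" comment. */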
struct qeth_reply {
struct list_head list;
struct completion received;
+ spinlock_t lock;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
if (reply) {
refcount_set(&reply->refcnt, 1);
init_completion(&reply->received);
+ spin_lock_init(&reply->lock);
}
return reply;
}
if (!reply->callback) {
rc = 0;
+ goto no_callback;
+ }
+
+ spin_lock_irqsave(&reply->lock, flags);
+ if (reply->rc) {
+ /* Bail out when the requestor has already left: */
+ rc = reply->rc;
} else {
if (cmd) {
reply->offset = (u16)((char *)cmd - (char *)iob->data);
rc = reply->callback(card, reply, (unsigned long)iob);
}
}
+ spin_unlock_irqrestore(&reply->lock, flags);
+no_callback:
if (rc <= 0)
qeth_notify_reply(reply, rc);
qeth_put_reply(reply);
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_reply(card, reply);
+
+ if (reply_cb) {
+ /* Wait until the callback for a late reply has completed: */
+ spin_lock_irq(&reply->lock);
+ if (rc)
+ /* Zap any callback that's still pending: */
+ reply->rc = rc;
+ spin_unlock_irq(&reply->lock);
+ }
+
if (!rc)
rc = reply->rc;
qeth_put_reply(reply);
get_user(req_len, &ureq->hdr.req_len))
return -EFAULT;
+ /* Sanitize user input to avoid overflows in the iob size calculation: */
+ if (req_len > QETH_BUFSIZE)
+ return -EINVAL;
+
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
if (!iob)
return -ENOMEM;
pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
/* Fall through */
#endif
+ /* Fall through - only for the #else condition above. */
default:
error = -ENXIO;
pr_err("unhandled device %d\n", dev->dev_type);
uint32_t cfg_cq_poll_threshold;
uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
+ uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
uint32_t cfg_suppress_rsp;
LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
+/*
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
+ *
+ * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
+ * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ *
+ * Value range is [0,256]. Default value is 8.
+ */
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+ LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+ "Set the number of SCSI Queues advertised");
+
/*
* lpfc_hdw_queue: Set the number of Hardware Queues the driver
* will advertise it supports to the NVME and SCSI layers. This also
&dev_attr_lpfc_cq_poll_threshold,
&dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
+ &dev_attr_lpfc_fcp_mq_threshold,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- shost->nr_hw_queues = phba->cfg_hdw_queue;
- else
- shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+ if (!phba->cfg_fcp_mq_threshold ||
+ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+ phba->cfg_fcp_mq_threshold);
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
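A worked example of the clamp above, with illustrative values: on a
two-node system with cfg_hdw_queue = 16 and the default threshold of 8,

    nr_hw_queues = min_t(int, 2 * num_possible_nodes(),    /* 2 * 2 */
                         phba->cfg_fcp_mq_threshold);      /* 8 */
    /* result: 4 SCSI hardware queues advertised */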
/* This loop sets up all CPUs that are affinitized with a
* irq vector assigned to the driver. All affinitized CPUs
* will get a link to that vectors IRQ and EQ.
+ *
+ * NULL affinity mask handling:
+ * If irq count is greater than one, log an error message.
+ * If the null mask is received for the first irq, find the
+ * first present cpu, and assign the eq index to ensure at
+ * least one EQ is assigned.
*/
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
/* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp)
- continue;
+ if (!maskp) {
+ if (phba->cfg_irq_chann > 1)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3329 No affinity mask found "
+ "for vector %d (%d)\n",
+ idx, phba->cfg_irq_chann);
+ if (!idx) {
+ cpu = cpumask_first(cpu_present_mask);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->eq = idx;
+ cpup->irq = pci_irq_vector(phba->pcidev, idx);
+ cpup->flag |= LPFC_CPU_FIRST_IRQ;
+ }
+ break;
+ }
i = 0;
/* Loop through all CPUs associated with vector idx */
#define LPFC_HBA_HDWQ_MAX 128
#define LPFC_HBA_HDWQ_DEF 0
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN 0
+#define LPFC_FCP_MQ_THRESHOLD_MAX 256
+#define LPFC_FCP_MQ_THRESHOLD_DEF 8
+
/* Common buffer size to accommodate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vha->gnl.l = NULL;
+
vfree(vha->scan.l);
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
return 0;
probe_failed:
+ if (base_vha->gnl.l) {
+ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ base_vha->gnl.l, base_vha->gnl.ldma);
+ base_vha->gnl.l = NULL;
+ }
+
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
base_vha->flags.online = 0;
if (!atomic_read(&pdev->enable_cnt)) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
-
+ base_vha->gnl.l = NULL;
scsi_host_put(base_vha->host);
kfree(ha);
pci_set_drvdata(pdev, NULL);
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ base_vha->gnl.l = NULL;
+
vfree(base_vha->scan.l);
if (IS_QLAFX00(ha))
"Alloc failed for scan database.\n");
dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
vha->gnl.l, vha->gnl.ldma);
+ vha->gnl.l = NULL;
scsi_remove_host(vha->host);
return NULL;
}
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
+ if (!vreg)
+ return 0;
+
return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_IXP4XX || COMPILE_TEST
+
menu "IXP4xx SoC drivers"
config IXP4XX_QMGR
and is automatically selected by Ethernet and HSS drivers.
endmenu
+
+endif
struct geni_wrapper *wrapper = se->wrapper;
u32 val;
+ if (!wrapper)
+ return -EINVAL;
+
*iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(wrapper->dev, *iova))
return -EIO;
struct geni_wrapper *wrapper = se->wrapper;
u32 val;
+ if (!wrapper)
+ return -EINVAL;
+
*iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(wrapper->dev, *iova))
return -EIO;
}
#ifdef CONFIG_SUSPEND
-struct wkup_m3_wakeup_src rtc_wake_src(void)
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
{
u32 i;
return rtc_ext_wakeup;
}
-int am33xx_rtc_only_idle(unsigned long wfi_flags)
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
{
omap_rtc_power_off_program(&omap_rtc->dev);
am33xx_do_wfi_sram(wfi_flags);
if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
"omap_rtc_scratch0");
- if (nvmem)
+ if (!IS_ERR(nvmem))
nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
(void *)&rtc_magic_val);
rtc_only_idle = 1;
struct nvmem_device *nvmem;
nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+ if (IS_ERR(nvmem))
+ return;
+
m3_ipc->ops->finish_low_power(m3_ipc);
if (rtc_only_idle) {
- if (retrigger_irq)
+ if (retrigger_irq) {
/*
* 32 bits of Interrupt Set-Pending correspond to
* 32 interrupts. Compute the bit offset of the
writel_relaxed(1 << (retrigger_irq & 31),
gic_dist_base + GIC_INT_SET_PENDING_BASE
+ retrigger_irq / 32 * 4);
- nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
- (void *)&val);
+ }
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&val);
}
rtc_only_idle = 0;
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
"omap_rtc_scratch0");
- if (nvmem) {
+ if (!IS_ERR(nvmem)) {
nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
4, (void *)&rtc_magic_val);
if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
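All three hunks correct the same mistake: devm_nvmem_device_get()
reports failure with an ERR_PTR()-encoded pointer, never NULL, so the
result must be tested with IS_ERR(). A minimal sketch:

    struct nvmem_device *nvmem;

    nvmem = devm_nvmem_device_get(dev, "omap_rtc_scratch0");
    if (IS_ERR(nvmem))
            return PTR_ERR(nvmem);  /* e.g. -EPROBE_DEFER or -ENOENT */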
#
menuconfig SOUNDWIRE
- bool "SoundWire support"
+ tristate "SoundWire support"
help
SoundWire is a 2-Pin interface with data and clock line ratified
by the MIPI Alliance. SoundWire is used for transporting data
comment "SoundWire Devices"
-config SOUNDWIRE_BUS
- tristate
- select REGMAP_SOUNDWIRE
-
config SOUNDWIRE_CADENCE
tristate
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- select SOUNDWIRE_BUS
depends on X86 && ACPI && SND_SOC
help
SoundWire Intel Master driver.
#Bus Objs
soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
#Cadence Objs
soundwire-cadence-objs := cadence_master.o
#define CDNS_MCP_INTSET 0x4C
-#define CDNS_SDW_SLAVE_STAT 0x50
-#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0)
+#define CDNS_MCP_SLAVE_STAT 0x50
+#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
#define CDNS_MCP_SLAVE_INTSTAT0 0x54
#define CDNS_MCP_SLAVE_INTSTAT1 0x58
#define CDNS_MCP_SLAVE_INTMASK0 0x5C
#define CDNS_MCP_SLAVE_INTMASK1 0x60
-#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0)
-#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0)
+#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0)
#define CDNS_MCP_PORT_INTSTAT 0x64
#define CDNS_MCP_PDI_STAT 0x6C
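GENMASK(h, l) expands to a contiguous mask with bits h..l set, which is
what these register fields need; BIT() takes a single bit index, so the
old BIT(1, 0) was never a two-bit mask. For reference:

    u32 stat_mask = GENMASK(1, 0);      /* 0x00000003 */
    u32 int0_mask = GENMASK(31, 0);     /* 0xffffffff */
    u32 int1_mask = GENMASK(15, 0);     /* 0x0000ffff */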
static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
unsigned int flags)
{
- int divider, base, prescale;
+ unsigned int divider, base, prescale;
- /* This function needs improvment */
+ /* This function needs improvement */
/* Don't know if divider==0 works. */
for (prescale = 0; prescale < 16; prescale++) {
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
if (divider < 65536) {
}
prescale = 15;
- base = timer_base * (1 << prescale);
+ base = timer_base * (prescale + 1);
divider = 65535;
*nanosec = divider * base;
return (prescale << 16) | (divider);
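The return value packs both timer fields into one word. With the
saturated fallback above (illustrative arithmetic):

    unsigned int prescale = 15, divider = 65535;
    unsigned int packed = (prescale << 16) | divider;   /* 0x000fffff */
    /* achieved period: divider * timer_base * (prescale + 1) ns */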
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
- uint32_t read_len = se_cmd->data_length;
+ uint32_t read_len;
/*
* cmd has been completed already from timeout, just reclaim
* data area space and free cmd
*/
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ WARN_ON_ONCE(se_cmd);
goto out;
+ }
list_del_init(&cmd->queue_entry);
goto done;
}
+ read_len = se_cmd->data_length;
if (se_cmd->data_direction == DMA_FROM_DEVICE &&
(entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
read_len_valid = true;
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
list_del_init(&cmd->queue_entry);
+ cmd->se_cmd = NULL;
} else {
list_del_init(&cmd->queue_entry);
idr_remove(&udev->commands, id);
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
imx_disable_unprepare_clks(dev);
disable_hsic_regulator:
if (data->hsic_pad_regulator)
- ret = regulator_disable(data->hsic_pad_regulator);
+ /* don't overwrite original ret (cf. EPROBE_DEFER) */
+ regulator_disable(data->hsic_pad_regulator);
if (pdata.flags & CI_HDRC_PMQOS)
pm_qos_remove_request(&data->pm_qos_req);
+ data->ci_pdev = NULL;
return ret;
}
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
- ci_hdrc_remove_device(data->ci_pdev);
+ if (data->ci_pdev)
+ ci_hdrc_remove_device(data->ci_pdev);
if (data->override_phy_control)
usb_phy_shutdown(data->phy);
- imx_disable_unprepare_clks(&pdev->dev);
- if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
- if (data->hsic_pad_regulator)
- regulator_disable(data->hsic_pad_regulator);
+ if (data->ci_pdev) {
+ imx_disable_unprepare_clks(&pdev->dev);
+ if (data->plat_data->flags & CI_HDRC_PMQOS)
+ pm_qos_remove_request(&data->pm_qos_req);
+ if (data->hsic_pad_regulator)
+ regulator_disable(data->hsic_pad_regulator);
+ }
return 0;
}
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
unsigned long flags;
- spin_lock_irqsave(&ci->lock, flags);
- ci->gadget.speed = USB_SPEED_UNKNOWN;
- ci->remote_wakeup = 0;
- ci->suspended = 0;
- spin_unlock_irqrestore(&ci->lock, flags);
-
/* flush all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_fifo_flush(ep);
ci->status = NULL;
}
+ spin_lock_irqsave(&ci->lock, flags);
+ ci->gadget.speed = USB_SPEED_UNKNOWN;
+ ci->remote_wakeup = 0;
+ ci->suspended = 0;
+ spin_unlock_irqrestore(&ci->lock, flags);
+
return 0;
}
return -EBUSY;
spin_lock_irqsave(hwep->lock, flags);
+ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return 0;
+ }
/* only internal SW should disable ctrl endpts */
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
+ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return 0;
+ }
retval = _ep_queue(ep, req, gfp_flags);
spin_unlock_irqrestore(hwep->lock, flags);
return retval;
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
-
- hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+ if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
+ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
}
spin_lock_irqsave(hwep->lock, flags);
+ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return;
+ }
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
int ret = 0;
spin_lock_irqsave(&ci->lock, flags);
+ if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(&ci->lock, flags);
+ return 0;
+ }
if (!ci->remote_wakeup) {
ret = -EOPNOTSUPP;
goto out;
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
- minor = acm_alloc_minor(acm);
- if (minor < 0)
- goto alloc_fail1;
-
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
acm->control = control_interface;
acm->data = data_interface;
+
+ usb_get_intf(acm->control); /* undone in destruct() */
+
+ minor = acm_alloc_minor(acm);
+ if (minor < 0)
+ goto alloc_fail1;
+
acm->minor = minor;
acm->dev = usb_dev;
if (h.usb_cdc_acm_descriptor)
usb_driver_claim_interface(&acm_driver, data_interface, acm);
usb_set_intfdata(data_interface, acm);
- usb_get_intf(control_interface);
tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
if (IS_ERR(tty_dev)) {
{
struct wdm_device *desc = file->private_data;
- wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+ wait_event(desc->wait,
+ /*
+ * We need both flags; we cannot make do with one,
+ * because clearing it would race with write(),
+ * yet we still need to signal a disconnect.
+ */
+ !test_bit(WDM_IN_USE, &desc->flags) ||
+ test_bit(WDM_DISCONNECTING, &desc->flags));
/* cannot dereference desc->intf if WDM_DISCONNECTING */
- if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ return -ENODEV;
+ if (desc->werr < 0)
dev_err(&desc->intf->dev, "Error in flush path: %d\n",
desc->werr);
spin_lock_irqsave(&desc->iuspin, flags);
set_bit(WDM_DISCONNECTING, &desc->flags);
set_bit(WDM_READ, &desc->flags);
- /* to terminate pending flushes */
- clear_bit(WDM_IN_USE, &desc->flags);
spin_unlock_irqrestore(&desc->iuspin, flags);
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
goto err_put;
}
+ retcode = -EINVAL;
data->bulk_in = bulk_in->bEndpointAddress;
data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
+ if (!data->wMaxPacketSize)
+ goto err_put;
dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
data->bulk_out = bulk_out->bEndpointAddress;
char name[16];
int i, size;
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!is_device_dma_capable(hcd->self.sysdev) &&
- !hcd->localmem_pool))
+ if (hcd->localmem_pool || !hcd_uses_dma(hcd))
return 0;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
/* some USB hosts just use PIO */
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- !is_device_dma_capable(bus->sysdev)) {
+ if (!hcd_uses_dma(hcd)) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
}
return;
}
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- !is_device_dma_capable(bus->sysdev)) {
+ if (!hcd_uses_dma(hcd)) {
kfree(addr);
return;
}
intf->minor = minor;
break;
}
- up_write(&minor_rwsem);
- if (intf->minor < 0)
+ if (intf->minor < 0) {
+ up_write(&minor_rwsem);
return -EXFULL;
+ }
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
- down_write(&minor_rwsem);
usb_minors[minor] = NULL;
intf->minor = -1;
- up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
+ up_write(&minor_rwsem);
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
up_write(&minor_rwsem);
- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
intf->usb_dev = NULL;
intf->minor = -1;
destroy_usb_class();
/* EHCI, OHCI */
hcd->rsrc_start = pci_resource_start(dev, 0);
hcd->rsrc_len = pci_resource_len(dev, 0);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
+ if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
+ hcd->rsrc_len, driver->description)) {
dev_dbg(&dev->dev, "controller already in use\n");
retval = -EBUSY;
goto put_hcd;
}
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+ hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
retval = -EFAULT;
- goto release_mem_region;
+ goto put_hcd;
}
} else {
hcd->rsrc_start = pci_resource_start(dev, region);
hcd->rsrc_len = pci_resource_len(dev, region);
- if (request_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description))
+ if (devm_request_region(&dev->dev, hcd->rsrc_start,
+ hcd->rsrc_len, driver->description))
break;
}
if (region == PCI_ROM_RESOURCE) {
}
if (retval != 0)
- goto unmap_registers;
+ goto put_hcd;
device_wakeup_enable(hcd->self.controller);
if (pci_dev_run_wake(dev))
pm_runtime_put_noidle(&dev->dev);
return retval;
-unmap_registers:
- if (driver->flags & HCD_MEMORY) {
- iounmap(hcd->regs);
-release_mem_region:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- } else
- release_region(hcd->rsrc_start, hcd->rsrc_len);
put_hcd:
usb_put_hcd(hcd);
disable_pci:
dev_set_drvdata(&dev->dev, NULL);
up_read(&companions_rwsem);
}
-
- if (hcd->driver->flags & HCD_MEMORY) {
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- } else {
- release_region(hcd->rsrc_start, hcd->rsrc_len);
- }
-
usb_put_hcd(hcd);
pci_disable_device(dev);
}
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_pio_for_control)
return ret;
- if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (is_vmalloc_addr(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is not dma capable\n");
return -EAGAIN;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (urb->num_sgs) {
int n;
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
buf = urb->transfer_buffer;
- if (hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (!buf && (urb->transfer_dma & 3)) {
dev_err(hsotg->dev,
"%s: unaligned transfer with no transfer_buffer",
* disconnect callbacks?
*/
spin_lock_irqsave(&cdev->lock, flags);
+ cdev->suspended = 0;
if (cdev->config)
reset_config(cdev);
if (cdev->driver->disconnect)
struct fsg_common {
struct usb_gadget *gadget;
struct usb_composite_dev *cdev;
- struct fsg_dev *fsg, *new_fsg;
+ struct fsg_dev *fsg;
wait_queue_head_t io_wait;
wait_queue_head_t fsg_wait;
unsigned int bulk_out_maxpacket;
enum fsg_state state; /* For exception handling */
unsigned int exception_req_tag;
+ void *exception_arg;
enum data_direction data_dir;
u32 data_size;
/* These routines may be called in process context or in_irq */
-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
+ void *arg)
{
unsigned long flags;
if (common->state <= new_state) {
common->exception_req_tag = common->ep0_req_tag;
common->state = new_state;
+ common->exception_arg = arg;
if (common->thread_task)
send_sig_info(SIGUSR1, SEND_SIG_PRIV,
common->thread_task);
spin_unlock_irqrestore(&common->lock, flags);
}
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+ __raise_exception(common, new_state, NULL);
+}
/*-------------------------------------------------------------------------*/
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
- fsg->common->new_fsg = fsg;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
return USB_GADGET_DELAYED_STATUS;
}
static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
}
enum fsg_state old_state;
struct fsg_lun *curlun;
unsigned int exception_req_tag;
+ struct fsg_dev *new_fsg;
/*
* Clear the existing signals. Anything but SIGUSR1 is converted
common->next_buffhd_to_fill = &common->buffhds[0];
common->next_buffhd_to_drain = &common->buffhds[0];
exception_req_tag = common->exception_req_tag;
+ new_fsg = common->exception_arg;
old_state = common->state;
common->state = FSG_STATE_NORMAL;
break;
case FSG_STATE_CONFIG_CHANGE:
- do_set_interface(common, common->new_fsg);
- if (common->new_fsg)
+ do_set_interface(common, new_fsg);
+ if (new_fsg)
usb_composite_setup_continue(common->cdev);
break;
DBG(fsg, "unbind\n");
if (fsg->common->fsg == fsg) {
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
/* FIXME: make interruptible or killable somehow? */
wait_event(common->fsg_wait, common->fsg != fsg);
}
default:
break;
}
-
+ break;
case USB_REQ_SET_ADDRESS:
if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/sys_soc.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
if (usb3->forced_b_device)
return -EBUSY;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode_is_host = true;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode_is_host = false;
else
return -EINVAL;
/* see what we found out */
temp = check_reset_complete(fotg210, wIndex, status_reg,
fotg210_readl(fotg210, status_reg));
+
+ /* restart schedule */
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
}
if (!(temp & (PORT_RESUME|PORT_RESET))) {
* other cases where the next software may expect clean state from the
* "firmware". this is bus-neutral, unlike shutdown() methods.
*/
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci;
ohci->rh_state = OHCI_RH_HALTED;
}
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ohci->lock, flags);
+ _ohci_shutdown(hcd);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
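The split above is the usual locked-wrapper pattern: keep a lock-free
body for callers that already hold ohci->lock (the "died" interrupt
path), plus a public wrapper that takes the lock for everyone else. A
generic sketch with hypothetical names:

    static void _do_shutdown(struct foo *f)     /* caller holds f->lock */
    {
            /* halt the hardware ... */
    }

    static void do_shutdown(struct foo *f)
    {
            unsigned long flags;

            spin_lock_irqsave(&f->lock, flags);
            _do_shutdown(f);
            spin_unlock_irqrestore(&f->lock, flags);
    }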
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
died:
usb_hc_died(ohci_to_hcd(ohci));
ohci_dump(ohci);
- ohci_shutdown(ohci_to_hcd(ohci));
+ _ohci_shutdown(ohci_to_hcd(ohci));
goto done;
} else {
/* No write back because the done queue was empty */
return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
- of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+ of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
}
static int xhci_rcar_is_gen3(struct device *dev)
tegra_xusb_config(tegra, regs);
+ /*
+ * The XUSB Falcon microcontroller can only address 40 bits, so set
+ * the DMA mask accordingly.
+ */
+ err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ goto put_rpm;
+ }
+
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+ /* Motorola devices */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
#ifdef CONFIG_REALTEK_AUTOPM
static int ss_en = 1;
goto INIT_FAIL;
}
- if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
- CHECK_FW_VER(chip, 0x5901))
- SET_AUTO_DELINK(chip);
- if (STATUS_LEN(chip) == 16) {
- if (SUPPORT_AUTO_DELINK(chip))
+ if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+ CHECK_PID(chip, 0x0159)) {
+ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+ CHECK_FW_VER(chip, 0x5901))
SET_AUTO_DELINK(chip);
+ if (STATUS_LEN(chip) == 16) {
+ if (SUPPORT_AUTO_DELINK(chip))
+ SET_AUTO_DELINK(chip);
+ }
}
#ifdef CONFIG_REALTEK_AUTOPM
if (ss_en)
US_FL_IGNORE_RESIDUE ),
/* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
else if ((pdo_min_voltage(pdo[i]) ==
pdo_min_voltage(pdo[i - 1])) &&
(pdo_max_voltage(pdo[i]) ==
- pdo_min_voltage(pdo[i - 1])))
+ pdo_max_voltage(pdo[i - 1])))
return PDO_ERR_DUPE_PDO;
break;
/*
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others
+ * with packets.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
enum {
VHOST_TEST_VQ = 0,
VHOST_TEST_VQ_MAX = 1,
}
vhost_add_used_and_signal(&n->dev, vq, head, 0);
total_len += len;
- if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
+ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
break;
- }
}
mutex_unlock(&vq->mutex);
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
f->private_data = n;
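For reference, the effect of the two test-device weights can be modelled in isolation. A minimal user-space sketch follows (the constants mirror the defines above; the helper itself is hypothetical and merely stands in for the in-kernel vhost_exceeds_weight()):

#include <stdbool.h>
#include <stddef.h>

#define VHOST_TEST_PKT_WEIGHT 256	/* packets per service pass */
#define VHOST_TEST_WEIGHT     0x80000	/* bytes per service pass */

/* Sketch: the handler breaks out of its service loop once either
 * budget is spent, so one busy virtqueue cannot monopolize the
 * vhost worker thread.
 */
static bool test_exceeds_weight(int pkts, size_t total_len)
{
	return pkts >= VHOST_TEST_PKT_WEIGHT ||
	       total_len >= VHOST_TEST_WEIGHT;
}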
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
__poll_t mask;
- int ret = 0;
if (poll->wqh)
return 0;
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & EPOLLERR) {
vhost_poll_stop(poll);
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
__vhost_vq_meta_reset(d->vqs[i]);
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_map_unprefetch(struct vhost_map *map)
-{
- kfree(map->pages);
- map->pages = NULL;
- map->npages = 0;
- map->addr = NULL;
-}
-
-static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
-{
- struct vhost_map *map[VHOST_NUM_ADDRS];
- int i;
-
- spin_lock(&vq->mmu_lock);
- for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- map[i] = rcu_dereference_protected(vq->maps[i],
- lockdep_is_held(&vq->mmu_lock));
- if (map[i])
- rcu_assign_pointer(vq->maps[i], NULL);
- }
- spin_unlock(&vq->mmu_lock);
-
- synchronize_rcu();
-
- for (i = 0; i < VHOST_NUM_ADDRS; i++)
- if (map[i])
- vhost_map_unprefetch(map[i]);
-
-}
-
-static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
-{
- int i;
-
- vhost_uninit_vq_maps(vq);
- for (i = 0; i < VHOST_NUM_ADDRS; i++)
- vq->uaddrs[i].size = 0;
-}
-
-static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
- unsigned long start,
- unsigned long end)
-{
- if (unlikely(!uaddr->size))
- return false;
-
- return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
-}
-
-static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
- int index,
- unsigned long start,
- unsigned long end)
-{
- struct vhost_uaddr *uaddr = &vq->uaddrs[index];
- struct vhost_map *map;
- int i;
-
- if (!vhost_map_range_overlap(uaddr, start, end))
- return;
-
- spin_lock(&vq->mmu_lock);
- ++vq->invalidate_count;
-
- map = rcu_dereference_protected(vq->maps[index],
- lockdep_is_held(&vq->mmu_lock));
- if (map) {
- if (uaddr->write) {
- for (i = 0; i < map->npages; i++)
- set_page_dirty(map->pages[i]);
- }
- rcu_assign_pointer(vq->maps[index], NULL);
- }
- spin_unlock(&vq->mmu_lock);
-
- if (map) {
- synchronize_rcu();
- vhost_map_unprefetch(map);
- }
-}
-
-static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
- int index,
- unsigned long start,
- unsigned long end)
-{
- if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
- return;
-
- spin_lock(&vq->mmu_lock);
- --vq->invalidate_count;
- spin_unlock(&vq->mmu_lock);
-}
-
-static int vhost_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct vhost_dev *dev = container_of(mn, struct vhost_dev,
- mmu_notifier);
- int i, j;
-
- if (!mmu_notifier_range_blockable(range))
- return -EAGAIN;
-
- for (i = 0; i < dev->nvqs; i++) {
- struct vhost_virtqueue *vq = dev->vqs[i];
-
- for (j = 0; j < VHOST_NUM_ADDRS; j++)
- vhost_invalidate_vq_start(vq, j,
- range->start,
- range->end);
- }
-
- return 0;
-}
-
-static void vhost_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct vhost_dev *dev = container_of(mn, struct vhost_dev,
- mmu_notifier);
- int i, j;
-
- for (i = 0; i < dev->nvqs; i++) {
- struct vhost_virtqueue *vq = dev->vqs[i];
-
- for (j = 0; j < VHOST_NUM_ADDRS; j++)
- vhost_invalidate_vq_end(vq, j,
- range->start,
- range->end);
- }
-}
-
-static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
- .invalidate_range_start = vhost_invalidate_range_start,
- .invalidate_range_end = vhost_invalidate_range_end,
-};
-
-static void vhost_init_maps(struct vhost_dev *dev)
-{
- struct vhost_virtqueue *vq;
- int i, j;
-
- dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
-
- for (i = 0; i < dev->nvqs; ++i) {
- vq = dev->vqs[i];
- for (j = 0; j < VHOST_NUM_ADDRS; j++)
- RCU_INIT_POINTER(vq->maps[j], NULL);
- }
-}
-#endif
-
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
- vq->invalidate_count = 0;
__vhost_vq_meta_reset(vq);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_reset_vq_maps(vq);
-#endif
}
static int vhost_worker(void *data)
INIT_LIST_HEAD(&dev->read_list);
INIT_LIST_HEAD(&dev->pending_list);
spin_lock_init(&dev->iotlb_lock);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_init_maps(dev);
-#endif
+
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->heads = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
- spin_lock_init(&vq->mmu_lock);
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
if (err)
goto err_cgroup;
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
- if (err)
- goto err_mmu_notifier;
-#endif
-
return 0;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-err_mmu_notifier:
- vhost_dev_free_iovecs(dev);
-#endif
err_cgroup:
kthread_stop(worker);
dev->worker = NULL;
spin_unlock(&dev->iotlb_lock);
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
- int index, unsigned long uaddr,
- size_t size, bool write)
-{
- struct vhost_uaddr *addr = &vq->uaddrs[index];
-
- addr->uaddr = uaddr;
- addr->size = size;
- addr->write = write;
-}
-
-static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
-{
- vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
- (unsigned long)vq->desc,
- vhost_get_desc_size(vq, vq->num),
- false);
- vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
- (unsigned long)vq->avail,
- vhost_get_avail_size(vq, vq->num),
- false);
- vhost_setup_uaddr(vq, VHOST_ADDR_USED,
- (unsigned long)vq->used,
- vhost_get_used_size(vq, vq->num),
- true);
-}
-
-static int vhost_map_prefetch(struct vhost_virtqueue *vq,
- int index)
-{
- struct vhost_map *map;
- struct vhost_uaddr *uaddr = &vq->uaddrs[index];
- struct page **pages;
- int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
- int npinned;
- void *vaddr, *v;
- int err;
- int i;
-
- spin_lock(&vq->mmu_lock);
-
- err = -EFAULT;
- if (vq->invalidate_count)
- goto err;
-
- err = -ENOMEM;
- map = kmalloc(sizeof(*map), GFP_ATOMIC);
- if (!map)
- goto err;
-
- pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
- if (!pages)
- goto err_pages;
-
- err = EFAULT;
- npinned = __get_user_pages_fast(uaddr->uaddr, npages,
- uaddr->write, pages);
- if (npinned > 0)
- release_pages(pages, npinned);
- if (npinned != npages)
- goto err_gup;
-
- for (i = 0; i < npinned; i++)
- if (PageHighMem(pages[i]))
- goto err_gup;
-
- vaddr = v = page_address(pages[0]);
-
- /* For simplicity, fallback to userspace address if VA is not
- * contigious.
- */
- for (i = 1; i < npinned; i++) {
- v += PAGE_SIZE;
- if (v != page_address(pages[i]))
- goto err_gup;
- }
-
- map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
- map->npages = npages;
- map->pages = pages;
-
- rcu_assign_pointer(vq->maps[index], map);
- /* No need for a synchronize_rcu(). This function should be
- * called by dev->worker so we are serialized with all
- * readers.
- */
- spin_unlock(&vq->mmu_lock);
-
- return 0;
-
-err_gup:
- kfree(pages);
-err_pages:
- kfree(map);
-err:
- spin_unlock(&vq->mmu_lock);
- return err;
-}
-#endif
-
void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
kthread_stop(dev->worker);
dev->worker = NULL;
}
- if (dev->mm) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
-#endif
+ if (dev->mm)
mmput(dev->mm);
- }
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- for (i = 0; i < dev->nvqs; i++)
- vhost_uninit_vq_maps(dev->vqs[i]);
-#endif
dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- *((__virtio16 *)&used->ring[vq->num]) =
- cpu_to_vhost16(vq, vq->avail_idx);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
vhost_avail_event(vq));
}
struct vring_used_elem *head, int idx,
int count)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
- size_t size;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- size = count * sizeof(*head);
- memcpy(used->ring + idx, head, size);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_copy_to_user(vq, vq->used->ring + idx, head,
count * sizeof(*head));
}
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- used->flags = cpu_to_vhost16(vq, vq->used_flags);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
&vq->used->flags);
}
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
&vq->used->idx);
}
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *idx = avail->idx;
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *idx, &vq->avail->idx);
}
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
__virtio16 *head, int idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *head = avail->ring[idx & (vq->num - 1)];
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *head,
&vq->avail->ring[idx & (vq->num - 1)]);
}
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
__virtio16 *flags)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *flags = avail->flags;
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *flags, &vq->avail->flags);
}
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
__virtio16 *event)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *event = (__virtio16)avail->ring[vq->num];
- rcu_read_unlock();
- return 0;
- }
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *event, vhost_used_event(vq));
}
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- *idx = used->idx;
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_used(vq, *idx, &vq->used->idx);
}
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
struct vring_desc *desc, int idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_desc *d;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
- if (likely(map)) {
- d = map->addr;
- *desc = *(d + idx);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
return true;
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
-{
- struct vhost_map __rcu *map;
- int i;
-
- for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- rcu_read_lock();
- map = rcu_dereference(vq->maps[i]);
- rcu_read_unlock();
- if (unlikely(!map))
- vhost_map_prefetch(vq, i);
- }
-}
-#endif
-
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
unsigned int num = vq->num;
- if (!vq->iotlb) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_vq_map_prefetch(vq);
-#endif
+ if (!vq->iotlb)
return 1;
- }
return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
mutex_lock(&vq->mutex);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- /* Unregister MMU notifer to allow invalidation callback
- * can access vq->uaddrs[] without holding a lock.
- */
- if (d->mm)
- mmu_notifier_unregister(&d->mmu_notifier, d->mm);
-
- vhost_uninit_vq_maps(vq);
-#endif
-
switch (ioctl) {
case VHOST_SET_VRING_NUM:
r = vhost_vring_set_num(d, vq, argp);
BUG();
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_setup_vq_uaddr(vq);
-
- if (d->mm)
- mmu_notifier_register(&d->mmu_notifier, d->mm);
-#endif
-
mutex_unlock(&vq->mutex);
return r;
/* If this is an input descriptor, increment that count. */
if (access == VHOST_ACCESS_WO) {
*in_num += ret;
- if (unlikely(log)) {
+ if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
- if (unlikely(log)) {
+ if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
-#include <linux/pagemap.h>
-#include <linux/mmu_notifier.h>
-#include <asm/cacheflush.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
VHOST_NUM_ADDRS = 3,
};
-struct vhost_map {
- int npages;
- void *addr;
- struct page **pages;
-};
-
-struct vhost_uaddr {
- unsigned long uaddr;
- size_t size;
- bool write;
-};
-
-#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#else
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#endif
-
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- /* Read by memory accessors, modified by meta data
- * prefetching, MMU notifier and vring ioctl().
- * Synchonrized through mmu_lock (writers) and RCU (writers
- * and readers).
- */
- struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
- /* Read by MMU notifier, modified by vring ioctl(),
- * synchronized through MMU notifier
- * registering/unregistering.
- */
- struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
-#endif
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
-
struct file *kick;
struct eventfd_ctx *call_ctx;
struct eventfd_ctx *error_ctx;
bool user_be;
#endif
u32 busyloop_timeout;
- spinlock_t mmu_lock;
- int invalidate_count;
};
struct vhost_msg_node {
struct vhost_dev {
struct mm_struct *mm;
-#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier mmu_notifier;
-#endif
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;
case 'M':
case 'm':
size *= 1024;
+ /* Fall through */
case 'K':
case 'k':
size *= 1024;
unmap_release:
err_idx = i;
- i = head;
+
+ if (indirect)
+ i = 0;
+ else
+ i = head;
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
vring_unmap_one_split(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
+ i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
if (indirect)
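The subtlety in the hunk above is worth spelling out: for an indirect transfer the failed mappings live in the freshly allocated indirect table, whose chain starts at slot 0 rather than at the main ring's head, and the 'next' links must be read from that same table. A stand-alone model of the corrected unwind (types and names simplified; illustrative only):

#include <stdint.h>

struct desc {
	uint16_t next;	/* DMA address/length fields elided */
};

static void unwind_failed_mappings(struct desc *desc, int total_sg,
				   int err_idx, int indirect, int head)
{
	int i = indirect ? 0 : head;	/* the fix: start of the right chain */
	int n;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		/* vring_unmap_one_split(vq, &desc[i]) would go here */
		i = desc[i].next;
	}
}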
soft_margin = new_margin;
reload = soft_margin * (mem_fclk_21285 / 256);
watchdog_ping();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
ret = put_user(soft_margin, int_arg);
break;
cell = rcu_dereference_raw(net->ws_cell);
if (cell) {
afs_get_cell(cell);
+ ret = 0;
break;
}
ret = -EDESTADDRREQ;
done_seqretry(&net->cells_lock, seq);
+ if (ret != 0 && cell)
+ afs_put_cell(net, cell);
+
return ret == 0 ? cell : ERR_PTR(ret);
}
struct afs_call *call = container_of(work, struct afs_call, work);
struct afs_uuid *r = call->request;
- struct {
- __be32 match;
- } reply;
-
_enter("");
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
- reply.match = htonl(0);
+ afs_send_empty_reply(call);
else
- reply.match = htonl(1);
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ 1, 1, "K-1");
- afs_send_simple_reply(call, &reply, sizeof(reply));
afs_put_call(call);
_leave("");
}
* iterate through the data blob that lists the contents of an AFS directory
*/
static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key)
+ struct key *key, afs_dataversion_t *_dir_version)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_xdr_dir_page *dbuf;
req = afs_read_dir(dvnode, key);
if (IS_ERR(req))
return PTR_ERR(req);
+ *_dir_version = req->data_version;
/* round the file position up to the next entry boundary */
ctx->pos += sizeof(union afs_xdr_dirent) - 1;
*/
static int afs_readdir(struct file *file, struct dir_context *ctx)
{
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+ afs_dataversion_t dir_version;
+
+ return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+ &dir_version);
}
/*
* - just returns the FID the dentry name maps to if found
*/
static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key)
+ struct afs_fid *fid, struct key *key,
+ afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key);
+ ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
struct afs_server *server;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
+ afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
int ret, i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
cookie->fids[i].vid = as->volume->vid;
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key);
+ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
if (ret < 0) {
inode = ERR_PTR(ret);
goto out;
}
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
+
inode = ERR_PTR(-ENOENT);
if (!cookie->found)
goto out;
inode ? AFS_FS_I(inode) : NULL);
} else {
trace_afs_lookup(dvnode, &dentry->d_name,
- inode ? AFS_FS_I(inode) : NULL);
+ IS_ERR_OR_NULL(inode) ? NULL
+ : AFS_FS_I(inode));
}
return d;
}
struct dentry *parent;
struct inode *inode;
struct key *key;
- long dir_version, de_version;
+ afs_dataversion_t dir_version;
+ long de_version;
int ret;
if (flags & LOOKUP_RCU)
* on a 32-bit system, we only have 32 bits in the dentry to store the
* version.
*/
- dir_version = (long)dir->status.data_version;
+ dir_version = dir->status.data_version;
de_version = (long)dentry->d_fsdata;
- if (de_version == dir_version)
- goto out_valid;
+ if (de_version == (long)dir_version)
+ goto out_valid_noupdate;
- dir_version = (long)dir->invalid_before;
- if (de_version - dir_version >= 0)
+ dir_version = dir->invalid_before;
+ if (de_version - (long)dir_version >= 0)
goto out_valid;
_debug("dir modified");
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+ ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
}
out_valid:
- dentry->d_fsdata = (void *)dir_version;
+ dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
dput(parent);
key_put(key);
_leave(" = 1 [valid]");
iget_data->cb_s_break = fc->cbi->server->cb_s_break;
}
+/*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+ struct dentry *dentry,
+ struct afs_status_cb *scb)
+{
+ if (fc->ac.error == 0)
+ dentry->d_fsdata =
+ (void *)(unsigned long)scb->status.data_version;
+}
+
/*
* create a directory on an AFS filesystem
*/
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, scb);
+ afs_update_dentry_version(&fc, dentry, scb);
ret = afs_end_vnode_operation(&fc);
if (ret == 0) {
afs_dir_remove_subdir(dentry);
&data_version, &scb[0]);
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
&data_version_2, &scb[1]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
ret = afs_end_vnode_operation(&fc);
if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
ret = afs_dir_remove_link(dvnode, dentry, key);
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
NULL, &scb[1]);
ihold(&vnode->vfs_inode);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
d_instantiate(dentry, &vnode->vfs_inode);
mutex_unlock(&vnode->io_lock);
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
}
}
+	/* This bit is potentially nasty as there's a race with
+	 * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
+	 * to reflect its new parent's new data_version after the op, but
+ * d_revalidate may see old_dentry between the op having taken place
+ * and the version being updated.
+ *
+ * So drop the old_dentry for now to make other threads go through
+ * lookup instead - which we hold a lock against.
+ */
+ d_drop(old_dentry);
+
ret = -ERESTARTSYS;
if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
afs_dataversion_t orig_data_version;
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- goto error_rehash;
+ goto error_rehash_old;
}
- new_data_version = new_dvnode->status.data_version;
+ new_data_version = new_dvnode->status.data_version + 1;
} else {
new_data_version = orig_data_version;
new_scb = &scb[0];
}
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
- goto error_rehash;
+ goto error_rehash_old;
}
if (ret == 0) {
drop_nlink(new_inode);
spin_unlock(&new_inode->i_lock);
}
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parent's data_version.
+ *
+ * Note that if we ever implement RENAME_EXCHANGE, we'll have
+ * to update both dentries with opposing dir versions.
+ */
+ if (new_dvnode != orig_dvnode) {
+ afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+ } else {
+ afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+ }
d_move(old_dentry, new_dentry);
goto error_tmp;
}
+error_rehash_old:
+ d_rehash(new_dentry);
error_rehash:
if (rehash)
d_rehash(rehash);
int i;
if (refcount_dec_and_test(&req->usage)) {
- for (i = 0; i < req->nr_pages; i++)
- if (req->pages[i])
- put_page(req->pages[i]);
- if (req->pages != req->array)
- kfree(req->pages);
+ if (req->pages) {
+ for (i = 0; i < req->nr_pages; i++)
+ if (req->pages[i])
+ put_page(req->pages[i]);
+ if (req->pages != req->array)
+ kfree(req->pages);
+ }
kfree(req);
}
}
struct afs_uuid__xdr *xdr;
struct afs_uuid *uuid;
int j;
+ int n = entry->nr_servers;
tmp = ntohl(uvldb->serverFlags[i]);
if (tmp & AFS_VLSF_DONTUSE ||
(new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
continue;
if (tmp & AFS_VLSF_RWVOL) {
- entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+ entry->fs_mask[n] |= AFS_VOL_VTM_RW;
if (vlflags & AFS_VLF_BACKEXISTS)
- entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+ entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
}
if (tmp & AFS_VLSF_ROVOL)
- entry->fs_mask[i] |= AFS_VOL_VTM_RO;
- if (!entry->fs_mask[i])
+ entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+ if (!entry->fs_mask[n])
continue;
xdr = &uvldb->serverNumber[i];
- uuid = (struct afs_uuid *)&entry->fs_server[i];
+ uuid = (struct afs_uuid *)&entry->fs_server[n];
uuid->time_low = xdr->time_low;
uuid->time_mid = htons(ntohl(xdr->time_mid));
uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32) + size,
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
- bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
- gfp_t gfp;
- int ret;
+ int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- if (nowait)
- gfp = GFP_NOWAIT;
- else
- gfp = GFP_KERNEL;
-
- bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
- if (!bio)
- return -EAGAIN;
+ bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
if (!is_poll)
blk_start_plug(&plug);
- ret = 0;
for (;;) {
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
task_io_account_write(bio->bi_iter.bi_size);
}
- /*
- * Tell underlying layer to not block for resource shortage.
- * And if we would have blocked, return error inline instead
- * of through the bio->bi_end_io() callback.
- */
- if (nowait)
- bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+ dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
polled = true;
}
- dio->size += bio->bi_iter.bi_size;
qc = submit_bio(bio);
- if (qc == BLK_QC_T_EAGAIN) {
- dio->size -= bio->bi_iter.bi_size;
- ret = -EAGAIN;
- goto error;
- }
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
atomic_inc(&dio->ref);
}
- dio->size += bio->bi_iter.bi_size;
- qc = submit_bio(bio);
- if (qc == BLK_QC_T_EAGAIN) {
- dio->size -= bio->bi_iter.bi_size;
- ret = -EAGAIN;
- goto error;
- }
-
- bio = bio_alloc(gfp, nr_pages);
- if (!bio) {
- ret = -EAGAIN;
- goto error;
- }
+ submit_bio(bio);
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
}
if (!is_poll)
}
__set_current_state(TASK_RUNNING);
-out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
if (likely(!ret))
bio_put(&dio->bio);
return ret;
-error:
- if (!is_poll)
- blk_finish_plug(&plug);
- goto out;
}
static ssize_t
struct raid_kobject {
u64 flags;
struct kobject kobj;
- struct list_head list;
};
/*
u32 thread_pool_size;
struct kobject *space_info_kobj;
- struct list_head pending_raid_kobjs;
- spinlock_t pending_raid_kobjs_lock; /* uncontended */
u64 total_pinned;
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 bytes_used, u64 type, u64 chunk_offset,
u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->delalloc_roots);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
- INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
- spin_lock_init(&fs_info->pending_raid_kobjs_lock);
spin_lock_init(&fs_info->delalloc_root_lock);
spin_lock_init(&fs_info->trans_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
return 0;
}
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_space_info *space_info;
- struct raid_kobject *rkobj;
- LIST_HEAD(list);
- int ret = 0;
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_splice_init(&fs_info->pending_raid_kobjs, &list);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
- list_for_each_entry(rkobj, &list, list) {
- space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
- ret = kobject_add(&rkobj->kobj, &space_info->kobj,
- "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
- if (ret) {
- kobject_put(&rkobj->kobj);
- break;
- }
- }
- if (ret)
- btrfs_warn(fs_info,
- "failed to add kobject for block cache, ignoring");
-}
-
static void link_block_group(struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *space_info = cache->space_info;
up_write(&space_info->groups_sem);
if (first) {
- struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+ struct raid_kobject *rkobj;
+ unsigned int nofs_flag;
+ int ret;
+
+ /*
+ * Setup a NOFS context because kobject_add(), deep in its call
+ * chain, does GFP_KERNEL allocations, and we are often called
+	 * in a context where, if reclaim is triggered, we can deadlock
+ * (we are either holding a transaction handle or some lock
+ * required for a transaction commit).
+ */
+ nofs_flag = memalloc_nofs_save();
+ rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
if (!rkobj) {
+ memalloc_nofs_restore(nofs_flag);
btrfs_warn(cache->fs_info,
"couldn't alloc memory for raid level kobject");
return;
}
rkobj->flags = cache->flags;
kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
+ ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+ btrfs_bg_type_to_raid_name(rkobj->flags));
+ memalloc_nofs_restore(nofs_flag);
+ if (ret) {
+ kobject_put(&rkobj->kobj);
+ btrfs_warn(fs_info,
+ "failed to add kobject for block cache, ignoring");
+ return;
+ }
space_info->block_group_kobjs[index] = &rkobj->kobj;
}
}
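The scoped-NOFS pattern used here generalizes: when an API that allocates internally takes no gfp flags (kobject_add() in this case), the whole call sequence is bracketed so that every GFP_KERNEL allocation inside it is implicitly degraded to GFP_NOFS. The general shape, as a sketch (do_something_that_allocates() is a hypothetical callee):

#include <linux/sched/mm.h>

static void alloc_under_transaction(void)
{
	unsigned int nofs_flag;

	nofs_flag = memalloc_nofs_save();
	/* Every allocation in this window behaves as if GFP_NOFS had
	 * been passed, so direct reclaim cannot re-enter the
	 * filesystem and deadlock on state we already hold.
	 */
	do_something_that_allocates();	/* hypothetical callee */
	memalloc_nofs_restore(nofs_flag);
}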
inc_block_group_ro(cache, 1);
}
- btrfs_add_raid_kobjects(info);
btrfs_init_global_block_rsv(info);
ret = check_chunk_block_group_mappings(info);
error:
struct btrfs_device *device;
struct list_head *devices;
u64 group_trimmed;
+ u64 range_end = U64_MAX;
u64 start;
u64 end;
u64 trimmed = 0;
int dev_ret = 0;
int ret = 0;
+ /*
+ * Check range overflow if range->len is set.
+ * The default range->len is U64_MAX.
+ */
+ if (range->len != U64_MAX &&
+ check_add_overflow(range->start, range->len, &range_end))
+ return -EINVAL;
+
cache = btrfs_lookup_first_block_group(fs_info, range->start);
for (; cache; cache = next_block_group(cache)) {
- if (cache->key.objectid >= (range->start + range->len)) {
+ if (cache->key.objectid >= range_end) {
btrfs_put_block_group(cache);
break;
}
start = max(range->start, cache->key.objectid);
- end = min(range->start + range->len,
- cache->key.objectid + cache->key.offset);
+ end = min(range_end, cache->key.objectid + cache->key.offset);
if (end - start >= range->minlen) {
if (!block_group_cache_done(cache)) {
TASK_UNINTERRUPTIBLE);
}
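The range-overflow guard added above relies on check_add_overflow() from <linux/overflow.h>, which wraps a compiler builtin. A user-space equivalent of the same guard might look like this (illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* Returns true if start + len wraps around; *end is only meaningful
 * when false is returned.
 */
static bool range_end_overflows(uint64_t start, uint64_t len,
				uint64_t *end)
{
	return __builtin_add_overflow(start, len, end);
}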
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
+
/*
 * Lock eb pages and flush the bio if we can't get the locks
*
if (!trylock_page(p)) {
if (!flush) {
- ret = flush_write_bio(epd);
- if (ret < 0) {
+ int err;
+
+ err = flush_write_bio(epd);
+ if (err < 0) {
+ ret = err;
failed_page_nr = i;
goto err_unlock;
}
/* Unlock already locked pages */
for (i = 0; i < failed_page_nr; i++)
unlock_page(eb->pages[i]);
+ /*
+ * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
+	 * Also set EXTENT_BUFFER_DIRTY back so that a future attempt on this
+	 * eb can be made, undoing everything done up to this point.
+ */
+ btrfs_tree_lock(eb);
+ spin_lock(&eb->refs_lock);
+ set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+ end_extent_buffer_writeback(eb);
+ spin_unlock(&eb->refs_lock);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
+ fs_info->dirty_metadata_batch);
+ btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+ btrfs_tree_unlock(eb);
return ret;
}
-static void end_extent_buffer_writeback(struct extent_buffer *eb)
-{
- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
-}
-
static void set_btree_ioerr(struct page *page)
{
struct extent_buffer *eb = (struct extent_buffer *)page->private;
BTRFS_I(inode),
LOG_OTHER_INODE_ALL,
0, LLONG_MAX, ctx);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
}
continue;
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
if (ret) {
- iput(inode);
+ btrfs_add_delayed_iput(inode);
continue;
}
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- iput(inode);
+ btrfs_add_delayed_iput(inode);
continue;
}
}
path->slots[0]++;
}
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
return ret;
}
if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
break;
}
if (!ret &&
btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
ret = 1;
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
if (ret)
goto next_dir_inode;
if (ctx->log_new_dentries) {
if (!ret && ctx && ctx->log_new_dentries)
ret = log_new_dir_dentries(trans, root,
BTRFS_I(dir_inode), ctx);
- iput(dir_inode);
+ btrfs_add_delayed_iput(dir_inode);
if (ret)
goto out;
}
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_INODE_EXISTS,
0, LLONG_MAX, ctx);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
if (ret)
return ret;
if (ret)
return ret;
- /*
- * We add the kobjects here (and after forcing data chunk creation)
- * since relocation is the only place we'll create chunks of a new
- * type at runtime. The only place where we'll remove the last
- * chunk of a type is the call immediately below this one. Even
- * so, we're protected against races with the cleaner thread since
- * we're covered by the delete_unused_bgs_mutex.
- */
- btrfs_add_raid_kobjects(fs_info);
-
trans = btrfs_start_trans_remove_block_group(root->fs_info,
chunk_offset);
if (IS_ERR(trans)) {
btrfs_end_transaction(trans);
if (ret < 0)
return ret;
-
- btrfs_add_raid_kobjects(fs_info);
-
return 1;
}
}
if (page_offset(page) >= ceph_wbc.i_size) {
dout("%p page eof %llu\n",
page, ceph_wbc.i_size);
- if (ceph_wbc.size_stable ||
- page_offset(page) >= i_size_read(inode))
+ if ((ceph_wbc.size_stable ||
+ page_offset(page) >= i_size_read(inode)) &&
+ clear_page_dirty_for_io(page))
mapping->a_ops->invalidatepage(page,
0, PAGE_SIZE);
unlock_page(page);
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
+ struct ceph_buffer *old_blob = NULL;
struct cap_msg_args arg;
int held, revoking;
int wake = 0;
ci->i_requested_max_size = arg.max_size;
if (flushing & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
+ old_blob = __ceph_build_xattrs_blob(ci);
arg.xattr_version = ci->i_xattrs.version;
arg.xattr_buf = ci->i_xattrs.blob;
} else {
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
+
ret = send_cap_msg(&arg);
if (ret < 0) {
dout("error sending cap msg, must requeue %p\n", inode);
int issued, new_issued, info_caps;
struct timespec64 mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_buffer *old_blob = NULL;
struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
int err = 0;
if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = xattr_blob;
if (xattr_blob)
memcpy(ci->i_xattrs.blob->vec.iov_base,
out:
if (new_cap)
ceph_put_cap(mdsc, new_cap);
- if (xattr_blob)
- ceph_buffer_put(xattr_blob);
+ ceph_buffer_put(old_blob);
+ ceph_buffer_put(xattr_blob);
ceph_put_string(pool_ns);
return err;
}
req->r_wait_for_completion = ceph_lock_wait_for_completion;
err = ceph_mdsc_do_request(mdsc, inode, req);
-
- if (operation == CEPH_MDS_OP_GETFILELOCK) {
+ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
fl->fl_type = F_RDLCK;
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
struct ceph_snap_context *old_snapc, *new_snapc;
+ struct ceph_buffer *old_blob = NULL;
int used, dirty;
capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
capsnap->gid = inode->i_gid;
if (dirty & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
+ old_blob = __ceph_build_xattrs_blob(ci);
capsnap->xattr_blob =
ceph_buffer_get(ci->i_xattrs.blob);
capsnap->xattr_version = ci->i_xattrs.version;
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
kfree(capsnap);
ceph_put_snap_context(old_snapc);
}
int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
extern const struct xattr_handler *ceph_xattr_handlers[];
/*
* If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
+ * that the caller can free it after dropping the i_ceph_lock, which
+ * is likely to be held here.
*/
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
+ struct ceph_buffer *old_blob = NULL;
void *dest;
dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = NULL;
ci->i_xattrs.dirty = false;
ci->i_xattrs.version++;
}
+
+ return old_blob;
}
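All of the ceph hunks in this series enforce one locking rule: never drop the last reference to a ceph_buffer while i_ceph_lock is held, since ceph_buffer_put() may free memory. The old blob is instead detached under the lock and put afterwards, roughly (a sketch of the pattern, not a verbatim call site):

	struct ceph_buffer *old_blob;

	spin_lock(&ci->i_ceph_lock);
	old_blob = __ceph_build_xattrs_blob(ci);  /* detaches the old blob */
	/* ... consume ci->i_xattrs.blob under the lock ... */
	spin_unlock(&ci->i_ceph_lock);
	ceph_buffer_put(old_blob);  /* safe: lock no longer held, NULL ok */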
static inline int __get_request_mask(struct inode *in) {
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
+ struct ceph_buffer *old_blob = NULL;
int issued;
int err;
int dirty = 0;
struct ceph_buffer *blob;
spin_unlock(&ci->i_ceph_lock);
- dout(" preaallocating new blob size=%d\n", required_blob_size);
+ ceph_buffer_put(old_blob); /* Shouldn't be required */
+ dout(" pre-allocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
spin_lock(&ci->i_ceph_lock);
+ /* prealloc_blob can't be released while holding i_ceph_lock */
if (ci->i_xattrs.prealloc_blob)
- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ old_blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = blob;
goto retry;
}
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
if (lock_snap_rwsem)
up_read(&mdsc->snap_rwsem);
if (dirty)
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.21"
+#define CIFS_VERSION "2.22"
#endif /* _CIFSFS_H */
unsigned int *len, unsigned int *offset);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+int copy_path_name(char *dst, const char *src);
#ifdef CONFIG_CIFS_DFS_UPCALL
static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB add path length overrun check */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->fileName, name);
}
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->DirName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->DirName, name);
}
pSMB->BufferFormat = 0x04;
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->DirName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->DirName, name);
}
pSMB->BufferFormat = 0x04;
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, name);
}
params = 6 + name_len;
fileName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
+ } else {
count = 0; /* no pad */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, fileName, name_len);
+ name_len = copy_path_name(pSMB->fileName, fileName);
}
if (*pOplock & REQ_OPLOCK)
pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
/* BB improve check for buffer overruns BB */
/* no pad */
count = 0;
- name_len = strnlen(path, PATH_MAX);
- /* trailing null */
- name_len++;
+ name_len = copy_path_name(req->fileName, path);
req->NameLength = cpu_to_le16(name_len);
- strncpy(req->fileName, path, name_len);
}
if (*oplock & REQ_OPLOCK)
remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(from_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, from_name, name_len);
- name_len2 = strnlen(to_name, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, from_name);
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
- name_len2++; /* trailing null */
name_len2++; /* signature byte */
}
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fromName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, fromName, name_len);
- name_len2 = strnlen(toName, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, fromName);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
- name_len2++; /* trailing null */
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
name_len2++; /* signature byte */
}
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fromName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fromName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fromName);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
PATH_MAX, nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len_target = strnlen(toName, PATH_MAX);
- name_len_target++; /* trailing null */
- strncpy(data_offset, toName, name_len_target);
+ } else {
+ name_len_target = copy_path_name(data_offset, toName);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(toName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, toName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, toName);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
PATH_MAX, nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len_target = strnlen(fromName, PATH_MAX);
- name_len_target++; /* trailing null */
- strncpy(data_offset, fromName, name_len_target);
+ } else {
+ name_len_target = copy_path_name(data_offset, fromName);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(from_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, from_name, name_len);
- name_len2 = strnlen(to_name, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, from_name);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
- name_len2++; /* trailing null */
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
name_len2++; /* signature byte */
}
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
name_len *= 2;
pSMB->FileName[name_len] = 0;
pSMB->FileName[name_len+1] = 0;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
name_len++; /* trailing null */
name_len *= 2;
} else {
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
pSMB->BufferFormat = 0x04;
name_len++; /* account for buffer type byte */
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->FileName[name_len+1] = 0;
name_len += 2;
}
- } else { /* BB add check for overrun of SMB buf BB */
- name_len = strnlen(searchName, PATH_MAX);
-/* BB fix here and in unicode clause above ie
- if (name_len > buffersize-header)
- free buffer exit; BB */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
if (msearch) {
- pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
- pSMB->FileName[name_len+1] = '*';
- pSMB->FileName[name_len+2] = 0;
- name_len += 3;
+ if (WARN_ON_ONCE(name_len > PATH_MAX-2))
+ name_len = PATH_MAX-2;
+ /* overwrite nul byte */
+ pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
+ pSMB->FileName[name_len] = '*';
+ pSMB->FileName[name_len+1] = 0;
+ name_len += 2;
}
}
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->RequestFileName, search_name, name_len);
+ name_len = copy_path_name(pSMB->RequestFileName, search_name);
}
if (ses->server->sign)
PATH_MAX, cifs_sb->local_nls, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(file_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, file_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, file_name);
}
params = 6 + name_len;
data_count = sizeof(struct file_end_of_file_info);
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->fileName, fileName);
}
pSMB->attr = cpu_to_le16(dos_attrs);
pSMB->BufferFormat = 0x04;
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(file_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, file_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, file_name);
}
params = 6 + name_len;
PATH_MAX, nls_codepage, remap);
list_len++; /* trailing null */
list_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- list_len = strnlen(searchName, PATH_MAX);
- list_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, list_len);
+ } else {
+ list_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
set_freezable();
- allow_signal(SIGKILL);
+ allow_kernel_signal(SIGKILL);
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
+ int is_domain = 0;
const char *delim, *payload;
char *desc;
ssize_t len;
rc = PTR_ERR(key);
goto out_err;
}
+ is_domain = 1;
}
down_read(&key->sem);
goto out_key_put;
}
+ /*
+	 * If we have a domain key then we must set the domainName in the
+	 * volume info used for the request.
+ */
+ if (is_domain && ses->domainName) {
+ vol->domainname = kstrndup(ses->domainName,
+ strlen(ses->domainName),
+ GFP_KERNEL);
+ if (!vol->domainname) {
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+ "domain\n", len);
+ rc = -ENOMEM;
+ kfree(vol->username);
+ vol->username = NULL;
+ kzfree(vol->password);
+ vol->password = NULL;
+ goto out_key_put;
+ }
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
strlen(vol->prepath) + 1 : 0;
unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
+ if (unc_len > MAX_TREE_SIZE)
+ return ERR_PTR(-EINVAL);
+
full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
- strncpy(full_path, vol->UNC, unc_len);
+ memcpy(full_path, vol->UNC, unc_len);
pos = full_path + unc_len;
if (pplen) {
*pos = CIFS_DIR_SEP(cifs_sb);
- strncpy(pos + 1, vol->prepath, pplen);
+ memcpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
return full_path;
if (dfsplen)
- strncpy(full_path, tcon->treeName, dfsplen);
+ memcpy(full_path, tcon->treeName, dfsplen);
full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
- strncpy(full_path + dfsplen + 1, vol->prepath, pplen);
+ memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
- full_path[dfsplen + pplen] = 0; /* add trailing null */
return full_path;
}
*h = unc;
*len = end - unc;
}
+
+/**
+ * copy_path_name - copy src path to dst, possibly truncating
+ *
+ * returns number of bytes written (including trailing nul)
+ */
+int copy_path_name(char *dst, const char *src)
+{
+ int name_len;
+
+ /*
+ * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
+ * will truncate and strlen(dst) will be PATH_MAX-1
+ */
+ name_len = strscpy(dst, src, PATH_MAX);
+ if (WARN_ON_ONCE(name_len < 0))
+ name_len = PATH_MAX-1;
+
+ /* we count the trailing nul */
+ name_len++;
+ return name_len;
+}
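With the helper in place, every non-Unicode branch in cifssmb.c collapses from the old strnlen()/strncpy() pair into a single bounded call, and the returned length (which counts the trailing nul) feeds the SMB byte-count arithmetic directly, e.g.:

	} else {
		name_len = copy_path_name(pSMB->FileName, fileName);
	}
	params = 6 + name_len;	/* name_len already includes the nul */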
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
+ int len;
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
if (ses->user_name != NULL) {
- strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
- bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_USERNAME_LEN - 1;
+ bcc_ptr += len;
}
/* else null user mount */
*bcc_ptr = 0;
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
kfree(ses->serverOS);
- ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
+ ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
if (ses->serverOS) {
- strncpy(ses->serverOS, bcc_ptr, len);
+ memcpy(ses->serverOS, bcc_ptr, len);
+ ses->serverOS[len] = 0;
if (strncmp(ses->serverOS, "OS/2", 4) == 0)
cifs_dbg(FYI, "OS/2 server\n");
}
kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
- if (ses->serverNOS)
- strncpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
+ if (ses->serverNOS) {
+ memcpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS[len] = 0;
+ }
bcc_ptr += len + 1;
bleft -= len + 1;
#include <linux/list.h>
#include <linux/spinlock.h>
+struct configfs_fragment {
+ atomic_t frag_count;
+ struct rw_semaphore frag_sem;
+ bool frag_dead;
+};
+
+void put_fragment(struct configfs_fragment *);
+struct configfs_fragment *get_fragment(struct configfs_fragment *);
+
struct configfs_dirent {
atomic_t s_count;
int s_dependent_count;
#ifdef CONFIG_LOCKDEP
int s_depth;
#endif
+ struct configfs_fragment *s_frag;
};
#define CONFIGFS_ROOT 0x0001
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
const struct configfs_bin_attribute *);
-extern int configfs_make_dirent(struct configfs_dirent *,
- struct dentry *, void *, umode_t, int);
+extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
+ void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);
extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
{
if (!(sd->s_type & CONFIGFS_ROOT)) {
kfree(sd->s_iattr);
+ put_fragment(sd->s_frag);
kmem_cache_free(configfs_dir_cachep, sd);
}
}
#endif /* CONFIG_LOCKDEP */
+static struct configfs_fragment *new_fragment(void)
+{
+ struct configfs_fragment *p;
+
+ p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
+ if (p) {
+ atomic_set(&p->frag_count, 1);
+ init_rwsem(&p->frag_sem);
+ p->frag_dead = false;
+ }
+ return p;
+}
+
+void put_fragment(struct configfs_fragment *frag)
+{
+ if (frag && atomic_dec_and_test(&frag->frag_count))
+ kfree(frag);
+}
+
+struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
+{
+ if (likely(frag))
+ atomic_inc(&frag->frag_count);
+ return frag;
+}
+
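Restating the locking protocol these helpers establish (a sketch assembled from the file.c hunks further down, not a new API): readers take frag_sem shared and check frag_dead before calling into the item; teardown takes it exclusive and sets frag_dead, so no new ->show()/->store() can start once rmdir or unregister has begun.

	/* reader side (open/read/write paths) */
	down_read(&frag->frag_sem);
	if (!frag->frag_dead)
		res = attr->show(item, page);	/* item is still alive */
	up_read(&frag->frag_sem);

	/* teardown side (rmdir, unregister) */
	down_write(&frag->frag_sem);
	frag->frag_dead = true;
	up_write(&frag->frag_sem);
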
/*
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
*/
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
- void *element, int type)
+ void *element, int type,
+ struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
kmem_cache_free(configfs_dir_cachep, sd);
return ERR_PTR(-ENOENT);
}
+ sd->s_frag = get_fragment(frag);
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
int configfs_make_dirent(struct configfs_dirent * parent_sd,
struct dentry * dentry, void * element,
- umode_t mode, int type)
+ umode_t mode, int type, struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
- sd = configfs_new_dirent(parent_sd, element, type);
+ sd = configfs_new_dirent(parent_sd, element, type, frag);
if (IS_ERR(sd))
return PTR_ERR(sd);
* until it is validated by configfs_dir_set_ready()
*/
-static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
return error;
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
- CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+ CONFIGFS_DIR | CONFIGFS_USET_CREATING,
+ frag);
if (unlikely(error))
return error;
{
int err = 0;
umode_t mode = S_IFLNK | S_IRWXUGO;
+ struct configfs_dirent *p = parent->d_fsdata;
- err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
- CONFIGFS_ITEM_LINK);
+ err = configfs_make_dirent(p, dentry, sl, mode,
+ CONFIGFS_ITEM_LINK, p->s_frag);
if (!err) {
err = configfs_create(dentry, mode, init_symlink);
if (err) {
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry);
+ struct dentry *dentry,
+ struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);
static void detach_groups(struct config_group *group)
* try using vfs_mkdir. Just a thought.
*/
static int create_default_group(struct config_group *parent_group,
- struct config_group *group)
+ struct config_group *group,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
d_add(child, NULL);
ret = configfs_attach_group(&parent_group->cg_item,
- &group->cg_item, child);
+ &group->cg_item, child, frag);
if (!ret) {
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
return ret;
}
-static int populate_groups(struct config_group *group)
+static int populate_groups(struct config_group *group,
+ struct configfs_fragment *frag)
{
struct config_group *new_group;
int ret = 0;
list_for_each_entry(new_group, &group->default_groups, group_entry) {
- ret = create_default_group(group, new_group);
+ ret = create_default_group(group, new_group, frag);
if (ret) {
detach_groups(group);
break;
*/
static int configfs_attach_item(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
- ret = configfs_create_dir(item, dentry);
+ ret = configfs_create_dir(item, dentry, frag);
if (!ret) {
ret = populate_attrs(item);
if (ret) {
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
- ret = populate_groups(to_config_group(item));
+ ret = populate_groups(to_config_group(item), frag);
if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
struct configfs_dirent *sd;
const struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
+ struct configfs_fragment *frag;
char *name;
sd = dentry->d_parent->d_fsdata;
goto out;
}
+ frag = new_fragment();
+ if (!frag) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
spin_unlock(&configfs_dirent_lock);
if (group)
- ret = configfs_attach_group(parent_item, item, dentry);
+ ret = configfs_attach_group(parent_item, item, dentry, frag);
else
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
* reference.
*/
config_item_put(parent_item);
+ put_fragment(frag);
out:
return ret;
struct config_item *item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
}
} while (ret == -EAGAIN);
+ frag = sd->s_frag;
+ if (down_write_killable(&frag->frag_sem)) {
+ spin_lock(&configfs_dirent_lock);
+ configfs_detach_rollback(dentry);
+ spin_unlock(&configfs_dirent_lock);
+ return -EINTR;
+ }
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
/* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
*/
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
- file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
+ file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data);
else
{
struct configfs_subsystem *subsys = parent_group->cg_subsys;
struct dentry *parent;
+ struct configfs_fragment *frag;
int ret;
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
+
mutex_lock(&subsys->su_mutex);
link_group(parent_group, group);
mutex_unlock(&subsys->su_mutex);
parent = parent_group->cg_item.ci_dentry;
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
- ret = create_default_group(parent_group, group);
+ ret = create_default_group(parent_group, group, frag);
if (ret)
goto err_out;
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(parent));
+ put_fragment(frag);
return 0;
err_out:
inode_unlock(d_inode(parent));
mutex_lock(&subsys->su_mutex);
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
+ put_fragment(frag);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
struct configfs_subsystem *subsys = group->cg_subsys;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
- mutex_lock(&subsys->su_mutex);
- if (!group->cg_item.ci_parent->ci_group) {
- /*
- * The parent has already been unlinked and detached
- * due to a rmdir.
- */
- goto unlink_group;
- }
- mutex_unlock(&subsys->su_mutex);
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
dput(dentry);
mutex_lock(&subsys->su_mutex);
-unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
struct dentry *dentry;
struct dentry *root;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
+
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
root = configfs_pin_fs();
- if (IS_ERR(root))
+ if (IS_ERR(root)) {
+ put_fragment(frag);
return PTR_ERR(root);
+ }
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
d_add(dentry, NULL);
err = configfs_attach_group(sd->s_element, &group->cg_item,
- dentry);
+ dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
unlink_group(group);
configfs_release_fs();
}
+ put_fragment(frag);
return err;
}
struct config_group *group = &subsys->su_group;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *root = dentry->d_sb->s_root;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
if (dentry->d_parent != root) {
pr_err("Tried to unregister non-subsystem!\n");
return;
}
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
bool write_in_progress;
char *bin_buffer;
int bin_buffer_size;
+ int cb_max_size;
+ struct config_item *item;
+ struct module *owner;
+ union {
+ struct configfs_attribute *attr;
+ struct configfs_bin_attribute *bin_attr;
+ };
};
+static inline struct configfs_fragment *to_frag(struct file *file)
+{
+ struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
-/**
- * fill_read_buffer - allocate and fill buffer from item.
- * @dentry: dentry pointer.
- * @buffer: data buffer for file.
- *
- * Allocate @buffer->page, if it hasn't been already, then call the
- * config_item's show() method to fill the buffer with this attribute's
- * data.
- * This is called only once, on the file's first read.
- */
-static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+ return sd->s_frag;
+}
+
+static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
- int ret = 0;
- ssize_t count;
+ struct configfs_fragment *frag = to_frag(file);
+ ssize_t count = -ENOENT;
if (!buffer->page)
buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
if (!buffer->page)
return -ENOMEM;
- count = attr->show(item, buffer->page);
-
- BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
- if (count >= 0) {
- buffer->needs_read_fill = 0;
- buffer->count = count;
- } else
- ret = count;
- return ret;
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ count = buffer->attr->show(buffer->item, buffer->page);
+ up_read(&frag->frag_sem);
+
+ if (count < 0)
+ return count;
+ if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
+ return -EIO;
+ buffer->needs_read_fill = 0;
+ buffer->count = count;
+ return 0;
}
/**
static ssize_t
configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
- if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+ retval = fill_read_buffer(file, buffer);
+ if (retval)
goto out;
}
pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
configfs_read_bin_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
ssize_t retval = 0;
ssize_t len = min_t(size_t, count, PAGE_SIZE);
if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
- len = bin_attr->read(item, NULL, 0);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item, NULL, 0);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len <= 0) {
retval = len;
goto out;
}
/* do not exceed the maximum value */
- if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size && len > buffer->cb_max_size) {
retval = -EFBIG;
goto out;
}
buffer->bin_buffer_size = len;
/* perform second read to fill buffer */
- len = bin_attr->read(item, buffer->bin_buffer, len);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item,
+ buffer->bin_buffer, len);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len < 0) {
retval = len;
vfree(buffer->bin_buffer);
return error ? -EFAULT : count;
}
-
-/**
- * flush_write_buffer - push buffer to config_item.
- * @dentry: dentry to the attribute
- * @buffer: data buffer for file.
- * @count: number of bytes
- *
- * Get the correct pointers for the config_item and the attribute we're
- * dealing with, then call the store() method for the attribute,
- * passing the buffer that we acquired in fill_write_buffer().
- */
-
static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
-
- return attr->store(item, buffer->page, count);
+ struct configfs_fragment *frag = to_frag(file);
+ int res = -ENOENT;
+
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ res = buffer->attr->store(buffer->item, buffer->page, count);
+ up_read(&frag->frag_sem);
+ return res;
}
static ssize_t
configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t len;
mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
- len = flush_write_buffer(file->f_path.dentry, buffer, len);
+ len = flush_write_buffer(file, buffer, len);
if (len > 0)
*ppos += len;
mutex_unlock(&buffer->mutex);
size_t count, loff_t *ppos)
{
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
void *tbuf = NULL;
ssize_t len;
/* buffer grows? */
if (*ppos + count > buffer->bin_buffer_size) {
- if (bin_attr->cb_max_size &&
- *ppos + count > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size &&
+ *ppos + count > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
return len;
}
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
{
- struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(file->f_path.dentry);
- struct configfs_bin_attribute *bin_attr = NULL;
- struct configfs_buffer * buffer;
- struct configfs_item_operations * ops = NULL;
- int error = 0;
+ struct dentry *dentry = file->f_path.dentry;
+ struct configfs_fragment *frag = to_frag(file);
+ struct configfs_attribute *attr;
+ struct configfs_buffer *buffer;
+ int error;
- if (!item || !attr)
- goto Einval;
+ error = -ENOMEM;
+ buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+ if (!buffer)
+ goto out;
- if (type & CONFIGFS_ITEM_BIN_ATTR)
- bin_attr = to_bin_attr(file->f_path.dentry);
+ error = -ENOENT;
+ down_read(&frag->frag_sem);
+ if (unlikely(frag->frag_dead))
+ goto out_free_buffer;
- /* Grab the module reference for this attribute if we have one */
- if (!try_module_get(attr->ca_owner)) {
- error = -ENODEV;
- goto Done;
+ error = -EINVAL;
+ buffer->item = to_item(dentry->d_parent);
+ if (!buffer->item)
+ goto out_free_buffer;
+
+ attr = to_attr(dentry);
+ if (!attr)
+ goto out_put_item;
+
+ if (type & CONFIGFS_ITEM_BIN_ATTR) {
+ buffer->bin_attr = to_bin_attr(dentry);
+ buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+ } else {
+ buffer->attr = attr;
}
- if (item->ci_type)
- ops = item->ci_type->ct_item_ops;
- else
- goto Eaccess;
+ buffer->owner = attr->ca_owner;
+ /* Grab the module reference for this attribute if we have one */
+ error = -ENODEV;
+ if (!try_module_get(buffer->owner))
+ goto out_put_item;
+
+ error = -EACCES;
+ if (!buffer->item->ci_type)
+ goto out_put_module;
+
+ buffer->ops = buffer->item->ci_type->ct_item_ops;
/* File needs write support.
* The inode's perms must say it's ok,
*/
if (file->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+ goto out_put_module;
}
/* File needs read support.
*/
if (file->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+ goto out_put_module;
}
- /* No error? Great, allocate a buffer for the file, and store it
- * it in file->private_data for easy access.
- */
- buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
- if (!buffer) {
- error = -ENOMEM;
- goto Enomem;
- }
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->read_in_progress = false;
buffer->write_in_progress = false;
- buffer->ops = ops;
file->private_data = buffer;
- goto Done;
+ up_read(&frag->frag_sem);
+ return 0;
- Einval:
- error = -EINVAL;
- goto Done;
- Eaccess:
- error = -EACCES;
- Enomem:
- module_put(attr->ca_owner);
- Done:
- if (error && item)
- config_item_put(item);
+out_put_module:
+ module_put(buffer->owner);
+out_put_item:
+ config_item_put(buffer->item);
+out_free_buffer:
+ up_read(&frag->frag_sem);
+ kfree(buffer);
+out:
return error;
}
static int configfs_release(struct inode *inode, struct file *filp)
{
- struct config_item * item = to_item(filp->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
- struct module * owner = attr->ca_owner;
- struct configfs_buffer * buffer = filp->private_data;
-
- if (item)
- config_item_put(item);
- /* After this point, attr should not be accessed. */
- module_put(owner);
-
- if (buffer) {
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- mutex_destroy(&buffer->mutex);
- kfree(buffer);
- }
+ struct configfs_buffer *buffer = filp->private_data;
+
+ module_put(buffer->owner);
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ mutex_destroy(&buffer->mutex);
+ kfree(buffer);
return 0;
}
static int configfs_open_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
}
static int configfs_open_bin_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
}
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
- struct configfs_buffer *buffer = filp->private_data;
- struct dentry *dentry = filp->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
- ssize_t len = 0;
- int ret;
+ struct configfs_buffer *buffer = file->private_data;
buffer->read_in_progress = false;
if (buffer->write_in_progress) {
+ struct configfs_fragment *frag = to_frag(file);
buffer->write_in_progress = false;
- len = bin_attr->write(item, buffer->bin_buffer,
- buffer->bin_buffer_size);
-
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead) {
+			/* result is ignored: ->release() returns are discarded */
+ buffer->bin_attr->write(buffer->item,
+ buffer->bin_buffer,
+ buffer->bin_buffer_size);
+ }
+ up_read(&frag->frag_sem);
/* vfree on NULL is safe */
vfree(buffer->bin_buffer);
buffer->bin_buffer = NULL;
buffer->needs_read_fill = 1;
}
- ret = configfs_release(inode, filp);
- if (len < 0)
- return len;
- return ret;
+ configfs_release(inode, file);
+ return 0;
}
inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
- CONFIGFS_ITEM_ATTR);
+ CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
inode_unlock(d_inode(dir));
return error;
inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
- CONFIGFS_ITEM_BIN_ATTR);
+ CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
inode_unlock(dir->d_inode);
return error;
struct buffer_head *bh;
struct super_block *sb = inode->i_sb;
ext4_fsblk_t block;
- struct blk_plug plug;
int inodes_per_block, inode_offset;
iloc->bh = NULL;
* If we need to do any I/O, try to pre-readahead extra
* blocks from the inode table.
*/
- blk_start_plug(&plug);
if (EXT4_SB(sb)->s_inode_readahead_blks) {
ext4_fsblk_t b, end, table;
unsigned num;
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
- blk_finish_plug(&plug);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
io_free_req(req);
}
+static unsigned io_cqring_events(struct io_cq_ring *ring)
+{
+ /* See comment at the top of this file */
+ smp_rmb();
+ return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+}
+
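The subtraction above depends on well-defined unsigned wraparound; a worked example with illustrative values:

	/* u32 ring indices: tail - head counts pending CQEs even after
	 * either index wraps, e.g. head = 0xfffffffeU, tail = 2U:
	 * 2U - 0xfffffffeU == 4, i.e. four unconsumed completions. */
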
/*
* Find and free completed poll iocbs
*/
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
- while (!list_empty(&ctx->poll_list)) {
+ while (!list_empty(&ctx->poll_list) && !need_resched()) {
int ret;
ret = io_do_iopoll(ctx, nr_events, min);
unsigned int nr_events = 0;
io_iopoll_getevents(ctx, &nr_events, 1);
+
+ /*
+		 * Ensure we allow local-to-the-cpu processing to take place;
+		 * in this case we need to ensure that we reap all events.
+ */
+ cond_resched();
}
mutex_unlock(&ctx->uring_lock);
}
static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
long min)
{
- int ret = 0;
+ int iters, ret = 0;
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
+
+ iters = 0;
do {
int tmin = 0;
+ /*
+ * Don't enter poll loop if we already have events pending.
+ * If we do, we can potentially be spinning for commands that
+ * already triggered a CQE (eg in error).
+ */
+ if (io_cqring_events(ctx->cq_ring))
+ break;
+
+ /*
+ * If a submit got punted to a workqueue, we can have the
+ * application entering polling for a command before it gets
+ * issued. That app will hold the uring_lock for the duration
+ * of the poll right here, so we need to take a breather every
+ * now and then to ensure that the issue has a chance to add
+ * the poll to the issued list. Otherwise we can spin here
+ * forever, while the workqueue is stuck trying to acquire the
+ * very same mutex.
+ */
+ if (!(++iters & 7)) {
+ mutex_unlock(&ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ }
+
if (*nr_events < min)
tmin = min - *nr_events;
ret = 0;
} while (min && !*nr_events && !need_resched());
+ mutex_unlock(&ctx->uring_lock);
return ret;
}
iter->bvec = bvec + seg_skip;
iter->nr_segs -= seg_skip;
- iter->count -= (seg_skip << PAGE_SHIFT);
+ iter->count -= bvec->bv_len + offset;
iter->iov_offset = offset & ~PAGE_MASK;
- if (iter->iov_offset)
- iter->count -= iter->iov_offset;
}
}
{
int ret;
+ ret = io_req_defer(ctx, req, s->sqe);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_free_req(req);
+ io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ }
+ return 0;
+ }
+
ret = __io_submit_sqe(ctx, req, s, true);
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
struct io_uring_sqe *sqe_copy;
return;
}
- ret = io_req_defer(ctx, req, s->sqe);
- if (ret) {
- if (ret != -EIOCBQUEUED)
- goto err_req;
- return;
- }
-
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
- /*
- * We disallow the app entering submit/complete
- * with polling, but we still need to lock the
- * ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
io_iopoll_check(ctx, &nr_events, 0);
- mutex_unlock(&ctx->uring_lock);
} else {
/*
* Normal IO, just pretend everything completed.
return submit;
}
-static unsigned io_cqring_events(struct io_cq_ring *ring)
-{
- /* See comment at the top of this file */
- smp_rmb();
- return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
-}
-
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
min_complete = min(min_complete, ctx->cq_entries);
if (ctx->flags & IORING_SETUP_IOPOLL) {
- mutex_lock(&ctx->uring_lock);
ret = io_iopoll_check(ctx, &nr_events, min_complete);
- mutex_unlock(&ctx->uring_lock);
} else {
ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
}
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
nfs_file_set_open_context(file, ctx);
else
- err = -ESTALE;
+ err = -EOPENSTALE;
out:
return err;
}
unsigned long bytes = 0;
struct nfs_direct_req *dreq = hdr->dreq;
- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
- goto out_put;
-
spin_lock(&dreq->lock);
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- else
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ spin_unlock(&dreq->lock);
+ goto out_put;
+ }
+
+ if (hdr->good_bytes != 0)
nfs_direct_good_bytes(dreq, hdr);
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+ dreq->error = 0;
+
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
bool request_commit = false;
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
- goto out_put;
-
nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- if (dreq->error == 0) {
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ spin_unlock(&dreq->lock);
+ goto out_put;
+ }
+
+ if (hdr->good_bytes != 0) {
nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
*/
#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
pgm = &pgio->pg_mirrors[0];
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
- pgio->pg_maxretrans = io_maxretrans;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
return;
out_nolseg:
if (pgio->pg_error < 0)
pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
+ pgio->pg_maxretrans = 0;
nfs_pageio_reset_read_mds(pgio);
}
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
}
- pgio->pg_maxretrans = io_maxretrans;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
return;
out_mds:
pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
+ pgio->pg_maxretrans = 0;
nfs_pageio_reset_write_mds(pgio);
}
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
- case -EAGAIN:
- return -NFS4ERR_RESET_TO_PNFS;
/* Invalidate Layout errors */
case -NFS4ERR_PNFS_NO_LAYOUT:
case -ESTALE: /* mapped NFS4ERR_STALE */
case -EBADHANDLE:
case -ELOOP:
case -ENOSPC:
- case -EAGAIN:
break;
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
ff_layout_read_prepare_common(task, hdr);
}
-static void
-ff_layout_io_prepare_transmit(struct rpc_task *task,
- void *data)
-{
- struct nfs_pgio_header *hdr = data;
-
- if (!pnfs_is_valid_lseg(hdr->lseg))
- rpc_exit(task, -EAGAIN);
-}
-
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
struct nfs_pgio_header *hdr = data;
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
.rpc_call_prepare = ff_layout_read_prepare_v3,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
.rpc_call_prepare = ff_layout_read_prepare_v4,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
.rpc_call_prepare = ff_layout_write_prepare_v3,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
.rpc_call_prepare = ff_layout_write_prepare_v4,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
return 0;
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+ /* Only a mounted-on-fileid? Just exit */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ return 0;
/* Has the inode gone and changed behind our back? */
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+ } else if (nfsi->fileid != fattr->fileid) {
+ /* Is this perhaps the mounted-on fileid? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+ nfsi->fileid == fattr->mounted_on_fileid)
+ return 0;
return -ESTALE;
+ }
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
return -ESTALE;
+
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
-static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
- struct nfs_fattr *fattr)
-{
- bool ret1 = true, ret2 = true;
-
- if (fattr->valid & NFS_ATTR_FATTR_FILEID)
- ret1 = (nfsi->fileid == fattr->fileid);
- if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
- ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
- return ret1 || ret2;
-}
-
/*
* Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
- if (!nfs_fileid_valid(nfsi, fattr)) {
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+ /* Only a mounted-on-fileid? Just exit */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ return 0;
+ /* Has the inode gone and changed behind our back? */
+ } else if (nfsi->fileid != fattr->fileid) {
+ /* Is this perhaps the mounted-on fileid? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+ nfsi->fileid == fattr->mounted_on_fileid)
+ return 0;
printk(KERN_ERR "NFS: server %s error: fileid changed\n"
"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
NFS_SERVER(inode)->nfs_client->cl_hostname,
}
}
+static inline bool nfs_error_is_fatal_on_server(int err)
+{
+ switch (err) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return false;
+ }
+ return nfs_error_is_fatal(err);
+}
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- goto out_put_ctx;
default:
+ goto out_put_ctx;
+ case -ENOENT:
+ case -ESTALE:
+ case -EISDIR:
+ case -ENOTDIR:
+ case -ELOOP:
goto out_drop;
}
}
}
hdr->res.fattr = &hdr->fattr;
- hdr->res.count = count;
+ hdr->res.count = 0;
hdr->res.eof = 0;
hdr->res.verf = &hdr->verf;
nfs_fattr_init(&hdr->fattr);
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
- LIST_HEAD(failed);
+ LIST_HEAD(pages);
desc->pg_io_completion = hdr->io_completion;
desc->pg_dreq = hdr->dreq;
- while (!list_empty(&hdr->pages)) {
- struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ list_splice_init(&hdr->pages, &pages);
+ while (!list_empty(&pages)) {
+ struct nfs_page *req = nfs_list_entry(pages.next);
if (!nfs_pageio_add_request(desc, req))
- nfs_list_move_request(req, &failed);
+ break;
}
nfs_pageio_complete(desc);
- if (!list_empty(&failed)) {
- list_move(&failed, &hdr->pages);
- return desc->pg_error < 0 ? desc->pg_error : -EIO;
+ if (!list_empty(&pages)) {
+ int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+ hdr->completion_ops->error_cleanup(&pages, err);
+ nfs_set_pgio_error(hdr, err, hdr->io_start);
+ return err;
}
return 0;
}
/* Add this address as an alias */
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
rpc_clnt_test_and_add_xprt, NULL);
- } else
- clp = get_v3_ds_connect(mds_srv,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP,
- timeo, retrans);
+ continue;
+ }
+ clp = get_v3_ds_connect(mds_srv,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans);
+ if (IS_ERR(clp))
+ continue;
+ clp->cl_rpcclient->cl_softerr = 0;
+ clp->cl_rpcclient->cl_softrtry = 0;
}
if (IS_ERR(clp)) {
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
hdr->res.eof = 1;
}
return 0;
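
A worked reading of the widened NFSv2 eof test, with illustrative numbers (file size 100):

	/* offset 80,  asked 50,   got 20 -> 80 + 20 >= 100   -> eof
	 * offset 120, asked 4096, got 0  -> new zero-byte arm -> eof
	 * offset 0,   asked 50,   got 50 -> 50 < 100          -> no eof */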
static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
+ hdr->res.count = hdr->args.count;
nfs_writeback_update_inode(hdr);
+ }
return 0;
}
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
-static void nfs_readpage_release(struct nfs_page *req)
+static void nfs_readpage_release(struct nfs_page *req, int error)
{
struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
+ struct page *page = req->wb_page;
dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
(long long)req_offset(req));
+ if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
+ SetPageError(page);
if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
- if (PageUptodate(req->wb_page))
- nfs_readpage_to_fscache(inode, req->wb_page, 0);
+ struct address_space *mapping = page_file_mapping(page);
- unlock_page(req->wb_page);
+ if (PageUptodate(page))
+ nfs_readpage_to_fscache(inode, page, 0);
+ else if (!PageError(page) && !PagePrivate(page))
+ generic_error_remove_page(mapping, page);
+ unlock_page(page);
}
nfs_release_request(req);
}
&nfs_async_read_completion_ops);
if (!nfs_pageio_add_request(&pgio, new)) {
nfs_list_remove_request(new);
- nfs_readpage_release(new);
+ nfs_readpage_release(new, pgio.pg_error);
}
nfs_pageio_complete(&pgio);
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
unsigned long bytes = 0;
+ int error;
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
goto out;
zero_user_segment(page, start, end);
}
}
+ error = 0;
bytes += req->wb_bytes;
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
if (bytes <= hdr->good_bytes)
nfs_page_group_set_uptodate(req);
+ else {
+ error = hdr->error;
+ xchg(&nfs_req_openctx(req)->error, error);
+ }
} else
nfs_page_group_set_uptodate(req);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ nfs_readpage_release(req, error);
}
out:
hdr->release(hdr);
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ nfs_readpage_release(req, error);
}
}
goto out;
}
+ xchg(&ctx->error, 0);
error = nfs_readpage_async(ctx, inode, page);
-
+ if (!error) {
+ error = wait_on_page_locked_killable(page);
+ if (!PageUptodate(page) && !error)
+ error = xchg(&ctx->error, 0);
+ }
out:
put_nfs_open_context(ctx);
return error;
zero_user_segment(page, len, PAGE_SIZE);
if (!nfs_pageio_add_request(desc->pgio, new)) {
nfs_list_remove_request(new);
- nfs_readpage_release(new);
error = desc->pgio->pg_error;
+ nfs_readpage_release(new, error);
goto out;
}
return 0;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
struct inode *inode);
static void nfs_write_error(struct nfs_page *req, int error)
{
+ nfs_set_pageerror(page_file_mapping(req->wb_page));
nfs_mapping_set_error(req->wb_page, error);
+ nfs_inode_remove_request(req);
nfs_end_page_writeback(req);
nfs_release_request(req);
}
-static bool
-nfs_error_is_fatal_on_server(int err)
-{
- switch (err) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- return false;
- }
- return nfs_error_is_fatal(err);
-}
-
/*
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
struct page *page)
{
- struct address_space *mapping;
struct nfs_page *req;
int ret = 0;
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
/* If there is a fatal error that covers this write, just exit */
- ret = 0;
- mapping = page_file_mapping(page);
- if (test_bit(AS_ENOSPC, &mapping->flags) ||
- test_bit(AS_EIO, &mapping->flags))
+ ret = pgio->pg_error;
+ if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
+ ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error;
/*
} else
ret = -EAGAIN;
nfs_redirty_request(req);
+ pgio->pg_error = 0;
} else
nfs_add_stats(page_file_mapping(page)->host,
NFSIOS_WRITEPAGES, 1);
ret = nfs_page_async_flush(pgio, page);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
- ret = 0;
+ ret = AOP_WRITEPAGE_ACTIVATE;
}
return ret;
}
nfs_pageio_init_write(&pgio, inode, 0,
false, &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
+ pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
if (err < 0)
return err;
- if (pgio.pg_error < 0)
+ if (nfs_error_is_fatal(pgio.pg_error))
return pgio.pg_error;
return 0;
}
int ret;
ret = nfs_writepage_locked(page, wbc);
- unlock_page(page);
+ if (ret != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
return ret;
}
int ret;
ret = nfs_do_writepage(page, wbc, data);
- unlock_page(page);
+ if (ret != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
return ret;
}
&nfs_async_write_completion_ops);
pgio.pg_io_completion = ioc;
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
+ pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
nfs_io_completion_put(ioc);
if (err < 0)
goto out_err;
err = pgio.pg_error;
- if (err < 0)
+ if (nfs_error_is_fatal(err))
goto out_err;
return 0;
out_err:
*/
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
- struct nfsd_net *nn = v;
+ struct nfsd_net *nn = m->private;
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
return inode;
}
-static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
{
struct inode *inode;
inode = nfsd_get_inode(dir->i_sb, mode);
if (!inode)
return -ENOMEM;
+ if (ncl) {
+ inode->i_private = ncl;
+ kref_get(&ncl->cl_ref);
+ }
d_add(dentry, inode);
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
dentry = d_alloc_name(parent, name);
if (!dentry)
goto out_err;
- ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600);
+ ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
if (ret)
goto out_err;
- if (ncl) {
- d_inode(dentry)->i_private = ncl;
- kref_get(&ncl->cl_ref);
- }
out:
inode_unlock(dir);
return dentry;
out_err:
+ dput(dentry);
dentry = ERR_PTR(ret);
goto out;
}
struct nfsdfs_client *ncl = inode->i_private;
inode->i_private = NULL;
- synchronize_rcu();
kref_put(&ncl->cl_ref, ncl->cl_release);
}
-
static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
{
struct nfsdfs_client *nc = inode->i_private;
{
struct nfsdfs_client *nc;
- rcu_read_lock();
+ inode_lock_shared(inode);
nc = __get_nfsdfs_client(inode);
- rcu_read_unlock();
+ inode_unlock_shared(inode);
return nc;
}
/* from __rpc_unlink */
return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}
-/*
- * Read a page's worth of file data into the page cache. Return the page
- * locked.
- */
+/* Read a page's worth of file data into the page cache. */
static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
{
struct page *page;
put_page(page);
return ERR_PTR(-EIO);
}
- lock_page(page);
return page;
}
+/*
+ * Lock two pages, ensuring that we lock in offset order if the pages are from
+ * the same file.
+ */
+static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+{
+ /* Always lock in order of increasing index. */
+ if (page1->index > page2->index)
+ swap(page1, page2);
+
+ lock_page(page1);
+ if (page1 != page2)
+ lock_page(page2);
+}
+
+/* Unlock two pages, being careful not to unlock the same page twice. */
+static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+{
+ unlock_page(page1);
+ if (page1 != page2)
+ unlock_page(page2);
+}
+
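Why the index ordering matters: a sketch of the ABBA deadlock it rules out (two hypothetical dedupe tasks on the same page pair):

	/* without a global order:
	 *   task A: lock_page(P1); lock_page(P2);  <- holds P1, waits on P2
	 *   task B: lock_page(P2); lock_page(P1);  <- holds P2, waits on P1
	 * sorting by page->index makes both tasks take P1 then P2. */
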
/*
* Compare extents of two files to see if they are the same.
* Caller must have locked both inodes to prevent write races.
dest_page = vfs_dedupe_get_page(dest, destoff);
if (IS_ERR(dest_page)) {
error = PTR_ERR(dest_page);
- unlock_page(src_page);
put_page(src_page);
goto out_error;
}
+
+ vfs_lock_two_pages(src_page, dest_page);
+
+ /*
+ * Now that we've locked both pages, make sure they're still
+ * mapped to the file data we're interested in. If not,
+ * someone is invalidating pages on us and we lose.
+ */
+ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
+ src_page->mapping != src->i_mapping ||
+ dest_page->mapping != dest->i_mapping) {
+ same = false;
+ goto unlock;
+ }
+
src_addr = kmap_atomic(src_page);
dest_addr = kmap_atomic(dest_page);
kunmap_atomic(dest_addr);
kunmap_atomic(src_addr);
- unlock_page(dest_page);
- unlock_page(src_page);
+unlock:
+ vfs_unlock_two_pages(src_page, dest_page);
put_page(dest_page);
put_page(src_page);
}
if (seq_has_overflowed(m))
goto Eoverflow;
+ p = m->op->next(m, p, &m->index);
if (pos + m->count > offset) {
m->from = offset - pos;
m->count -= m->from;
}
pos += m->count;
m->count = 0;
- p = m->op->next(m, p, &m->index);
if (pos == offset)
break;
}
static void shrink_liability(struct ubifs_info *c, int nr_to_write)
{
down_read(&c->vfs_sb->s_umount);
- writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
+ writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
up_read(&c->vfs_sb->s_umount);
}
static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
{
if (orph->del) {
- spin_unlock(&c->orphan_lock);
dbg_gen("deleted twice ino %lu", orph->inum);
return;
}
orph->del = 1;
orph->dnext = c->orph_dnext;
c->orph_dnext = orph;
- spin_unlock(&c->orphan_lock);
dbg_gen("delete later ino %lu", orph->inum);
return;
}
c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
if (c->max_bu_buf_len > c->leb_size)
c->max_bu_buf_len = c->leb_size;
+
+ /* Log is ready, preserve one LEB for commits. */
+ c->min_log_bytes = c->leb_size;
+
return 0;
}
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
+ bool still_valid;
WRITE_ONCE(ctx->released, true);
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
- if (!mmget_still_valid(mm))
- goto skip_mm;
+ still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
- new_flags, vma->anon_vma,
- vma->vm_file, vma->vm_pgoff,
- vma_policy(vma),
- NULL_VM_UFFD_CTX);
- if (prev)
- vma = prev;
- else
- prev = vma;
+ if (still_valid) {
+ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+ new_flags, vma->anon_vma,
+ vma->vm_file, vma->vm_pgoff,
+ vma_policy(vma),
+ NULL_VM_UFFD_CTX);
+ if (prev)
+ vma = prev;
+ else
+ prev = vma;
+ }
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
-skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
XFS_STATS_INC(mp, xs_blk_mapr);
ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!ifp) {
+ /* No CoW fork? Return a hole. */
+ if (whichfork == XFS_COW_FORK) {
+ mval->br_startoff = bno;
+ mval->br_startblock = HOLESTARTBLOCK;
+ mval->br_blockcount = len;
+ mval->br_state = XFS_EXT_NORM;
+ *nmap = 1;
+ return 0;
+ }
- /* No CoW fork? Return a hole. */
- if (whichfork == XFS_COW_FORK && !ifp) {
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount = len;
- mval->br_state = XFS_EXT_NORM;
- *nmap = 1;
- return 0;
+ /*
+ * A missing attr ifork implies that the inode says we're in
+ * extents or btree format but failed to pass the inode fork
+ * verifier while trying to load it. Treat that as a file
+ * corruption too.
+ */
+#ifdef DEBUG
+ xfs_alert(mp, "%s: inode %llu missing fork %d",
+ __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
ASSERT(state->path.active == 0);
oldblk = &state->path.blk[0];
error = xfs_da3_root_split(state, oldblk, addblk);
- if (error) {
- addblk->bp = NULL;
- return error; /* GROT: dir is inconsistent */
- }
+ if (error)
+ goto out;
/*
* Update pointers to the node which used to be block 0 and just got
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
- ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
- ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
+out:
addblk->bp = NULL;
- return 0;
+ return error;
}
/*
ents = dp->d_ops->leaf_ents_p(leaf);
xfs_dir3_leaf_check(dp, bp);
- ASSERT(leafhdr.count > 0);
+ if (leafhdr.count <= 0)
+ return -EFSCORRUPTED;
/*
* Look up the hash value in the leaf entries.
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- void __user *arg = (void __user *)p;
+ void __user *arg = compat_ptr(p);
int error;
trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
- /* No size or alignment issues on any arch */
- case XFS_IOC_DIOINFO:
- case XFS_IOC_FSGEOMETRY_V4:
- case XFS_IOC_FSGEOMETRY:
- case XFS_IOC_AG_GEOMETRY:
- case XFS_IOC_FSGETXATTR:
- case XFS_IOC_FSSETXATTR:
- case XFS_IOC_FSGETXATTRA:
- case XFS_IOC_FSSETDM:
- case XFS_IOC_GETBMAP:
- case XFS_IOC_GETBMAPA:
- case XFS_IOC_GETBMAPX:
- case XFS_IOC_FSCOUNTS:
- case XFS_IOC_SET_RESBLKS:
- case XFS_IOC_GET_RESBLKS:
- case XFS_IOC_FSGROWFSLOG:
- case XFS_IOC_GOINGDOWN:
- case XFS_IOC_ERROR_INJECTION:
- case XFS_IOC_ERROR_CLEARALL:
- case FS_IOC_GETFSMAP:
- case XFS_IOC_SCRUB_METADATA:
- case XFS_IOC_BULKSTAT:
- case XFS_IOC_INUMBERS:
- return xfs_file_ioctl(filp, cmd, p);
-#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
- /*
- * These are handled fine if no alignment issues. To support x32
- * which uses native 64-bit alignment we must emit these cases in
- * addition to the ia-32 compat set below.
- */
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- case XFS_IOC_FSGEOMETRY_V1:
- case XFS_IOC_FSGROWFSDATA:
- case XFS_IOC_FSGROWFSRT:
- case XFS_IOC_ZERO_RANGE:
-#ifdef CONFIG_X86_X32
- /*
- * x32 special: this gets a different cmd number from the ia-32 compat
- * case below; the associated data will match native 64-bit alignment.
- */
- case XFS_IOC_SWAPEXT:
-#endif
- return xfs_file_ioctl(filp, cmd, p);
-#endif
#if defined(BROKEN_X86_ALIGNMENT)
case XFS_IOC_ALLOCSP_32:
case XFS_IOC_FREESP_32:
case XFS_IOC_FSSETDM_BY_HANDLE_32:
return xfs_compat_fssetdm_by_handle(filp, arg);
default:
- return -ENOIOCTLCMD;
+ /* try the native version */
+ return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
}
}
out_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
ASSERT(*ticp == NULL);
tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
- KM_SLEEP | KM_MAYFAIL);
- if (!tic)
- return -ENOMEM;
-
+ KM_SLEEP);
*ticp = tic;
xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
struct xfs_inode *ip = XFS_I(inode);
int error;
- while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
+ while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
xfs_iunlock(ip, *iolock);
*did_unlock = true;
error = break_layout(inode, true);
}
/*
- * Grab the exclusive iolock for a data copy from src to dest, making
- * sure to abide vfs locking order (lowest pointer value goes first) and
- * breaking the pnfs layout leases on dest before proceeding. The loop
- * is needed because we cannot call the blocking break_layout() with the
- * src iolock held, and therefore have to back out both locks.
+ * Grab the exclusive iolock for a data copy from src to dest, making sure to
+ * abide vfs locking order (lowest pointer value goes first) and breaking the
+ * layout leases before proceeding. The loop is needed because we cannot call
+ * the blocking break_layout() with the iolocks held, and therefore have to
+ * back out both locks.
*/
static int
xfs_iolock_two_inodes_and_break_layout(
{
int error;
-retry:
- if (src < dest) {
- inode_lock_shared(src);
- inode_lock_nested(dest, I_MUTEX_NONDIR2);
- } else {
- /* src >= dest */
- inode_lock(dest);
- }
+ if (src > dest)
+ swap(src, dest);
- error = break_layout(dest, false);
- if (error == -EWOULDBLOCK) {
- inode_unlock(dest);
- if (src < dest)
- inode_unlock_shared(src);
+retry:
+ /* Wait to break both inodes' layouts before we start locking. */
+ error = break_layout(src, true);
+ if (error)
+ return error;
+ if (src != dest) {
error = break_layout(dest, true);
if (error)
return error;
- goto retry;
}
+
+ /* Lock one inode and make sure nobody got in and leased it. */
+ inode_lock(src);
+ error = break_layout(src, false);
if (error) {
+ inode_unlock(src);
+ if (error == -EWOULDBLOCK)
+ goto retry;
+ return error;
+ }
+
+ if (src == dest)
+ return 0;
+
+ /* Lock the other inode and make sure nobody got in and leased it. */
+ inode_lock_nested(dest, I_MUTEX_NONDIR2);
+ error = break_layout(dest, false);
+ if (error) {
+ inode_unlock(src);
inode_unlock(dest);
- if (src < dest)
- inode_unlock_shared(src);
+ if (error == -EWOULDBLOCK)
+ goto retry;
return error;
}
- if (src > dest)
- inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+
return 0;
}
xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
if (!same_inode)
- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
inode_unlock(inode_out);
if (!same_inode)
- inode_unlock_shared(inode_in);
+ inode_unlock(inode_in);
}
/*
if (same_inode)
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
else
- xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+ xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
XFS_MMAPLOCK_EXCL);
/* Check file eligibility and prepare for block sharing. */
#define p4d_alloc(mm, pgd, address) (pgd)
#define p4d_offset(pgd, start) (pgd)
-#define p4d_none(p4d) 0
-#define p4d_bad(p4d) 0
-#define p4d_present(p4d) 1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+ return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ return 1;
+}
+#endif
+
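The macro-to-inline conversion matters because a bare `0`/`1` expansion never evaluates or type-checks its argument; a sketch of the kind of caller that trips it (walker code assumed for illustration):

	p4d_t *p4d = p4d_offset(pgd, addr);

	if (p4d_none(*p4d))	/* old macro expanded to plain 0: *p4d was
				 * never read, so set-but-unused warnings
				 * fired and type mismatches went unseen */
		return;
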
#define p4d_ERROR(p4d) do { } while (0)
#define p4d_clear(p4d) pgd_clear(p4d)
#define p4d_val(p4d) pgd_val(p4d)
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
- __REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_EAGAIN -2U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
- return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+ return cookie != BLK_QC_T_NONE;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
static inline void ceph_buffer_put(struct ceph_buffer *b)
{
- kref_put(&b->kref, ceph_buffer_release);
+ if (b)
+ kref_put(&b->kref, ceph_buffer_release);
}
extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
long ______r; \
static struct ftrace_likely_data \
__aligned(4) \
- __section("_ftrace_annotated_branch") \
+ __section(_ftrace_annotated_branch) \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
#define __trace_if_value(cond) ({ \
static struct ftrace_branch_data \
__aligned(4) \
- __section("_ftrace_branch") \
+ __section(_ftrace_branch) \
__if_trace = { \
.func = __func__, \
.file = __FILE__, \
".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(.rodata..c_jump_table)
#else
#define annotate_reachable()
* visible to the compiler.
*/
#define __ADDRESSABLE(sym) \
- static void * __section(".discard.addressable") __used \
+ static void * __section(.discard.addressable) __used \
__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
/**
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
gfp_t gfp)
{
- int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
- size_t align = get_order(PAGE_ALIGN(size));
-
- return alloc_pages_node(node, gfp, align);
+ return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
dma_addr_t dma_addr, unsigned long attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
-# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ return prot; /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
/* Memory location data */
- unsigned location[EDAC_MAX_LAYERS];
+ unsigned int location[EDAC_MAX_LAYERS];
struct mem_ctl_info *mci; /* the parent */
u32 nr_pages; /* number of pages on this dimm */
- unsigned csrow, cschannel; /* Points to the old API data */
+ unsigned int csrow, cschannel; /* Points to the old API data */
u16 smbios_handle; /* Handle for SMBIOS type 17 */
};
unsigned long page);
int mc_idx;
struct csrow_info **csrows;
- unsigned nr_csrows, num_cschannel;
+ unsigned int nr_csrows, num_cschannel;
/*
* Memory Controller hierarchy
* of the recent drivers enumerate memories per DIMM, instead.
* When the memory controller is per rank, csbased is true.
*/
- unsigned n_layers;
+ unsigned int n_layers;
struct edac_mc_layer *layers;
bool csbased;
/*
* DIMM info. Will eventually remove the entire csrows_info some day
*/
- unsigned tot_dimms;
+ unsigned int tot_dimms;
struct dimm_info **dimms;
/*
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+ int node);
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
return -EINVAL;
}
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
- WARN_ON(1);
-}
-
static inline int devm_gpio_request(struct device *dev, unsigned gpio,
const char *label)
{
{ "ELAN0618", 0 },
{ "ELAN0619", 0 },
{ "ELAN061A", 0 },
- { "ELAN061B", 0 },
+/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN061E", 0 },
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am))
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf
-#define QI_GRAN_ALL_ALL 0
-#define QI_GRAN_NONG_ALL 1
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3
union {
struct {
#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
- u8 desc_len;
- char desc[sizeof(long) - 1]; /* First few chars of description */
+ u16 desc_len;
+ char desc[sizeof(long) - 2]; /* First few chars of description */
#else
- char desc[sizeof(long) - 1]; /* First few chars of description */
- u8 desc_len;
+ char desc[sizeof(long) - 2]; /* First few chars of description */
+ u16 desc_len;
#endif
};
unsigned long x;
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
resource_size_t hw_addr, resource_size_t size);
int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
resource_size_t logic_pio_to_hwaddr(unsigned long pio);
unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
mod_node_page_state(page_pgdat(page), idx, val);
}
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+ int val)
+{
+ struct page *page = virt_to_head_page(p);
+
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
__mod_lruvec_page_state(page, idx, -1);
}
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+ __mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+ __mod_lruvec_slab_state(p, idx, -1);
+}
+
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
int idx)
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
};
enum {
- MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20,
+ MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
};
enum {
- MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20,
+ MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
};
enum {
};
struct mlx5_ifc_tls_progress_params_bits {
- u8 valid[0x1];
- u8 reserved_at_1[0x7];
- u8 pd[0x18];
+ u8 reserved_at_0[0x8];
+ u8 tisn[0x18];
u8 next_record_tcp_sn[0x20];
/** @pgmap: Points to the hosting device page map. */
struct dev_pagemap *pgmap;
void *zone_device_data;
- unsigned long _zd_pad_1; /* uses mapping */
+ /*
+ * ZONE_DEVICE private pages are counted as being
+ * mapped so the next 3 words hold the mapping, index,
+ * and private fields from the source anonymous or
+ * page cache page while the page is migrated to device
+ * private memory.
+ * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+ * use the mapping, index, and private fields when
+ * pmem backed DAX files are mapped.
+ */
};
/** @rcu_head: You can use this to free a page by RCU. */
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE,
- NR_SLAB_UNRECLAIMABLE,
+ NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
+ NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
+ * memcg_flush_percpu_vmstats() first. */
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
* Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
*/
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
typedef struct TransportAddress_ipAddress { /* SEQUENCE */
int options; /* No use */
unsigned int ip;
InfoRequestResponse infoRequestResponse;
};
} RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
#ifdef CONFIG_PCIEASPM
bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
#else
static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_PCIEAER
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_config_aneg(struct phy_device *phydev);
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
};
struct device_node;
+struct gpio_desc;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern const struct file_operations pidfd_fops;
+struct file;
+
+extern struct pid *pidfd_pid(const struct file *file);
+
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
extern void exit_signals(struct task_struct *tsk);
extern void kernel_sigaction(int, __sighandler_t);
+#define SIG_KTHREAD ((__force __sighandler_t)2)
+#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
+
static inline void allow_signal(int sig)
{
/*
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
- kernel_sigaction(sig, (__force __sighandler_t)2);
+ kernel_sigaction(sig, SIG_KTHREAD);
+}
+
+static inline void allow_kernel_signal(int sig)
+{
+ /*
+ * Kernel threads handle their own signals. Let the signal code
+ * know signals sent by the kernel will be handled, so that they
+ * don't get silently dropped.
+ */
+ kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
}
static inline void disallow_signal(int sig)
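allow_kernel_signal() complements allow_signal(): signals sent from kernel code (for example via send_sig()) are delivered to the kthread, while userspace-originated signals remain ignored. A hedged sketch of a kthread opting in:

    static int my_kthread(void *data)
    {
        allow_kernel_signal(SIGTERM);   /* only kernel-sent SIGTERM gets through */

        while (!kthread_should_stop()) {
            if (signal_pending(current))
                break;                  /* kernel asked us to shut down */
            schedule_timeout_interruptible(HZ);
        }
        return 0;
    }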
to->l4_hash = from->l4_hash;
};
+static inline void skb_copy_decrypted(struct sk_buff *to,
+ const struct sk_buff *from)
+{
+#ifdef CONFIG_TLS_DEVICE
+ to->decrypted = from->decrypted;
+#endif
+}
+
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
+#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
+ * plain text and require encryption
+ */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
struct rpc_call_ops {
void (*rpc_call_prepare)(struct rpc_task *, void *);
- void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
void (*rpc_call_done)(struct rpc_task *, void *);
void (*rpc_count_stats)(struct rpc_task *, void *);
void (*rpc_release)(void *);
return old;
}
+/* for __ARCH_WANT_SYS_IPC */
+long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+ unsigned int nsops,
+ const struct __kernel_timespec __user *timeout);
+long ksys_semget(key_t key, int nsems, int semflg);
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
+long ksys_msgget(key_t key, int msgflg);
+long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+ long msgtyp, int msgflg);
+long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+ int msgflg);
+long ksys_shmget(key_t key, size_t size, int shmflg);
+long ksys_shmdt(char __user *shmaddr);
+long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+ unsigned int nsops,
+ const struct old_timespec32 __user *timeout);
+
#endif
* @cs_was_changed_seq: The sequence number of clocksource change events
* @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
+ * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
* interval.
*
* wall_to_monotonic is no longer the boot time, getboottime must be
* used instead.
+ *
+ * @monotonic_to_boot is a timespec64 representation of @offs_boot to
+ * accelerate the VDSO update for CLOCK_BOOTTIME.
*/
struct timekeeper {
struct tk_read_base tkr_mono;
u8 cs_was_changed_seq;
ktime_t next_leap_ktime;
u64 raw_sec;
+ struct timespec64 monotonic_to_boot;
/* The following members are for timekeeping internal use */
u64 cycle_interval;
#define is_signed_type(type) (((type)(-1)) < (type)1)
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
/*
* field rather than determining a dma address themselves.
*
* Note that transfer_buffer must still be set if the controller
- * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * does not support DMA (as indicated by hcd_uses_dma()) and when talking
* to root hub. If you have to transfer between highmem zone and the device
* on such controller, create a bounce buffer or bail out with an error.
* If transfer_buffer cannot be set (is in highmem) and the controller is DMA
return hcd->high_prio_bh.completing_ep == ep;
}
+#define hcd_uses_dma(hcd) \
+ (IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)
+
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
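A hedged sketch of the new helper replacing open-coded checks of hcd->self.uses_dma, which can be set even though DMA support is compiled out when CONFIG_HAS_DMA is disabled:

    if (hcd_uses_dma(hcd))
        ret = usb_hcd_map_urb_for_dma(hcd, urb, GFP_KERNEL);
    else
        ret = 0;   /* PIO controller: urb->transfer_buffer is used directly */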
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
R##_e = X##_e; \
+ /* Fall through */ \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
\
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
R##_e = Y##_e; \
+ /* Fall through */ \
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
R##_s = X##_s; \
+ /* Fall through */ \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
R##_s = Y##_s; \
+ /* Fall through */ \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
FP_SET_EXCEPTION(FP_EX_DIVZERO); \
+ /* Fall through */ \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
R##_c = FP_CLS_INF; \
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Character LCD driver for Linux
- *
- * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
- * Copyright (C) 2016-2017 Glider bvba
- */
-
-struct charlcd {
- const struct charlcd_ops *ops;
- const unsigned char *char_conv; /* Optional */
-
- int ifwidth; /* 4-bit or 8-bit (default) */
- int height;
- int width;
- int bwidth; /* Default set by charlcd_alloc() */
- int hwidth; /* Default set by charlcd_alloc() */
-
- void *drvdata; /* Set by charlcd_alloc() */
-};
-
-struct charlcd_ops {
- /* Required */
- void (*write_cmd)(struct charlcd *lcd, int cmd);
- void (*write_data)(struct charlcd *lcd, int data);
-
- /* Optional */
- void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */
- void (*clear_fast)(struct charlcd *lcd);
- void (*backlight)(struct charlcd *lcd, int on);
-};
-
-struct charlcd *charlcd_alloc(unsigned int drvdata_size);
-void charlcd_free(struct charlcd *lcd);
-
-int charlcd_register(struct charlcd *lcd);
-int charlcd_unregister(struct charlcd *lcd);
-
-void charlcd_poke(struct charlcd *lcd);
struct tcf_idrinfo {
struct mutex lock;
struct idr action_idr;
+ struct net *net;
};
struct tc_action_ops;
};
static inline
-int tc_action_net_init(struct tc_action_net *tn,
+int tc_action_net_init(struct net *net, struct tc_action_net *tn,
const struct tc_action_ops *ops)
{
int err = 0;
if (!tn->idrinfo)
return -ENOMEM;
tn->ops = ops;
+ tn->idrinfo->net = net;
mutex_init(&tn->idrinfo->lock);
idr_init(&tn->idrinfo->action_idr);
return err;
unsigned int len)
{
if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
- return -EINVAL;
+ return 0;
return pskb_may_pull(skb, len);
}
__u16 conn_info_min_age;
__u16 conn_info_max_age;
__u16 auth_payload_timeout;
+ __u8 min_enc_key_size;
__u8 ssp_debug_mode;
__u8 hw_error_code;
__u32 clock;
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
- void *reasm_data);
+ void *reasm_data, bool try_coalesce);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
#endif
struct netlink_callback *cb);
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
- unsigned char *flags, bool skip_oif);
+ u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
- int nh_weight);
+ int nh_weight, u8 rt_family);
#endif /* _NET_FIB_H */
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
struct net {
- refcount_t passive; /* To decided when the network
+ refcount_t passive; /* To decide when the network
* namespace should be freed.
*/
refcount_t count; /* To decide when the network
spinlock_t rules_mod_lock;
u32 hash_mix;
- atomic64_t cookie_gen;
struct list_head list; /* list of network namespaces */
struct list_head exit_list; /* To linked to call pernet exit
unsigned char *udata;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
- u16 flags:13,
- bound:1,
+ u16 flags:14,
genmask:2;
u8 klen;
u8 dlen;
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
+ bool bound;
};
#define nft_trans_set(trans) \
(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_id(trans) \
(((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans) \
+ (((struct nft_trans_set *)trans->data)->bound)
struct nft_trans_chain {
bool update;
struct nft_trans_elem {
struct nft_set *set;
struct nft_set_elem elem;
+ bool bound;
};
#define nft_trans_elem_set(trans) \
(((struct nft_trans_elem *)trans->data)->set)
#define nft_trans_elem(trans) \
(((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_set_bound(trans) \
+ (((struct nft_trans_elem *)trans->data)->bound)
struct nft_trans_obj {
struct nft_object *obj;
(__reg)->key = __key; \
memset(&(__reg)->mask, 0xff, (__reg)->len);
+int nft_chain_offload_priority(struct nft_base_chain *basechain);
+
#endif
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
- return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen), policy,
- NL_VALIDATE_STRICT, extack);
+ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+ NL_VALIDATE_STRICT, extack);
}
/**
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
rc = nh_grp->num_nh;
- } else {
- const struct nh_info *nhi;
-
- nhi = rcu_dereference_rtnl(nh->nh_info);
- if (nhi->reject_nh)
- rc = 0;
}
return rc;
}
static inline
-int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
+int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
+ u8 rt_family)
{
struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
int i;
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;
- if (fib_add_nexthop(skb, nhc, weight) < 0)
+ if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
return -EMSGSIZE;
}
{
cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol;
- cls_common->prio = tp->prio;
+ cls_common->prio = tp->prio >> 16;
if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
cls_common->extack = extack;
}
u32 group_num;
u32 refcount;
u32 seq;
+ struct rcu_head rcu;
};
struct psample_group *psample_group_get(struct net *net, u32 group_num);
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
u32 table_id, struct fib_info *fi,
- int *fa_index, int fa_start);
+ int *fa_index, int fa_start, unsigned int flags);
static inline void ip_rt_put(struct rtable *rt)
{
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
+ * Check decrypted mark in case skb_orphan() cleared socket.
*/
static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
struct net_device *dev)
#ifdef CONFIG_SOCK_VALIDATE_XMIT
struct sock *sk = skb->sk;
- if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
+ if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
skb = sk->sk_validate_xmit_skb(sk, dev, skb);
+#ifdef CONFIG_TLS_DEVICE
+ } else if (unlikely(skb->decrypted)) {
+ pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
+ kfree_skb(skb);
+ skb = NULL;
+#endif
+ }
#endif
return skb;
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
- char name[IFNAMSIZ]; /* name of XFRM device */
int link; /* ifindex of underlying L2 interface */
u32 if_id; /* interface identifier */
};
struct xfrm_if {
struct xfrm_if __rcu *next; /* next interface in list */
struct net_device *dev; /* virtual device associated with interface */
- struct net_device *phydev; /* physical device */
struct net *net; /* netns for packet i/o */
struct xfrm_if_parms p; /* interface parms */
};
int rdma_restrack_count(struct ib_device *dev,
- enum rdma_restrack_type type,
- struct pid_namespace *ns);
+ enum rdma_restrack_type type);
void rdma_restrack_kadd(struct rdma_restrack_entry *res);
void rdma_restrack_uadd(struct rdma_restrack_entry *res);
#define CMD_IDU_ENABLE 0x71
#define CMD_IDU_DISABLE 0x72
#define CMD_IDU_SET_MODE 0x74
+#define CMD_IDU_READ_MODE 0x75
#define CMD_IDU_SET_DEST 0x76
+#define CMD_IDU_ACK_CIRQ 0x79
#define CMD_IDU_SET_MASK 0x7C
#define IDU_M_TRIG_LEVEL 0x0
__mcip_cmd(cmd, param);
}
+/*
+ * Read MCIP register
+ */
+static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
+{
+ __mcip_cmd(cmd, param);
+ return read_aux_reg(ARC_REG_MCIP_READBACK);
+}
+
#endif
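A hedged usage sketch pairing the new read-back helper with the CMD_IDU_READ_MODE command defined above (common_irq is a hypothetical parameter):

    unsigned int mode;

    /* Issue the read command, then fetch the result from the readback register. */
    mode = __mcip_cmd_read(CMD_IDU_READ_MODE, common_irq);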
#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
enum rxrpc_skb_trace {
- rxrpc_skb_rx_cleaned,
- rxrpc_skb_rx_freed,
- rxrpc_skb_rx_got,
- rxrpc_skb_rx_lost,
- rxrpc_skb_rx_purged,
- rxrpc_skb_rx_received,
- rxrpc_skb_rx_rotated,
- rxrpc_skb_rx_seen,
- rxrpc_skb_tx_cleaned,
- rxrpc_skb_tx_freed,
- rxrpc_skb_tx_got,
- rxrpc_skb_tx_new,
- rxrpc_skb_tx_rotated,
- rxrpc_skb_tx_seen,
+ rxrpc_skb_cleaned,
+ rxrpc_skb_freed,
+ rxrpc_skb_got,
+ rxrpc_skb_lost,
+ rxrpc_skb_new,
+ rxrpc_skb_purged,
+ rxrpc_skb_received,
+ rxrpc_skb_rotated,
+ rxrpc_skb_seen,
+ rxrpc_skb_unshared,
+ rxrpc_skb_unshared_nomem,
};
enum rxrpc_local_trace {
* Declare tracing information enums and their string mappings for display.
*/
#define rxrpc_skb_traces \
- EM(rxrpc_skb_rx_cleaned, "Rx CLN") \
- EM(rxrpc_skb_rx_freed, "Rx FRE") \
- EM(rxrpc_skb_rx_got, "Rx GOT") \
- EM(rxrpc_skb_rx_lost, "Rx *L*") \
- EM(rxrpc_skb_rx_purged, "Rx PUR") \
- EM(rxrpc_skb_rx_received, "Rx RCV") \
- EM(rxrpc_skb_rx_rotated, "Rx ROT") \
- EM(rxrpc_skb_rx_seen, "Rx SEE") \
- EM(rxrpc_skb_tx_cleaned, "Tx CLN") \
- EM(rxrpc_skb_tx_freed, "Tx FRE") \
- EM(rxrpc_skb_tx_got, "Tx GOT") \
- EM(rxrpc_skb_tx_new, "Tx NEW") \
- EM(rxrpc_skb_tx_rotated, "Tx ROT") \
- E_(rxrpc_skb_tx_seen, "Tx SEE")
+ EM(rxrpc_skb_cleaned, "CLN") \
+ EM(rxrpc_skb_freed, "FRE") \
+ EM(rxrpc_skb_got, "GOT") \
+ EM(rxrpc_skb_lost, "*L*") \
+ EM(rxrpc_skb_new, "NEW") \
+ EM(rxrpc_skb_purged, "PUR") \
+ EM(rxrpc_skb_received, "RCV") \
+ EM(rxrpc_skb_rotated, "ROT") \
+ EM(rxrpc_skb_seen, "SEE") \
+ EM(rxrpc_skb_unshared, "UNS") \
+ E_(rxrpc_skb_unshared_nomem, "US0")
#define rxrpc_local_traces \
EM(rxrpc_local_got, "GOT") \
#define E_(a, b) { a, b }
TRACE_EVENT(rxrpc_local,
- TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+ TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
int usage, const void *where),
- TP_ARGS(local, op, usage, where),
+ TP_ARGS(local_debug_id, op, usage, where),
TP_STRUCT__entry(
__field(unsigned int, local )
),
TP_fast_assign(
- __entry->local = local->debug_id;
+ __entry->local = local_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
TRACE_EVENT(rxrpc_skb,
TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
- int usage, int mod_count, const void *where),
+ int usage, int mod_count, u8 flags, const void *where),
- TP_ARGS(skb, op, usage, mod_count, where),
+ TP_ARGS(skb, op, usage, mod_count, flags, where),
TP_STRUCT__entry(
__field(struct sk_buff *, skb )
__field(enum rxrpc_skb_trace, op )
+ __field(u8, flags )
__field(int, usage )
__field(int, mod_count )
__field(const void *, where )
TP_fast_assign(
__entry->skb = skb;
+ __entry->flags = flags;
__entry->op = op;
__entry->usage = usage;
__entry->mod_count = mod_count;
__entry->where = where;
),
- TP_printk("s=%p %s u=%d m=%d p=%pSR",
+ TP_printk("s=%p %cx %s u=%d m=%d p=%pSR",
__entry->skb,
+ __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R',
__print_symbolic(__entry->op, rxrpc_skb_traces),
__entry->usage,
__entry->mod_count,
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
* If no cookie has been set yet, generate a new cookie. Once
* generated, the socket cookie remains stable for the life of the
* socket. This helper can be useful for monitoring per socket
- * networking traffic statistics as it provides a unique socket
- * identifier per namespace.
+ * networking traffic statistics as it provides a global socket
+ * identifier that can be assumed unique.
* Return
* An 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
#define CAPI_MSG_BASELEN 8
#define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
#define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
/*----- CAPI commands -----*/
#define CAPI_ALERT 0x01
#define JFFS2_ACL_VERSION 0x0001
-// Maybe later...
-//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
-//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
-
-
#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
mount time, don't wait for it to
happen later */
struct nf_acct *nfacct;
};
+struct xt_nfacct_match_info_v1 {
+ char name[NFACCT_NAME_MAX];
+ struct nf_acct *nfacct __attribute__((aligned(8)));
+};
+
#endif /* _XT_NFACCT_MATCH_H */
__u32 rdma_mr_max;
__u32 rdma_mr_size;
__u8 tos;
+ __u8 sl;
__u32 cache_allocs;
};
__u32 rdma_mr_max;
__u32 rdma_mr_size;
__u8 tos;
+ __u8 sl;
__u32 cache_allocs;
};
#define P_ALL 0
#define P_PID 1
#define P_PGID 2
+#define P_PIDFD 3
#endif /* _UAPI_LINUX_WAIT_H */
* to control CQ arming.
*/
struct siw_cq_ctrl {
- __aligned_u64 notify;
+ __u32 flags;
+ __u32 pad;
};
#endif
*cmd &= ~IPC_64;
return version;
}
-#endif
-/* for __ARCH_WANT_SYS_IPC */
-long ksys_semtimedop(int semid, struct sembuf __user *tsops,
- unsigned int nsops,
- const struct __kernel_timespec __user *timeout);
-long ksys_semget(key_t key, int nsems, int semflg);
-long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
-long ksys_msgget(key_t key, int msgflg);
-long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
-long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
- long msgtyp, int msgflg);
-long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
- int msgflg);
-long ksys_shmget(key_t key, size_t size, int shmflg);
-long ksys_shmdt(char __user *shmaddr);
-long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
-
-/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
-long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned int nsops,
- const struct old_timespec32 __user *timeout);
-#ifdef CONFIG_COMPAT
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
-#endif /* CONFIG_COMPAT */
+
+#endif
#endif
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
- struct bpf_insn *to_buff)
+ struct bpf_insn *to_buff,
+ bool emit_zext)
{
struct bpf_insn *to = to_buff;
u32 imm_rnd = get_random_int();
case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+ if (emit_zext)
+ *to++ = BPF_ZEXT_REG(BPF_REG_AX);
*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
break;
insn[1].code == 0)
memcpy(aux, insn, sizeof(aux));
- rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
+ rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
+ clone->aux->verifier_zext);
if (!rewritten)
continue;
if (err)
goto free_used_maps;
- err = bpf_prog_new_fd(prog);
- if (err < 0) {
- /* failed to allocate fd.
- * bpf_prog_put() is needed because the above
- * bpf_prog_alloc_id() has published the prog
- * to the userspace and the userspace may
- * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
- */
- bpf_prog_put(prog);
- return err;
- }
-
+ /* Upon success of bpf_prog_alloc_id(), the BPF prog is
+ * effectively publicly exposed. However, retrieving via
+ * bpf_prog_get_fd_by_id() will take another reference,
+ * therefore it cannot be gone underneath us.
+ *
+ * Only for the time /after/ successful bpf_prog_new_fd()
+ * and before returning to userspace, we might just hold
+ * one reference and any parallel close on that fd could
+ * rip everything out. Hence, below notifications must
+ * happen before bpf_prog_new_fd().
+ *
+ * Also, any failure handling from this point onwards must
+ * be using bpf_prog_put() given the program is exposed.
+ */
bpf_prog_kallsyms_add(prog);
perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
+
+ err = bpf_prog_new_fd(prog);
+ if (err < 0)
+ bpf_prog_put(prog);
return err;
free_used_maps:
reg->smax_value = S64_MAX;
reg->umin_value = 0;
reg->umax_value = U64_MAX;
-
- /* constant backtracking is enabled for root only for now */
- reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
}
/* Mark a register as having a completely unknown (scalar) value. */
__mark_reg_not_init(regs + regno);
return;
}
- __mark_reg_unknown(regs + regno);
+ regs += regno;
+ __mark_reg_unknown(regs);
+ /* constant backtracking is enabled for root without bpf2bpf calls */
+ regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+ true : false;
}
static void __mark_reg_not_init(struct bpf_reg_state *reg)
bitmap_from_u64(mask, stack_mask);
for_each_set_bit(i, mask, 64) {
if (i >= func->allocated_stack / BPF_REG_SIZE) {
- /* This can happen if backtracking
- * is propagating stack precision where
- * caller has larger stack frame
- * than callee, but backtrack_insn() should
- * have returned -ENOTSUPP.
+ /* the sequence of instructions:
+ * 2: (bf) r3 = r10
+ * 3: (7b) *(u64 *)(r3 -8) = r0
+ * 4: (79) r4 = *(u64 *)(r10 -8)
+ * doesn't contain jmps. It's backtracked
+ * as a single block.
+ * During backtracking insn 3 is not recognized as
+ * stack access, so at the end of backtracking
+ * stack slot fp-8 is still marked in stack_mask.
+ * However the parent state may not have accessed
+ * fp-8 and it's "unallocated" stack space.
+ * In such a case, fall back to conservative marking.
*/
- verbose(env, "BUG spi %d stack_size %d\n",
- i, func->allocated_stack);
- WARN_ONCE(1, "verifier backtracking bug");
- return -EFAULT;
+ mark_all_scalars_precise(env, st);
+ return 0;
}
if (func->stack[i].slot_type[0] != STACK_SPILL) {
* if the parent has to be frozen, the child has too.
*/
cgrp->freezer.e_freeze = parent->freezer.e_freeze;
- if (cgrp->freezer.e_freeze)
+ if (cgrp->freezer.e_freeze) {
+ /*
+ * Set the CGRP_FREEZE flag, so when a process will be
+ * attached to the child cgroup, it will become frozen.
+ * At this point the new cgroup is unpopulated, so we can
+ * consider it frozen immediately.
+ */
+ set_bit(CGRP_FREEZE, &cgrp->flags);
set_bit(CGRP_FROZEN, &cgrp->flags);
+ }
spin_lock_irq(&css_set_lock);
for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* kernel/configs.c
* Echo the kernel .config file used to build the kernel
* Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
* Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
* Copyright (C) 2002 Hewlett-Packard Company
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
*/
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
- int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
- size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- size_t align = get_order(PAGE_ALIGN(size));
+ size_t count = size >> PAGE_SHIFT;
struct page *page = NULL;
struct cma *cma = NULL;
/* CMA can be used only in the context which permits sleeping */
if (cma && gfpflags_allow_blocking(gfp)) {
+ size_t align = get_order(size);
size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
}
- /* Fallback allocation of normal pages */
- if (!page)
- page = alloc_pages_node(node, gfp, align);
return page;
}
{
u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
- if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
- max_dma = dev->bus_dma_mask;
-
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
+ size_t alloc_size = PAGE_ALIGN(size);
+ int node = dev_to_node(dev);
struct page *page = NULL;
u64 phys_mask;
gfp &= ~__GFP_ZERO;
gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_mask);
+ page = dma_alloc_contiguous(dev, alloc_size, gfp);
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ dma_free_contiguous(dev, page, alloc_size);
+ page = NULL;
+ }
again:
- page = dma_alloc_contiguous(dev, size, gfp);
+ if (!page)
+ page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, size);
page = NULL;
if (!page)
return NULL;
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev)) {
/* remove any dirty cache lines on the kernel alias */
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
/* return the page pointer as the opaque cookie */
return page;
}
{
unsigned int page_order = get_order(size);
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
__dma_direct_free_pages(dev, size, cpu_addr);
return;
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+ if (dev_is_dma_coherent(dev) ||
+ (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+ (attrs & DMA_ATTR_NON_CONSISTENT)))
+ return prot;
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+ return arch_dma_mmap_pgprot(dev, prot, attrs);
+ return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
/*
* Create userspace mapping for the DMA-coherent memory.
*/
unsigned long pfn;
int ret = -ENXIO;
- vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
/* create a coherent mapping */
ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
- arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+ dma_pgprot(dev, PAGE_KERNEL, attrs),
__builtin_return_address(0));
if (!ret) {
__dma_direct_free_pages(dev, size, page);
int register_perf_hw_breakpoint(struct perf_event *bp)
{
- struct arch_hw_breakpoint hw;
+ struct arch_hw_breakpoint hw = { };
int err;
err = reserve_bp_slot(bp);
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
bool check)
{
- struct arch_hw_breakpoint hw;
+ struct arch_hw_breakpoint hw = { };
int err;
err = hw_breakpoint_parse(bp, attr, &hw);
return retval;
}
+static struct pid *pidfd_get_pid(unsigned int fd)
+{
+ struct fd f;
+ struct pid *pid;
+
+ f = fdget(fd);
+ if (!f.file)
+ return ERR_PTR(-EBADF);
+
+ pid = pidfd_pid(f.file);
+ if (!IS_ERR(pid))
+ get_pid(pid);
+
+ fdput(f);
+ return pid;
+}
+
static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
int options, struct rusage *ru)
{
type = PIDTYPE_PID;
if (upid <= 0)
return -EINVAL;
+
+ pid = find_get_pid(upid);
break;
case P_PGID:
type = PIDTYPE_PGID;
- if (upid <= 0)
+ if (upid < 0)
+ return -EINVAL;
+
+ if (upid)
+ pid = find_get_pid(upid);
+ else
+ pid = get_task_pid(current, PIDTYPE_PGID);
+ break;
+ case P_PIDFD:
+ type = PIDTYPE_PID;
+ if (upid < 0)
return -EINVAL;
+
+ pid = pidfd_get_pid(upid);
+ if (IS_ERR(pid))
+ return PTR_ERR(pid);
break;
default:
return -EINVAL;
}
- if (type < PIDTYPE_MAX)
- pid = find_get_pid(upid);
-
wo.wo_type = type;
wo.wo_pid = pid;
wo.wo_flags = options;
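With P_PIDFD wired into kernel_waitid(), userspace can wait on a pidfd directly. A minimal sketch, assuming kernel headers that define P_PIDFD and a pidfd obtained via pidfd_open() or clone3():

    #include <sys/types.h>
    #include <sys/wait.h>

    /* Returns the child's exit status, or -1 on error. */
    int wait_pidfd(int pidfd)
    {
        siginfo_t info = { 0 };

        if (waitid(P_PIDFD, pidfd, &info, WEXITED) < 0)
            return -1;
        return info.si_status;
    }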
#endif /* #ifdef CONFIG_TASKS_RCU */
}
+struct pid *pidfd_pid(const struct file *file)
+{
+ if (file->f_op == &pidfd_fops)
+ return file->private_data;
+
+ return ERR_PTR(-EBADF);
+}
+
static int pidfd_release(struct inode *inode, struct file *file)
{
struct pid *pid = file->private_data;
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
+ *
+ * args->exit_signal is expected to be checked for sanity by the caller.
*/
long _do_fork(struct kernel_clone_args *args)
{
if (copy_from_user(&args, uargs, size))
return -EFAULT;
+ /*
+ * Verify that the upper 32 bits of exit_signal are unset and
+ * that it is a valid signal.
+ */
+ if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
+ !valid_signal(args.exit_signal)))
+ return -EINVAL;
+
*kargs = (struct kernel_clone_args){
.flags = args.flags,
.pidfd = u64_to_user_ptr(args.pidfd),
}
}
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+ /*
+ * If irq_sysfs_init() has not yet been invoked (early boot), then
+ * irq_kobj_base is NULL and the descriptor was never added.
+ * kobject_del() complains about a object with no parent, so make
+ * it conditional.
+ */
+ if (irq_kobj_base)
+ kobject_del(&desc->kobj);
+}
+
static int __init irq_sysfs_init(void)
{
struct irq_desc *desc;
};
static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
#endif /* CONFIG_SYSFS */
* The sysfs entry must be serialized against a concurrent
* irq_sysfs_init() as well.
*/
- kobject_del(&desc->kobj);
+ irq_sysfs_del(desc);
delete_irq_desc(irq);
/*
irq = find_first_bit(irqs_resend, nr_irqs);
clear_bit(irq, irqs_resend);
desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
local_irq_disable();
desc->handle_irq(desc);
local_irq_enable();
{
char namebuf[KSYM_NAME_LEN];
- if (is_ksym_addr(addr))
- return !!get_symbol_pos(addr, symbolsize, offset);
+ if (is_ksym_addr(addr)) {
+ get_symbol_pos(addr, symbolsize, offset);
+ return 1;
+ }
return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
!!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}
*/
static void do_optimize_kprobes(void)
{
+ lockdep_assert_held(&text_mutex);
/*
* The optimization/unoptimization refers online_cpus via
* stop_machine() and cpu-hotplug modifies online_cpus.
list_empty(&optimizing_list))
return;
- mutex_lock(&text_mutex);
arch_optimize_kprobes(&optimizing_list);
- mutex_unlock(&text_mutex);
}
/*
{
struct optimized_kprobe *op, *tmp;
+ lockdep_assert_held(&text_mutex);
/* See comment in do_optimize_kprobes() */
lockdep_assert_cpus_held();
if (list_empty(&unoptimizing_list))
return;
- mutex_lock(&text_mutex);
arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
/* Loop free_list for disarming */
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
} else
list_del_init(&op->list);
}
- mutex_unlock(&text_mutex);
}
/* Reclaim all kprobes on the free_list */
{
mutex_lock(&kprobe_mutex);
cpus_read_lock();
+ mutex_lock(&text_mutex);
/* Lock modules while optimizing kprobes */
mutex_lock(&module_mutex);
do_free_cleaned_kprobes();
mutex_unlock(&module_mutex);
+ mutex_unlock(&text_mutex);
cpus_read_unlock();
mutex_unlock(&kprobe_mutex);
/*
* Modules' sections will be aligned on page boundaries
* to ensure complete separation of code and data, but
- * only when CONFIG_STRICT_MODULE_RWX=y
+ * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
*/
-#ifdef CONFIG_STRICT_MODULE_RWX
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
static inline void sched_submit_work(struct task_struct *tsk)
{
- if (!tsk->state || tsk_is_pi_blocked(tsk))
+ if (!tsk->state)
return;
/*
preempt_enable_no_resched();
}
+ if (tsk_is_pi_blocked(tsk))
+ return;
+
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
return retval;
}
-static int sched_read_attr(struct sched_attr __user *uattr,
- struct sched_attr *attr,
- unsigned int usize)
+/*
+ * Copy the kernel size attribute structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+ struct sched_attr *kattr,
+ unsigned int usize)
{
- int ret;
+ unsigned int ksize = sizeof(*kattr);
if (!access_ok(uattr, usize))
return -EFAULT;
/*
- * If we're handed a smaller struct than we know of,
- * ensure all the unknown bits are 0 - i.e. old
- * user-space does not get uncomplete information.
+ * sched_getattr() ABI forwards and backwards compatibility:
+ *
+ * If usize == ksize then we just copy everything to user-space and all is good.
+ *
+ * If usize < ksize then we only copy as much as user-space has space for,
+ * this keeps ABI compatibility as well. We skip the rest.
+ *
+ * If usize > ksize then user-space is using a newer version of the ABI,
+ * with fields the kernel doesn't know about. Just ignore them - tooling can
+ * detect the kernel's knowledge of attributes from the attr->size value
+ * which is set to ksize in this case.
*/
- if (usize < sizeof(*attr)) {
- unsigned char *addr;
- unsigned char *end;
-
- addr = (void *)attr + usize;
- end = (void *)attr + sizeof(*attr);
+ kattr->size = min(usize, ksize);
- for (; addr < end; addr++) {
- if (*addr)
- return -EFBIG;
- }
-
- attr->size = usize;
- }
-
- ret = copy_to_user(uattr, attr, attr->size);
- if (ret)
+ if (copy_to_user(uattr, kattr, kattr->size))
return -EFAULT;
return 0;
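Seen from userspace, the rewrite means a caller may pass a sched_attr smaller or larger than the kernel's: the kernel copies min(usize, ksize) bytes and records the copied size in attr.size. A hedged sketch using the raw syscall (struct sched_attr comes from the UAPI header; glibc provides no wrapper):

    #include <linux/sched/types.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int get_policy(pid_t pid)
    {
        struct sched_attr attr;

        /* Works whether this binary's sched_attr is older or newer than
         * the kernel's; attr.size tells us how much the kernel filled in.
         */
        if (syscall(SYS_sched_getattr, pid, &attr, sizeof(attr), 0) < 0)
            return -1;
        return attr.sched_policy;
    }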
* sys_sched_getattr - similar to sched_getparam, but with sched_attr
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
- * @size: sizeof(attr) for fwd/bwd comp.
+ * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
* @flags: for future extension.
*/
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
- unsigned int, size, unsigned int, flags)
+ unsigned int, usize, unsigned int, flags)
{
- struct sched_attr attr = {
- .size = sizeof(struct sched_attr),
- };
+ struct sched_attr kattr = { };
struct task_struct *p;
int retval;
- if (!uattr || pid < 0 || size > PAGE_SIZE ||
- size < SCHED_ATTR_SIZE_VER0 || flags)
+ if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+ usize < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL;
rcu_read_lock();
if (retval)
goto out_unlock;
- attr.sched_policy = p->policy;
+ kattr.sched_policy = p->policy;
if (p->sched_reset_on_fork)
- attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
if (task_has_dl_policy(p))
- __getparam_dl(p, &attr);
+ __getparam_dl(p, &kattr);
else if (task_has_rt_policy(p))
- attr.sched_priority = p->rt_priority;
+ kattr.sched_priority = p->rt_priority;
else
- attr.sched_nice = task_nice(p);
+ kattr.sched_nice = task_nice(p);
#ifdef CONFIG_UCLAMP_TASK
- attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
- attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+ kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
rcu_read_unlock();
- retval = sched_read_attr(uattr, &attr, size);
- return retval;
+ return sched_attr_copy_to_user(uattr, &kattr, usize);
out_unlock:
rcu_read_unlock();
struct task_struct *thread;
bool work_in_progress;
+ bool limits_changed;
bool need_freq_update;
};
!cpufreq_this_cpu_can_update(sg_policy->policy))
return false;
- if (unlikely(sg_policy->need_freq_update))
+ if (unlikely(sg_policy->limits_changed)) {
+ sg_policy->limits_changed = false;
+ sg_policy->need_freq_update = true;
return true;
+ }
delta_ns = time - sg_policy->last_freq_update_time;
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
- sg_policy->need_freq_update = true;
+ sg_policy->limits_changed = true;
}
static void sugov_update_single(struct update_util_data *hook, u64 time,
if (!sugov_should_update_freq(sg_policy, time))
return;
- busy = sugov_cpu_is_busy(sg_cpu);
+ /* Limits may have changed, don't skip frequency update */
+ busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
sg_policy->last_freq_update_time = 0;
sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
+ sg_policy->limits_changed = false;
sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0;
mutex_unlock(&sg_policy->work_lock);
}
- sg_policy->need_freq_update = true;
+ sg_policy->limits_changed = true;
}
struct cpufreq_governor schedutil_gov = {
if (likely(cfs_rq->runtime_remaining > 0))
return;
+ if (cfs_rq->throttled)
+ return;
/*
* if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled
if (!cfs_rq_throttled(cfs_rq))
goto next;
+ /* By the above check, this should never be true */
+ SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
runtime = -cfs_rq->runtime_remaining + 1;
if (runtime > remaining)
runtime = remaining;
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
if (kworker_to_destroy) {
+ /*
+ * After the RCU grace period has expired, the worker
+ * can no longer be found through group->poll_kworker.
+ * But it might have been already scheduled before
+ * that - deschedule it cleanly before destroying it.
+ */
kthread_cancel_delayed_work_sync(&group->poll_work);
+ atomic_set(&group->poll_scheduled, 0);
+
kthread_destroy_worker(kworker_to_destroy);
}
kfree(t);
handler == SIG_DFL && !(force && sig_kernel_only(sig)))
return true;
+ /* Only allow kernel generated signals to this kthread */
+ if (unlikely((t->flags & PF_KTHREAD) &&
+ (handler == SIG_KTHREAD_KERNEL) && !force))
+ return true;
+
return sig_handler_ignored(handler, sig);
}
static struct pid *pidfd_to_pid(const struct file *file)
{
- if (file->f_op == &pidfd_fops)
- return file->private_data;
+ struct pid *pid;
+
+ pid = pidfd_pid(file);
+ if (!IS_ERR(pid))
+ return pid;
return tgid_pidfd_to_pid(file);
}
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
tk->offs_boot = ktime_add(tk->offs_boot, delta);
+ /*
+ * Timespec representation for VDSO update to avoid 64bit division
+ * on every update.
+ */
+ tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
}
/*
struct timekeeper *tk)
{
struct vdso_timestamp *vdso_ts;
- u64 nsec;
+ u64 nsec, sec;
vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask;
}
vdso_ts->nsec = nsec;
- /* CLOCK_MONOTONIC_RAW */
- vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
- vdso_ts->sec = tk->raw_sec;
- vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
+ /* Copy MONOTONIC time for BOOTTIME */
+ sec = vdso_ts->sec;
+ /* Add the boot offset */
+ sec += tk->monotonic_to_boot.tv_sec;
+ nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
/* CLOCK_BOOTTIME */
vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
- vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
- nsec = tk->tkr_mono.xtime_nsec;
- nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
- ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
+ vdso_ts->sec = sec;
+
while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
vdso_ts->sec++;
}
vdso_ts->nsec = nsec;
+ /* CLOCK_MONOTONIC_RAW */
+ vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
+ vdso_ts->sec = tk->raw_sec;
+ vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
+
/* CLOCK_TAI */
vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
hnd = &iter->probe_entry->hlist;
hash = iter->probe->ops.func_hash->filter_hash;
+
+ /*
+ * A probe being registered may temporarily have an empty hash
+ * and it's at the end of the func_probes list.
+ */
+ if (!hash || hash == EMPTY_HASH)
+ return NULL;
+
size = 1 << hash->size_bits;
retry:
mutex_unlock(&ftrace_lock);
+ /*
+ * Note, there's a small window here where the func_hash->filter_hash
+ * may be NULL or empty. Be careful when reading the loop.
+ */
mutex_lock(&probe->ops.func_hash->regex_lock);
orig_hash = &probe->ops.func_hash->filter_hash;
old_hash = *orig_hash;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
+ if (!hash) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = ftrace_match_records(hash, glob, strlen(glob));
/* Nothing found? */
/**
* update_max_tr_single - only copy one trace over, and reset the rest
- * @tr - tracer
- * @tsk - task with the latency
- * @cpu - the cpu of the buffer to copy.
+ * @tr: tracer
+ * @tsk: task with the latency
+ * @cpu: the cpu of the buffer to copy.
*
* Flip the trace of a single CPU buffer between the @tr and the max_tr.
*/
/**
* register_tracer - register a tracer with the ftrace system.
- * @type - the plugin for the tracer
+ * @type: the plugin for the tracer
*
* Register a new plugin tracer.
*/
/**
* tracing_record_taskinfo - record the task info of a task
*
- * @task - task to record
- * @flags - TRACE_RECORD_CMDLINE for recording comm
- * - TRACE_RECORD_TGID for recording tgid
+ * @task: task to record
+ * @flags: TRACE_RECORD_CMDLINE for recording comm
+ * TRACE_RECORD_TGID for recording tgid
*/
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
/**
* tracing_record_taskinfo_sched_switch - record task info for sched_switch
*
- * @prev - previous task during sched_switch
- * @next - next task during sched_switch
- * @flags - TRACE_RECORD_CMDLINE for recording comm
- * TRACE_RECORD_TGID for recording tgid
+ * @prev: previous task during sched_switch
+ * @next: next task during sched_switch
+ * @flags: TRACE_RECORD_CMDLINE for recording comm
+ * TRACE_RECORD_TGID for recording tgid
*/
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
struct task_struct *next, int flags)
/**
* trace_vbprintk - write binary msg to tracing buffer
- *
+ * @ip: The address of the caller
+ * @fmt: The string format to write to the buffer
+ * @args: Arguments for @fmt
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
return ret;
}
-static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
char *event = NULL, *sub = NULL, *match;
int ret;
for (i = 0; i < tp->nr_args; i++)
traceprobe_free_probe_arg(&tp->args[i]);
- kfree(call->class->system);
+ if (call->class)
+ kfree(call->class->system);
kfree(call->name);
kfree(call->print_fmt);
}
config PARMAN
tristate "parman" if COMPILE_TEST
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
+
config STRING_SELFTEST
tristate "Test string functions"
config GENERIC_LIB_UCMPDI2
bool
-
-config OBJAGG
- tristate "objagg" if COMPILE_TEST
{
size /= esize;
- size = roundup_pow_of_two(size);
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
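Rounding down matters because kfifo_init() works on a caller-supplied buffer: rounding the element count up would let the fifo index run past the end of that buffer. A worked example:

    /* Caller hands kfifo_init() a buffer with room for 100 elements:  */
    /*   roundup_pow_of_two(100)   == 128 -> indexes beyond the buffer */
    /*   rounddown_pow_of_two(100) ==  64 -> stays inside the buffer   */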
struct logic_pio_hwaddr *range;
resource_size_t start;
resource_size_t end;
- resource_size_t mmio_sz = 0;
+ resource_size_t mmio_end = 0;
resource_size_t iio_sz = MMIO_UPPER_LIMIT;
int ret = 0;
end = new_range->hw_start + new_range->size;
mutex_lock(&io_range_mutex);
- list_for_each_entry_rcu(range, &io_range_list, list) {
+ list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
goto end_register;
/* for MMIO ranges we need to check for overlap */
if (start >= range->hw_start + range->size ||
end < range->hw_start) {
- mmio_sz += range->size;
+ mmio_end = range->io_start + range->size;
} else {
ret = -EFAULT;
goto end_register;
/* range not registered yet, check for available space */
if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
- if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+ if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
/* if it's too big check if 64K space can be reserved */
- if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+ if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
ret = -E2BIG;
goto end_register;
}
new_range->size = SZ_64K;
pr_warn("Requested IO range too big, new size set to 64K\n");
}
- new_range->io_start = mmio_sz;
+ new_range->io_start = mmio_end;
} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
ret = -E2BIG;
return ret;
}
+/**
+ * logic_pio_unregister_range - unregister a logical PIO range for a host
+ * @range: pointer to the IO range which has been already registered.
+ *
+ * Unregister a previously-registered IO range node.
+ */
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
+{
+ mutex_lock(&io_range_mutex);
+ list_del_rcu(&range->list);
+ mutex_unlock(&io_range_mutex);
+ synchronize_rcu();
+}
+
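The unregister path pairs list_del_rcu() with synchronize_rcu(): once synchronize_rcu() returns, no reader that began traversing before the deletion can still hold the entry, so the caller may free the range. The reader side this protects is the pattern used throughout this file:

    rcu_read_lock();
    list_for_each_entry_rcu(range, &io_range_list, list) {
        /* ... examine range; references must not outlive the unlock ... */
    }
    rcu_read_unlock();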
/**
* find_io_range_by_fwnode - find logical PIO range for given FW node
* @fwnode: FW node handle associated with logical PIO range
*/
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
- struct logic_pio_hwaddr *range;
+ struct logic_pio_hwaddr *range, *found_range = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
- if (range->fwnode == fwnode)
- return range;
+ if (range->fwnode == fwnode) {
+ found_range = range;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+
+ return found_range;
}
/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
- struct logic_pio_hwaddr *range;
+ struct logic_pio_hwaddr *range, *found_range = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
- if (in_range(pio, range->io_start, range->size))
- return range;
+ if (in_range(pio, range->io_start, range->size)) {
+ found_range = range;
+ break;
+ }
}
- pr_err("PIO entry token %lx invalid\n", pio);
- return NULL;
+ rcu_read_unlock();
+
+ if (!found_range)
+ pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+ return found_range;
}
/**
{
struct logic_pio_hwaddr *range;
+ rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
if (range->flags != LOGIC_PIO_CPU_MMIO)
continue;
- if (in_range(addr, range->hw_start, range->size))
- return addr - range->hw_start + range->io_start;
+ if (in_range(addr, range->hw_start, range->size)) {
+ unsigned long cpuaddr;
+
+ cpuaddr = addr - range->hw_start + range->io_start;
+
+ rcu_read_unlock();
+ return cpuaddr;
+ }
}
- pr_err("addr %llx not registered in io_range_list\n",
- (unsigned long long) addr);
+ rcu_read_unlock();
+
+ pr_err("addr %pa not registered in io_range_list\n", &addr);
+
return ~0UL;
}
struct page *balloon_page_alloc(void)
{
struct page *page = alloc_page(balloon_mapping_gfp_mask() |
- __GFP_NOMEMALLOC | __GFP_NORETRY);
+ __GFP_NOMEMALLOC | __GFP_NORETRY |
+ __GFP_NOWARN);
return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
+#include <linux/page_owner.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
* available
* never: never stall for any thp allocation
*/
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
{
const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+ gfp_t this_node = 0;
+
+#ifdef CONFIG_NUMA
+ struct mempolicy *pol;
+ /*
+ * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
+ * specified, to express a general desire to stay on the current
+ * node for optimistic allocation attempts. If the defrag mode
+	 * and/or madvise hint requires direct reclaim, then we prefer to
+	 * fall back to other nodes rather than node reclaim, because node
+	 * reclaim can lead to excessive reclaim even though there is free memory
+ * on other nodes. We expect that NUMA preferences are specified
+ * by memory policies.
+ */
+ pol = get_vma_policy(vma, addr);
+ if (pol->mode != MPOL_BIND)
+ this_node = __GFP_THISNODE;
+ mpol_cond_put(pol);
+#endif
- /* Always do synchronous compaction */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
-
- /* Kick kcompactd and fail quickly */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
-
- /* Synchronous compaction if madvised, otherwise kick kcompactd */
+ return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT |
- (vma_madvised ? __GFP_DIRECT_RECLAIM :
- __GFP_KSWAPD_RECLAIM);
-
- /* Only do synchronous compaction if madvised */
+ return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+ __GFP_KSWAPD_RECLAIM | this_node);
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT |
- (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
-
- return GFP_TRANSHUGE_LIGHT;
+ return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+ this_node);
+ return GFP_TRANSHUGE_LIGHT | this_node;
}
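Summarizing the rewritten function, the gfp mask per THP defrag mode now works out roughly as follows, where this_node stands for __GFP_THISNODE unless the VMA's policy is MPOL_BIND (a reading of the code above, not an authoritative table):

    /*
     * always        : GFP_TRANSHUGE, plus __GFP_NORETRY unless madvised;
     *                 never __GFP_THISNODE (direct reclaim is allowed)
     * defer         : GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node
     * defer+madvise : madvised ? ..._LIGHT | __GFP_DIRECT_RECLAIM
     *                          : ..._LIGHT | __GFP_KSWAPD_RECLAIM | this_node
     * madvise       : madvised ? ..._LIGHT | __GFP_DIRECT_RECLAIM
     *                          : ..._LIGHT | this_node
     * never         : GFP_TRANSHUGE_LIGHT | this_node
     */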
/* Caller must hold page table lock. */
pte_free(vma->vm_mm, pgtable);
return ret;
}
- gfp = alloc_hugepage_direct_gfpmask(vma);
- page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+ gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+ page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
alloc:
if (__transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
- huge_gfp = alloc_hugepage_direct_gfpmask(vma);
- new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
+ huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+ new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
+ haddr, numa_node_id());
} else
new_page = NULL;
}
ClearPageCompound(head);
+
+ split_page_owner(head, HPAGE_PMD_ORDER);
+
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
/* Additional pin to swap cache */
page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
+ /*
+ * Returning error will result in faulting task being
+ * sent SIGBUS. The hugetlb fault mutex prevents two
+ * tasks from racing to fault in the same page which
+		 * could result in spurious "unable to allocate" errors.
+		 * Page migration does not take the fault mutex, but
+		 * does a clear-then-write of PTEs under the page table
+ * lock. Page fault code could race with migration,
+ * notice the clear pte and try to allocate a page
+ * here. Before returning error, get ptl and make
+ * sure there really is no pte entry.
+ */
+ ptl = huge_pte_lock(h, mm, ptep);
+ if (!huge_pte_none(huge_ptep_get(ptep))) {
+ ret = 0;
+ spin_unlock(ptl);
+ goto out;
+ }
+ spin_unlock(ptl);
ret = vmf_error(PTR_ERR(page));
goto out;
}
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return shadow_byte < 0 ||
shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
- else
- return tag != (u8)shadow_byte;
+
+ /* else CONFIG_KASAN_SW_TAGS: */
+ if ((u8)shadow_byte == KASAN_TAG_INVALID)
+ return true;
+ if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
+ return true;
+
+ return false;
}
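For the software tag-based mode, the rewritten check distinguishes three cases. A compact sketch, with the tag constants assumed from mm/kasan (KASAN_TAG_KERNEL = 0xFF matches everything, KASAN_TAG_INVALID = 0xFE marks freed or redzone memory):

    #include <stdbool.h>

    #define TAG_KERNEL  0xFFu       /* assumed: untagged native pointers */
    #define TAG_INVALID 0xFEu       /* assumed: freed / redzone granule */

    static bool tag_mismatch(unsigned char ptr_tag, unsigned char shadow)
    {
            if (shadow == TAG_INVALID)
                    return true;            /* memory is invalid outright */
            if (ptr_tag == TAG_KERNEL)
                    return false;           /* match-all tag: allowed */
            return ptr_tag != shadow;       /* otherwise tags must match */
    }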
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
/* stop any memory operation tracing */
kmemleak_enabled = 0;
+ kmemleak_early_log = 0;
/* check whether it is too early for a kernel thread */
if (kmemleak_initialized)
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
- kmemleak_early_log = 0;
kmemleak_disable();
return;
}
/* Update memcg */
__mod_memcg_state(memcg, idx, val);
+ /* Update lruvec */
+ __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
struct mem_cgroup_per_node *pi;
- /*
- * Batch local counters to keep them in sync with
- * the hierarchical ones.
- */
- __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
atomic_long_add(x, &pi->lruvec_stat[idx]);
x = 0;
__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
+{
+ struct page *page = virt_to_head_page(p);
+ pg_data_t *pgdat = page_pgdat(page);
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = memcg_from_slab_page(page);
+
+ /* Untracked pages have no memcg, no lruvec. Update only the node */
+ if (!memcg || memcg == root_mem_cgroup) {
+ __mod_node_page_state(pgdat, idx, val);
+ } else {
+ lruvec = mem_cgroup_lruvec(pgdat, memcg);
+ __mod_lruvec_state(lruvec, idx, val);
+ }
+ rcu_read_unlock();
+}
+
/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
css_put(&prev->css);
}
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+ struct mem_cgroup *dead_memcg)
{
- struct mem_cgroup *memcg = dead_memcg;
struct mem_cgroup_reclaim_iter *iter;
struct mem_cgroup_per_node *mz;
int nid;
int i;
- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- for_each_node(nid) {
- mz = mem_cgroup_nodeinfo(memcg, nid);
- for (i = 0; i <= DEF_PRIORITY; i++) {
- iter = &mz->iter[i];
- cmpxchg(&iter->position,
- dead_memcg, NULL);
- }
+ for_each_node(nid) {
+ mz = mem_cgroup_nodeinfo(from, nid);
+ for (i = 0; i <= DEF_PRIORITY; i++) {
+ iter = &mz->iter[i];
+ cmpxchg(&iter->position,
+ dead_memcg, NULL);
}
}
}
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+ struct mem_cgroup *memcg = dead_memcg;
+ struct mem_cgroup *last;
+
+ do {
+ __invalidate_reclaim_iterators(memcg, dead_memcg);
+ last = memcg;
+ } while ((memcg = parent_mem_cgroup(memcg)));
+
+ /*
+	 * When cgroup1 non-hierarchy mode is used,
+ * parent_mem_cgroup() does not walk all the way up to the
+ * cgroup root (root_mem_cgroup). So we have to handle
+ * dead_memcg from cgroup root separately.
+ */
+ if (last != root_mem_cgroup)
+ __invalidate_reclaim_iterators(root_mem_cgroup,
+ dead_memcg);
+}
+
/**
* mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
* @memcg: hierarchy root
}
}
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
+{
+ unsigned long stat[MEMCG_NR_STAT];
+ struct mem_cgroup *mi;
+ int node, cpu, i;
+ int min_idx, max_idx;
+
+ if (slab_only) {
+ min_idx = NR_SLAB_RECLAIMABLE;
+ max_idx = NR_SLAB_UNRECLAIMABLE;
+ } else {
+ min_idx = 0;
+ max_idx = MEMCG_NR_STAT;
+ }
+
+ for (i = min_idx; i < max_idx; i++)
+ stat[i] = 0;
+
+ for_each_online_cpu(cpu)
+ for (i = min_idx; i < max_idx; i++)
+ stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
+
+ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+ for (i = min_idx; i < max_idx; i++)
+ atomic_long_add(stat[i], &mi->vmstats[i]);
+
+ if (!slab_only)
+ max_idx = NR_VM_NODE_STAT_ITEMS;
+
+ for_each_node(node) {
+ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+ struct mem_cgroup_per_node *pi;
+
+ for (i = min_idx; i < max_idx; i++)
+ stat[i] = 0;
+
+ for_each_online_cpu(cpu)
+ for (i = min_idx; i < max_idx; i++)
+ stat[i] += per_cpu(
+ pn->lruvec_stat_cpu->count[i], cpu);
+
+ for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
+ for (i = min_idx; i < max_idx; i++)
+ atomic_long_add(stat[i], &pi->lruvec_stat[i]);
+ }
+}
+
+static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
+{
+ unsigned long events[NR_VM_EVENT_ITEMS];
+ struct mem_cgroup *mi;
+ int cpu, i;
+
+ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+ events[i] = 0;
+
+ for_each_online_cpu(cpu)
+ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+ events[i] += per_cpu(memcg->vmstats_percpu->events[i],
+ cpu);
+
+ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+ atomic_long_add(events[i], &mi->vmevents[i]);
+}
+
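memcg_flush_percpu_vmstats() and memcg_flush_percpu_vmevents() apply one pattern: sum the residual per-CPU deltas once, then fold that total into the atomic counters of the cgroup and every ancestor, so hierarchical totals stay exact after the cgroup is gone. A userspace-flavored sketch with illustrative types:

    #include <stdatomic.h>

    #define NR_CPUS 4

    struct group {
            struct group *parent;
            atomic_long stat;               /* hierarchical counter */
            long percpu[NR_CPUS];           /* unflushed per-CPU deltas */
    };

    static void flush_percpu(struct group *g)
    {
            long total = 0;

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    total += g->percpu[cpu];

            /* fold once into the group and all of its ancestors */
            for (struct group *mi = g; mi; mi = mi->parent)
                    atomic_fetch_add(&mi->stat, total);
    }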
#ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
if (!parent)
parent = root_mem_cgroup;
+ /*
+ * Deactivate and reparent kmem_caches. Then flush percpu
+ * slab statistics to have precise values at the parent and
+ * all ancestor levels. It's required to keep slab stats
+ * accurate after the reparenting of kmem_caches.
+ */
memcg_deactivate_kmem_caches(memcg, parent);
+ memcg_flush_percpu_vmstats(memcg, true);
kmemcg_id = memcg->kmemcg_id;
BUG_ON(kmemcg_id < 0);
{
int node;
+ /*
+ * Flush percpu vmstats and vmevents to guarantee the value correctness
+ * on parent's and all ancestor levels.
+ */
+ memcg_flush_percpu_vmstats(memcg, false);
+ memcg_flush_percpu_vmevents(memcg);
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
free_percpu(memcg->vmstats_percpu);
},
};
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
struct queue_pages {
}
/*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- * page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 2 - THP was split.
+ * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
+ * existing page was already on a node that does not follow the
+ * policy.
*/
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
unsigned long end, struct mm_walk *walk)
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+ ret = 2;
goto out;
}
- if (!queue_pages_required(page, qp)) {
- ret = 1;
+ if (!queue_pages_required(page, qp))
goto unlock;
- }
- ret = 1;
flags = qp->flags;
/* go to thp migration */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- if (!vma_migratable(walk->vma)) {
- ret = -EIO;
+ if (!vma_migratable(walk->vma) ||
+ migrate_page_add(page, qp->pagelist, flags)) {
+ ret = 1;
goto unlock;
}
-
- migrate_page_add(page, qp->pagelist, flags);
} else
ret = -EIO;
unlock:
/*
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ * on a node that does not follow the policy.
*/
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
int ret;
+ bool has_unmovable = false;
pte_t *pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
- if (ret > 0)
- return 0;
- else if (ret < 0)
+ if (ret != 2)
return ret;
}
+ /* THP was split, fall through to pte walk */
if (pmd_trans_unstable(pmd))
return 0;
if (!queue_pages_required(page, qp))
continue;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- if (!vma_migratable(vma))
+ /* MPOL_MF_STRICT must be specified if we get here */
+ if (!vma_migratable(vma)) {
+ has_unmovable = true;
break;
- migrate_page_add(page, qp->pagelist, flags);
+ }
+
+ /*
+ * Do not abort immediately since there may be
+			 * temporarily off-LRU pages in the range. We still
+			 * need to migrate the other LRU pages.
+ */
+ if (migrate_page_add(page, qp->pagelist, flags))
+ has_unmovable = true;
} else
break;
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
+
+ if (has_unmovable)
+ return 1;
+
return addr != end ? -EIO : 0;
}
*
* If pages found in a given range are on a set of nodes (determined by
* @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
/*
* page migration, thp tail pages can be passed.
*/
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
struct page *head = compound_head(page);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
+ } else if (flags & MPOL_MF_STRICT) {
+ /*
+		 * A non-movable page may reach here. And, there may be
+		 * temporarily off-LRU pages or non-LRU movable pages.
+ * Treat them as unmovable pages since they can't be
+ * isolated, so they can't be moved at the moment. It
+ * should return -EIO for this case too.
+ */
+ return -EIO;
}
}
+
+ return 0;
}
/* page allocation callback for NUMA node migration */
} else if (PageTransHuge(page)) {
struct page *thp;
- thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
- HPAGE_PMD_ORDER);
+ thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
+ address, numa_node_id());
if (!thp)
return NULL;
prep_transhuge_page(thp);
}
#else
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
+ return -EIO;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
struct mempolicy *new;
unsigned long end;
int err;
+ int ret;
LIST_HEAD(pagelist);
if (flags & ~(unsigned long)MPOL_MF_VALID)
if (err)
goto mpol_out;
- err = queue_pages_range(mm, start, end, nmask,
+ ret = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
- if (!err)
- err = mbind_range(mm, start, end, new);
+
+ if (ret < 0) {
+ err = -EIO;
+ goto up_out;
+ }
+
+ err = mbind_range(mm, start, end, new);
if (!err) {
int nr_failed = 0;
putback_movable_pages(&pagelist);
}
- if (nr_failed && (flags & MPOL_MF_STRICT))
+ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
err = -EIO;
} else
putback_movable_pages(&pagelist);
+up_out:
up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
mpol_put(new);
return err;
}
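The user-visible contract after these mempolicy changes, sketched from userspace (a hypothetical example; sizes and node numbers are arbitrary): with MPOL_MF_MOVE | MPOL_MF_STRICT, mbind() now attempts to migrate every movable page in the range and reports EIO only if some page genuinely could not be moved.

    #include <numaif.h>             /* mbind(), from the libnuma headers */
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 1UL << 21;
            unsigned long nodemask = 1;     /* bind to node 0 */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
                      MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
                    perror("mbind");        /* EIO: an unmovable page */
            return 0;
    }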
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
-static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = __get_vma_policy(vma, addr);
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
* @node: Which node to prefer for allocation (modulo policy).
- * @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, int node, bool hugepage)
+ unsigned long addr, int node)
{
struct mempolicy *pol;
struct page *page;
goto out;
}
- if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
- int hpage_node = node;
-
- /*
- * For hugepage allocation and non-interleave policy which
- * allows the current node (or other explicitly preferred
- * node) we only try to allocate from the current/preferred
- * node and don't fall back to other nodes, as the cost of
- * remote accesses would likely offset THP benefits.
- *
- * If the policy is interleave, or does not allow the current
- * node in its nodemask, we allocate the standard way.
- */
- if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
- hpage_node = pol->v.preferred_node;
-
- nmask = policy_nodemask(gfp, pol);
- if (!nmask || node_isset(hpage_node, *nmask)) {
- mpol_cond_put(pol);
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, order);
- goto out;
- }
- }
-
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
mem_cgroup_uncharge(page);
+ /*
+ * When a device_private page is freed, the page->mapping field
+ * may still contain a (stale) mapping value. For example, the
+ * lower bits of page->mapping may still identify the page as
+ * an anonymous page. Ultimately, this entire field is just
+ * stale and wrong, and it will cause errors if not cleared.
+ * One example is:
+ *
+ * migrate_vma_pages()
+ * migrate_vma_insert_page()
+ * page_add_new_anon_rmap()
+ * __page_set_anon_rmap()
+ * ...checks page->mapping, via PageAnon(page) call,
+ * and incorrectly concludes that the page is an
+ * anonymous page. Therefore, it incorrectly,
+ * silently fails to set up the new anon rmap.
+ *
+ * For other types of ZONE_DEVICE pages, migration is either
+ * handled differently or not done at all, so there is no need
+ * to clear page->mapping.
+ */
+ if (is_device_private_page(page))
+ page->mapping = NULL;
+
page->pgmap->ops->page_free(page);
} else if (!count)
__put_page(page);
unsigned int order;
int pages_moved = 0;
-#ifndef CONFIG_HOLES_IN_ZONE
- /*
- * page_zone is not safe to call in this context when
- * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
- * anyway as we check zone boundaries in move_freepages_block().
- * Remove at a later date when no bug reports exist related to
- * grouping pages by mobility
- */
- VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
- pfn_valid(page_to_pfn(end_page)) &&
- page_zone(start_page) != page_zone(end_page));
-#endif
for (page = start_page; page <= end_page;) {
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}
- /* Make sure we are not inadvertently changing nodes */
- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
if (!PageBuddy(page)) {
/*
* We assume that pages that could be isolated for
continue;
}
+ /* Make sure we are not inadvertently changing nodes */
+ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+ VM_BUG_ON_PAGE(page_zone(page) != zone, page);
+
order = page_order(page);
move_to_free_area(page, &zone->free_area[order], migratetype);
page += 1 << order;
/*
* No need to invalidate here it will synchronize on
* against the special swap migration pte.
+ *
+ * The assignment to subpage above was computed from a
+ * swap PTE which results in an invalid pointer.
+ * Since only PAGE_SIZE pages can currently be
+ * migrated, just set it to page. This will need to be
+ * changed when hugepage migrations to device private
+ * memory are supported.
*/
+ subpage = page;
goto discard;
}
shmem_pseudo_vma_init(&pvma, info, hindex);
page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
- HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
+ HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
shmem_pseudo_vma_destroy(&pvma);
if (page)
prep_transhuge_page(page);
bool to_user)
{
/* Reject if object wraps past end of memory. */
- if (ptr + n < ptr)
+ if (ptr + (n - 1) < ptr)
usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
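The (n - 1) is not cosmetic: "ptr + n" points one byte past the object, so an object that ends exactly at the top of the address space wrapped to 0 and was falsely rejected. A worked example on a 64-bit machine:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            /* an object occupying the last 16 bytes of the address space */
            uintptr_t ptr = UINTPTR_MAX - 15;
            uintptr_t n = 16;

            assert(ptr + n < ptr);          /* old check: false positive */
            assert(!(ptr + (n - 1) < ptr)); /* new check: accepted */
            return 0;
    }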
/* Reject if NULL or ZERO-allocation. */
if (va == NULL)
goto overflow;
+ /*
+	 * If the required width exceeds the current VA block, move
+ * base downwards and then recheck.
+ */
+ if (base + end > va->va_end) {
+ base = pvm_determine_end_from_reverse(&va, align) - end;
+ term_area = area;
+ continue;
+ }
+
/*
* If this VA does not fit, move base downwards and recheck.
*/
- if (base + start < va->va_start || base + end > va->va_end) {
+ if (base + start < va->va_start) {
va = node_to_va(rb_prev(&va->rb_node));
base = pvm_determine_end_from_reverse(&va, align) - end;
term_area = area;
/* Can pages be swapped as part of reclaim? */
unsigned int may_swap:1;
- /* e.g. boosted watermark reclaim leaves slabs alone */
- unsigned int may_shrinkslab:1;
-
/*
* Cgroups are not reclaimed below their configured memory.low,
* unless we threaten to OOM. If any cgroups are skipped due to
shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
node_lru_pages += lru_pages;
- if (sc->may_shrinkslab) {
- shrink_slab(sc->gfp_mask, pgdat->node_id,
- memcg, sc->priority);
- }
+ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
+ sc->priority);
/* Record the group's reclaim efficiency */
vmpressure(sc->gfp_mask, memcg, false,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = 1,
- .may_shrinkslab = 1,
};
/*
#ifdef CONFIG_MEMCG
+/* Only used by soft limit reclaim. Do not reuse for anything else. */
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
- .may_shrinkslab = 1,
};
unsigned long lru_pages;
- set_task_reclaim_state(current, &sc.reclaim_state);
+ WARN_ON_ONCE(!current->reclaim_state);
+
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
- set_task_reclaim_state(current, NULL);
*nr_scanned = sc.nr_scanned;
return sc.nr_reclaimed;
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = may_swap,
- .may_shrinkslab = 1,
};
set_task_reclaim_state(current, &sc.reclaim_state);
*/
sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
sc.may_swap = !nr_boost_reclaim;
- sc.may_shrinkslab = !nr_boost_reclaim;
/*
* Do some background aging of the anon list, to give
if (node->count && node->count == node->nr_values) {
if (list_empty(&node->private_list)) {
list_lru_add(&shadow_nodes, &node->private_list);
- __inc_lruvec_page_state(virt_to_page(node),
- WORKINGSET_NODES);
+ __inc_lruvec_slab_state(node, WORKINGSET_NODES);
}
} else {
if (!list_empty(&node->private_list)) {
list_lru_del(&shadow_nodes, &node->private_list);
- __dec_lruvec_page_state(virt_to_page(node),
- WORKINGSET_NODES);
+ __dec_lruvec_slab_state(node, WORKINGSET_NODES);
}
}
}
}
list_lru_isolate(lru, item);
- __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES);
+ __dec_lruvec_slab_state(node, WORKINGSET_NODES);
spin_unlock(lru_lock);
* shadow entries we were tracking ...
*/
xas_store(&xas, NULL);
- __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
+ __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
out_invalid:
xa_unlock_irq(&mapping->i_pages);
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <linux/zpool.h>
#include <linux/magic.h>
* @release_wq: workqueue for safe page release
* @work: work_struct for safe page release
* @inode: inode for z3fold pseudo filesystem
+ * @destroying: bool to stop migration once we start destruction
+ * @isolated: int to count the number of pages currently in isolation
*
* This structure is allocated at pool creation time and maintains metadata
* pertaining to a particular z3fold pool.
const struct zpool_ops *zpool_ops;
struct workqueue_struct *compact_wq;
struct workqueue_struct *release_wq;
+ struct wait_queue_head isolate_wait;
struct work_struct work;
struct inode *inode;
+ bool destroying;
+ int isolated;
};
/*
goto out_c;
spin_lock_init(&pool->lock);
spin_lock_init(&pool->stale_lock);
+ init_waitqueue_head(&pool->isolate_wait);
pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
if (!pool->unbuddied)
goto out_pool;
return NULL;
}
+static bool pool_isolated_are_drained(struct z3fold_pool *pool)
+{
+ bool ret;
+
+ spin_lock(&pool->lock);
+ ret = pool->isolated == 0;
+ spin_unlock(&pool->lock);
+ return ret;
+}
/**
* z3fold_destroy_pool() - destroys an existing z3fold pool
* @pool: the z3fold pool to be destroyed
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
kmem_cache_destroy(pool->c_handle);
- z3fold_unregister_migration(pool);
- destroy_workqueue(pool->release_wq);
+ /*
+	 * We set pool->destroying under lock to ensure that
+ * z3fold_page_isolate() sees any changes to destroying. This way we
+ * avoid the need for any memory barriers.
+ */
+
+ spin_lock(&pool->lock);
+ pool->destroying = true;
+ spin_unlock(&pool->lock);
+
+ /*
+ * We need to ensure that no pages are being migrated while we destroy
+ * these workqueues, as migration can queue work on either of the
+ * workqueues.
+ */
+	wait_event(pool->isolate_wait, pool_isolated_are_drained(pool));
+
+ /*
+ * We need to destroy pool->compact_wq before pool->release_wq,
+ * as any pending work on pool->compact_wq will call
+ * queue_work(pool->release_wq, &pool->work).
+ *
+ * There are still outstanding pages until both workqueues are drained,
+ * so we cannot unregister migration until then.
+ */
+
destroy_workqueue(pool->compact_wq);
+ destroy_workqueue(pool->release_wq);
+ z3fold_unregister_migration(pool);
kfree(pool);
}
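Together with the z3fold_page_isolate() changes below, this is a standard shutdown handshake: a destroying flag set under the pool lock stops new isolations, a counter tracks the in-flight ones, and a waitqueue lets the destroyer sleep until the counter drains. The same skeleton in portable C, with pthread condition variables standing in for the kernel waitqueue (a sketch, not the z3fold API):

    #include <pthread.h>
    #include <stdbool.h>

    struct pool {
            pthread_mutex_t lock;
            pthread_cond_t drained;
            int isolated;
            bool destroying;
    };

    static bool pool_isolate(struct pool *p)
    {
            bool ok;

            pthread_mutex_lock(&p->lock);
            ok = !p->destroying;            /* refuse once teardown began */
            if (ok)
                    p->isolated++;
            pthread_mutex_unlock(&p->lock);
            return ok;
    }

    static void pool_unisolate(struct pool *p)
    {
            pthread_mutex_lock(&p->lock);
            if (--p->isolated == 0)
                    pthread_cond_broadcast(&p->drained);
            pthread_mutex_unlock(&p->lock);
    }

    static void pool_destroy_wait(struct pool *p)
    {
            pthread_mutex_lock(&p->lock);
            p->destroying = true;           /* stop new isolations */
            while (p->isolated > 0)         /* drain in-flight ones */
                    pthread_cond_wait(&p->drained, &p->lock);
            pthread_mutex_unlock(&p->lock);
    }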
return atomic64_read(&pool->pages_nr);
}
+/*
+ * z3fold_dec_isolated() expects to be called while pool->lock is held.
+ */
+static void z3fold_dec_isolated(struct z3fold_pool *pool)
+{
+ assert_spin_locked(&pool->lock);
+ VM_BUG_ON(pool->isolated <= 0);
+ pool->isolated--;
+
+ /*
+ * If we have no more isolated pages, we have to see if
+ * z3fold_destroy_pool() is waiting for a signal.
+ */
+ if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
+ wake_up_all(&pool->isolate_wait);
+}
+
+static void z3fold_inc_isolated(struct z3fold_pool *pool)
+{
+ pool->isolated++;
+}
+
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
struct z3fold_header *zhdr;
spin_lock(&pool->lock);
if (!list_empty(&page->lru))
list_del(&page->lru);
+ /*
+ * We need to check for destruction while holding pool->lock, as
+ * otherwise destruction could see 0 isolated pages, and
+ * proceed.
+ */
+ if (unlikely(pool->destroying)) {
+ spin_unlock(&pool->lock);
+ /*
+ * If this page isn't stale, somebody else holds a
+		 * reference to it. Let's drop our refcount so that they
+ * can call the release logic.
+ */
+ if (unlikely(kref_put(&zhdr->refcount,
+ release_z3fold_page_locked))) {
+ /*
+ * If we get here we have kref problems, so we
+ * should freak out.
+ */
+ WARN(1, "Z3fold is experiencing kref problems\n");
+ z3fold_page_unlock(zhdr);
+ return false;
+ }
+ z3fold_page_unlock(zhdr);
+ return false;
+ }
+
+ z3fold_inc_isolated(pool);
spin_unlock(&pool->lock);
z3fold_page_unlock(zhdr);
return true;
queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
+ spin_lock(&pool->lock);
+ z3fold_dec_isolated(pool);
+ spin_unlock(&pool->lock);
+
page_mapcount_reset(page);
put_page(page);
return 0;
INIT_LIST_HEAD(&page->lru);
if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
atomic64_dec(&pool->pages_nr);
+ spin_lock(&pool->lock);
+ z3fold_dec_isolated(pool);
+ spin_unlock(&pool->lock);
return;
}
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
+ z3fold_dec_isolated(pool);
spin_unlock(&pool->lock);
z3fold_page_unlock(zhdr);
}
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/migrate.h>
+#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#ifdef CONFIG_COMPACTION
struct inode *inode;
struct work_struct free_work;
+ /* A wait queue for when migration races with async_free_zspage() */
+ struct wait_queue_head migration_wait;
+ atomic_long_t isolated_pages;
+ bool destroying;
#endif
};
zspage->isolated--;
}
+static void putback_zspage_deferred(struct zs_pool *pool,
+ struct size_class *class,
+ struct zspage *zspage)
+{
+ enum fullness_group fg;
+
+ fg = putback_zspage(class, zspage);
+ if (fg == ZS_EMPTY)
+ schedule_work(&pool->free_work);
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+ atomic_long_dec(&pool->isolated_pages);
+ /*
+ * There's no possibility of racing, since wait_for_isolated_drain()
+ * checks the isolated count under &class->lock after enqueuing
+ * on migration_wait.
+ */
+ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+ wake_up_all(&pool->migration_wait);
+}
+
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
struct page *newpage, struct page *oldpage)
{
*/
if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
get_zspage_mapping(zspage, &class_idx, &fullness);
+ atomic_long_inc(&pool->isolated_pages);
remove_zspage(class, zspage, fullness);
}
* Page migration is done so let's putback isolated zspage to
* the list if @page is final isolated subpage in the zspage.
*/
- if (!is_zspage_isolated(zspage))
- putback_zspage(class, zspage);
+ if (!is_zspage_isolated(zspage)) {
+ /*
+ * We cannot race with zs_destroy_pool() here because we wait
+ * for isolation to hit zero before we start destroying.
+ * Also, we ensure that everyone can see pool->destroying before
+ * we start waiting.
+ */
+ putback_zspage_deferred(pool, class, zspage);
+ zs_pool_dec_isolated(pool);
+ }
reset_page(page);
put_page(page);
spin_lock(&class->lock);
dec_zspage_isolation(zspage);
if (!is_zspage_isolated(zspage)) {
- fg = putback_zspage(class, zspage);
/*
* Due to page_lock, we cannot free zspage immediately
* so let's defer.
*/
- if (fg == ZS_EMPTY)
- schedule_work(&pool->free_work);
+ putback_zspage_deferred(pool, class, zspage);
+ zs_pool_dec_isolated(pool);
}
spin_unlock(&class->lock);
}
return 0;
}
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+ return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Function for resolving migration */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+ /*
+ * We're in the process of destroying the pool, so there are no
+ * active allocations. zs_page_isolate() fails for completely free
+ * zspages, so we need only wait for the zs_pool's isolated
+ * count to hit zero.
+ */
+ wait_event(pool->migration_wait,
+ pool_isolated_are_drained(pool));
+}
+
static void zs_unregister_migration(struct zs_pool *pool)
{
+ pool->destroying = true;
+ /*
+ * We need a memory barrier here to ensure global visibility of
+ * pool->destroying. Thus pool->isolated pages will either be 0 in which
+ * case we don't care, or it will be > 0 and pool->destroying will
+ * ensure that we wake up once isolation hits 0.
+ */
+ smp_mb();
+ wait_for_isolated_drain(pool); /* This can block */
flush_work(&pool->free_work);
iput(pool->inode);
}
if (!pool->name)
goto err;
+#ifdef CONFIG_COMPACTION
+ init_waitqueue_head(&pool->migration_wait);
+#endif
+
if (create_cache(pool))
goto err;
* batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm_packet: potential OGM in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
- __be16 tvlv_len)
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm_packet *ogm_packet)
{
int next_buff_pos = 0;
- next_buff_pos += buff_pos + BATADV_OGM_HLEN;
- next_buff_pos += ntohs(tvlv_len);
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
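Both batman-adv hunks enforce the same two-step bounds check: never read a length field out of a header before verifying that the fixed-size header itself fits in the buffer. Reduced to a self-contained sketch (the struct layout is illustrative, not the real OGM header):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>                  /* ntohs() */

    struct ogm {
            uint8_t  type;
            uint8_t  version;
            uint16_t tvlv_len;              /* big-endian on the wire */
    };

    static bool ogm_fits(const uint8_t *buf, size_t off, size_t len)
    {
            struct ogm ogm;

            /* step 1: is the fixed header itself in bounds? */
            if (len < sizeof(ogm) || off > len - sizeof(ogm))
                    return false;
            memcpy(&ogm, buf + off, sizeof(ogm));
            /* step 2: only now is tvlv_len safe to read and add */
            return off + sizeof(ogm) + ntohs(ogm.tvlv_len) <= len;
    }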
/* adjust all flags and log packets */
while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batadv_ogm_packet->tvlv_len)) {
+ batadv_ogm_packet)) {
/* we might have aggregated direct link packets with an
* ordinary base packet
*/
/* unpack the aggregated packets and process them one by one */
while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
- ogm_packet->tvlv_len)) {
+ ogm_packet)) {
batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM_HLEN;
* batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
- __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm2_packet *ogm2_packet)
{
int next_buff_pos = 0;
- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
- next_buff_pos += ntohs(tvlv_len);
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
- ogm_packet->tvlv_len)) {
+ ogm_packet)) {
batadv_v_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM2_HLEN;
while (bucket_tmp < hash->size) {
if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
- *bucket, &idx_tmp))
+ bucket_tmp, &idx_tmp))
break;
bucket_tmp++;
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
- batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS);
- batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS);
+ batadv_mcast_want_rtr4_update(bat_priv, orig,
+ BATADV_MCAST_WANT_NO_RTR4);
+ batadv_mcast_want_rtr6_update(bat_priv, orig,
+ BATADV_MCAST_WANT_NO_RTR6);
spin_unlock_bh(&orig->mcast_handler_lock);
}
{
struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
- return attr ? nla_get_u32(attr) : 0;
+ return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
}
/**
hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
+ hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
return 0;
}
+static int min_encrypt_key_size_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 1 || val > 16)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->min_enc_key_size = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int min_encrypt_key_size_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->min_enc_key_size;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
+ min_encrypt_key_size_get,
+ min_encrypt_key_size_set, "%llu\n");
+
static int auto_accept_delay_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
if (lmp_ssp_capable(hdev)) {
debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
hdev, &ssp_debug_mode_fops);
+ debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs,
+ hdev, &min_encrypt_key_size_fops);
debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
hdev, &auto_accept_delay_fops);
}
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);
- if (min < hcon->le_conn_min_interval ||
- max > hcon->le_conn_max_interval)
- return send_conn_param_neg_reply(hdev, handle,
- HCI_ERROR_INVALID_LL_PARAMS);
-
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
+ int ret;
BT_DBG("session %p data %p size %d", session, data, size);
}
skb_put_u8(skb, hdr);
- if (data && size > 0)
+ if (data && size > 0) {
skb_put_data(skb, data, size);
+ ret = size;
+ } else {
+ ret = 0;
+ }
skb_queue_tail(transmit, skb);
wake_up_interruptible(sk_sleep(sk));
- return 0;
+ return ret;
}
static int hidp_send_ctrl_message(struct hidp_session *session,
* actually encrypted before enforcing a key size.
*/
return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
- hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
+ hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
}
static void l2cap_do_start(struct l2cap_chan *chan)
memset(&rsp, 0, sizeof(rsp));
- if (min < hcon->le_conn_min_interval ||
- max > hcon->le_conn_max_interval) {
- BT_DBG("requested connection interval exceeds current bounds.");
- err = -EINVAL;
- } else {
- err = hci_check_conn_params(min, max, latency, to_multiplier);
- }
-
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
struct nlmsghdr *nlh;
struct nlattr *nest;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
if (!brnet->call_ip6tables &&
!br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
return NF_ACCEPT;
+ if (!ipv6_mod_enabled()) {
+ pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
+ return NF_DROP;
+ }
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(priv, skb, state);
return NF_DROP;
}
- ADD_COUNTER(*(counter_base + i), 1, skb->len);
+ ADD_COUNTER(*(counter_base + i), skb->len, 1);
/* these should only watch: not modify, nor tell us
* what to do with the packet
continue;
counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
for (i = 0; i < nentries; i++)
- ADD_COUNTER(counters[i], counter_base[i].pcnt,
- counter_base[i].bcnt);
+ ADD_COUNTER(counters[i], counter_base[i].bcnt,
+ counter_base[i].pcnt);
}
}
/* we add to the counters of the first cpu */
for (i = 0; i < num_counters; i++)
- ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
+ ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
write_unlock_bh(&t->lock);
ret = 0;
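All three ebtables hunks fix the same transposition: ADD_COUNTER takes (counter, bytes, packets), so skb->len and the per-packet increment of 1 were landing in each other's fields. Assuming a definition close to the kernel's macro:

    struct ebt_counter {
            unsigned long long pcnt;        /* packets */
            unsigned long long bcnt;        /* bytes */
    };

    /* assumed shape of the macro: (counter, bytes, packets) */
    #define ADD_COUNTER(c, b, p)            \
            do {                            \
                    (c).bcnt += (b);        \
                    (c).pcnt += (p);        \
            } while (0)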
goto err;
br_vlan_get_proto(br_dev, &p_proto);
- nft_reg_store16(dest, p_proto);
+ nft_reg_store16(dest, htons(p_proto));
return;
}
default:
if (key) {
kfree(key->key);
key->key = NULL;
- crypto_free_sync_skcipher(key->tfm);
- key->tfm = NULL;
+ if (key->tfm) {
+ crypto_free_sync_skcipher(key->tfm);
+ key->tfm = NULL;
+ }
}
}
struct ceph_osds up, acting;
bool force_resend = false;
bool unpaused = false;
- bool legacy_change;
+ bool legacy_change = false;
bool split = false;
bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
bool recovery_deletes = ceph_osdmap_flag(osdc,
t->osd = acting.primary;
}
- if (unpaused || legacy_change || force_resend ||
- (split && con && CEPH_HAVE_FEATURE(con->peer_features,
- RESEND_ON_SPLIT)))
+ if (unpaused || legacy_change || force_resend || split)
ct_res = CALC_TARGET_NEED_RESEND;
else
ct_res = CALC_TARGET_NO_ACTION;
out:
- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
+ legacy_change, force_resend, split, ct_res, t->osd);
return ct_res;
}
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
+ rcu_barrier();
+
dev->reg_state = NETREG_UNREGISTERED;
}
/*
return size == size_default;
/* Fields that allow narrowing */
- case offsetof(struct sk_reuseport_md, eth_protocol):
+ case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
if (size < FIELD_SIZEOF(struct sk_buff, protocol))
return false;
/* fall through */
- case offsetof(struct sk_reuseport_md, ip_protocol):
- case offsetof(struct sk_reuseport_md, bind_inany):
- case offsetof(struct sk_reuseport_md, len):
+ case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+ case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+ case bpf_ctx_range(struct sk_reuseport_md, len):
bpf_ctx_record_field_size(info, size_default);
return bpf_ctx_narrow_access_ok(off, size, size_default);
mutex_unlock(&flow_dissector_mutex);
return -ENOENT;
}
- bpf_prog_put(attached);
RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+ bpf_prog_put(attached);
mutex_unlock(&flow_dissector_mutex);
return 0;
}
txq = netdev_get_tx_queue(dev, q_index);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (netif_xmit_frozen_or_stopped(txq) ||
- netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+ !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
skb_queue_head(&npinfo->txq, skb);
HARD_TX_UNLOCK(dev, txq);
local_irq_restore(flags);
HARD_TX_UNLOCK(dev, txq);
- if (status == NETDEV_TX_OK)
+ if (dev_xmit_complete(status))
break;
}
}
- if (status != NETDEV_TX_OK) {
+ if (!dev_xmit_complete(status)) {
skb_queue_tail(&npinfo->txq, skb);
schedule_delayed_work(&npinfo->tx_work,0);
}
int pos;
int dummy;
+ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+ /* gso_size is untrusted, and we have a frag_list with a linear
+ * non head_frag head.
+ *
+ * (we assume checking the first list_skb member suffices;
+	 * i.e. if any of the list_skb members has a non-head_frag
+	 * head, then the first one does too).
+ *
+ * If head_skb's headlen does not fit requested gso_size, it
+ * means that the frag_list members do NOT terminate on exact
+ * gso_size boundaries. Hence we cannot perform skb_frag_t page
+ * sharing. Therefore we must fallback to copying the frag_list
+ * skbs; we do so by disabling SG.
+ */
+ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+ features &= ~NETIF_F_SG;
+ }
+
__skb_push(head_skb, doffset);
proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
}
EXPORT_SYMBOL(skb_set_owner_w);
+static bool can_skb_orphan_partial(const struct sk_buff *skb)
+{
+#ifdef CONFIG_TLS_DEVICE
+ /* Drivers depend on in-order delivery for crypto offload,
+ * partial orphan breaks out-of-order-OK logic.
+ */
+ if (skb->decrypted)
+ return false;
+#endif
+ return (skb->destructor == sock_wfree ||
+ (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
+}
+
/* This helper is used by netem, as it can hold packets in its
* delay queue. We want to allow the owner socket to send more
* packets, as if they were already TX completed by a typical driver.
if (skb_is_tcp_pure_ack(skb))
return;
- if (skb->destructor == sock_wfree
-#ifdef CONFIG_INET
- || skb->destructor == tcp_wfree
-#endif
- ) {
+ if (can_skb_orphan_partial(skb)) {
struct sock *sk = skb->sk;
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
core_initcall(net_inuse_init);
-static void assign_proto_idx(struct proto *prot)
+static int assign_proto_idx(struct proto *prot)
{
prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
pr_err("PROTO_INUSE_NR exhausted\n");
- return;
+ return -ENOSPC;
}
set_bit(prot->inuse_idx, proto_inuse_idx);
+ return 0;
}
static void release_proto_idx(struct proto *prot)
clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
-static inline void assign_proto_idx(struct proto *prot)
+static inline int assign_proto_idx(struct proto *prot)
{
+ return 0;
}
static inline void release_proto_idx(struct proto *prot)
int proto_register(struct proto *prot, int alloc_slab)
{
+ int ret = -ENOBUFS;
+
if (alloc_slab) {
prot->slab = kmem_cache_create_usercopy(prot->name,
prot->obj_size, 0,
}
mutex_lock(&proto_list_mutex);
+ ret = assign_proto_idx(prot);
+ if (ret) {
+ mutex_unlock(&proto_list_mutex);
+ goto out_free_timewait_sock_slab_name;
+ }
list_add(&prot->node, &proto_list);
- assign_proto_idx(prot);
mutex_unlock(&proto_list_mutex);
- return 0;
+ return ret;
out_free_timewait_sock_slab_name:
- kfree(prot->twsk_prot->twsk_slab_name);
+ if (alloc_slab && prot->twsk_prot)
+ kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
- req_prot_cleanup(prot->rsk_prot);
+ if (alloc_slab) {
+ req_prot_cleanup(prot->rsk_prot);
- kmem_cache_destroy(prot->slab);
- prot->slab = NULL;
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+ }
out:
- return -ENOBUFS;
+ return ret;
}
EXPORT_SYMBOL(proto_register);
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;
+static atomic64_t cookie_gen;
u64 sock_gen_cookie(struct sock *sk)
{
if (res)
return res;
- res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
+ res = atomic64_inc_return(&cookie_gen);
atomic64_cmpxchg(&sk->sk_cookie, 0, res);
}
}
struct sock *sk, u64 flags)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct inet_connection_sock *icsk = inet_csk(sk);
u32 key_size = map->key_size, hash;
struct bpf_htab_elem *elem, *elem_new;
struct bpf_htab_bucket *bucket;
WARN_ON_ONCE(!rcu_read_lock_held());
if (unlikely(flags > BPF_EXIST))
return -EINVAL;
+ if (unlikely(icsk->icsk_ulp_data))
+ return -EINVAL;
link = sk_psock_init_link();
if (!link)
int err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
- bool noblock = (*timeo_p ? false : true);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
if (sk_stream_memory_free(sk))
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
- if (!*timeo_p) {
- if (noblock)
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- goto do_nonblock;
- }
+ if (!*timeo_p)
+ goto do_eagain;
if (signal_pending(current))
goto do_interrupted;
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
do_error:
err = -EPIPE;
goto out;
-do_nonblock:
+do_eagain:
+ /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+ * be generated later.
+ * When TCP receives ACK packets that make room, tcp_check_space()
+ * only calls tcp_new_space() if SOCK_NOSPACE is set.
+ */
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
goto out;
do_interrupted:
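The SOCK_NOSPACE bit is what makes a later EPOLLOUT possible under edge-triggered epoll: as the comment notes, tcp_check_space() only calls tcp_new_space() when it is set. A userspace sketch of the write loop that used to stall (assumes fd was registered on epfd with EPOLLOUT | EPOLLET):

    #include <sys/epoll.h>
    #include <errno.h>
    #include <unistd.h>

    static int write_all(int epfd, int fd, const char *buf, size_t len)
    {
            size_t off = 0;

            while (off < len) {
                    ssize_t n = write(fd, buf + off, len - off);

                    if (n > 0) {
                            off += n;
                            continue;
                    }
                    if (n < 0 && errno == EAGAIN) {
                            struct epoll_event ev;

                            /* Sleep until EPOLLOUT. Without SOCK_NOSPACE
                             * being set when EAGAIN was returned, this
                             * wait could hang forever under ET epoll.
                             */
                            if (epoll_wait(epfd, &ev, 1, -1) < 0)
                                    return -1;
                            continue;
                    }
                    return -1;              /* real error */
            }
            return 0;
    }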
{
int port;
+ if (!ds->ops->port_mdb_add)
+ return;
+
for_each_set_bit(port, bitmap, ds->num_ports)
ds->ops->port_mdb_add(ds, port, mdb);
}
*
* RSV - VID[9]:
* To be used for further expansion of SWITCH_ID or for other purposes.
+ * Must be transmitted as zero and ignored on receive.
*
* SWITCH_ID - VID[8:6]:
* Index of switch within DSA tree. Must be between 0 and
*
* RSV - VID[5:4]:
* To be used for further expansion of PORT or for other purposes.
+ * Must be transmitted as zero and ignored on receive.
*
* PORT - VID[3:0]:
* Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
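Packing and unpacking the VID layout documented above, as a sketch (the field above this excerpt occupying VID[11:10] is abbreviated to 'upper'; reserved bits are transmitted as zero and masked off on receive):

    #include <stdint.h>

    static uint16_t dsa_8021q_vid(uint16_t upper, uint16_t sw, uint16_t port)
    {
            /* RSV bits VID[9] and VID[5:4] stay zero on transmit */
            return ((upper & 0x3) << 10) | ((sw & 0x7) << 6) | (port & 0xf);
    }

    static uint16_t vid_to_switch_id(uint16_t vid)
    {
            return (vid >> 6) & 0x7;        /* ignores the RSV bits */
    }

    static uint16_t vid_to_port(uint16_t vid)
    {
            return vid & 0xf;               /* ignores the RSV bits */
    }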
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
if (!reasm_data)
goto out_oom;
- inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb->dev = ldev;
skb->tstamp = fq->q.stamp;
static int __init af_ieee802154_init(void)
{
- int rc = -EINVAL;
+ int rc;
rc = proto_register(&ieee802154_raw_prot, 1);
if (rc)
}
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
- unsigned char *flags, bool skip_oif)
+ u8 rt_family, unsigned char *flags, bool skip_oif)
{
if (nhc->nhc_flags & RTNH_F_DEAD)
*flags |= RTNH_F_DEAD;
/* if gateway family does not match nexthop family
* gateway is encoded as RTA_VIA
*/
- if (nhc->nhc_gw_family != nhc->nhc_family) {
+ if (rt_family != nhc->nhc_gw_family) {
int alen = sizeof(struct in6_addr);
struct nlattr *nla;
struct rtvia *via;
#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
- int nh_weight)
+ int nh_weight, u8 rt_family)
{
const struct net_device *dev = nhc->nhc_dev;
struct rtnexthop *rtnh;
rtnh->rtnh_hops = nh_weight - 1;
rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
- if (fib_nexthop_info(skb, nhc, &flags, true) < 0)
+ if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
goto nla_put_failure;
rtnh->rtnh_flags = flags;
goto nla_put_failure;
if (unlikely(fi->nh)) {
- if (nexthop_mpath_fill_node(skb, fi->nh) < 0)
+ if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
goto nla_put_failure;
goto mp_end;
}
for_nexthops(fi) {
- if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0)
+ if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+ AF_INET) < 0)
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (nh->nh_tclassid &&
const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
unsigned char flags = 0;
- if (fib_nexthop_info(skb, nhc, &flags, false) < 0)
+ if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
goto nla_put_failure;
rtm->rtm_flags = flags;
if (filter->dump_exceptions) {
err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
- &i_fa, s_fa);
+ &i_fa, s_fa, flags);
if (err < 0)
goto stop;
}
if (!rt)
goto out;
- net = dev_net(rt->dst.dev);
+
+ if (rt->dst.dev)
+ net = dev_net(rt->dst.dev);
+ else if (skb_in->dev)
+ net = dev_net(skb_in->dev);
+ else
+ goto out;
/*
* Find the original header. It is expected to be valid, of course.
return false;
}
- icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
+ icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
return true;
}
void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
- __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+ __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
}
EXPORT_SYMBOL(ip_mc_inc_group);
iml->sflist = NULL;
iml->sfmode = mode;
rcu_assign_pointer(inet->mc_list, iml);
- __ip_mc_inc_group(in_dev, addr, mode);
+ ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
err = 0;
done:
return err;
EXPORT_SYMBOL(inet_frag_reasm_prepare);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
- void *reasm_data)
+ void *reasm_data, bool try_coalesce)
{
struct sk_buff **nextp = (struct sk_buff **)reasm_data;
struct rb_node *rbn;
struct sk_buff *fp;
+ int sum_truesize;
skb_push(head, head->data - skb_network_header(head));
fp = FRAG_CB(head)->next_frag;
rbn = rb_next(&head->rbnode);
rb_erase(&head->rbnode, &q->rb_fragments);
+
+ sum_truesize = head->truesize;
while (rbn || fp) {
/* fp points to the next sk_buff in the current run;
* rbn points to the next run.
*/
/* Go through the current run. */
while (fp) {
- *nextp = fp;
- nextp = &fp->next;
- fp->prev = NULL;
- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
- fp->sk = NULL;
- head->data_len += fp->len;
- head->len += fp->len;
+ struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
+ bool stolen;
+ int delta;
+
+ sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp = FRAG_CB(fp)->next_frag;
+
+ if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
+ &delta)) {
+ kfree_skb_partial(fp, stolen);
+ } else {
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+
+ *nextp = fp;
+ nextp = &fp->next;
+ }
+
+ fp = next_frag;
}
/* Move to the next run. */
if (rbn) {
rbn = rbnext;
}
}
- sub_frag_mem_limit(q->fqdir, head->truesize);
+ sub_frag_mem_limit(q->fqdir, sum_truesize);
*nextp = NULL;
skb_mark_not_on_list(head);
return err;
}
+static bool ip_frag_coalesce_ok(const struct ipq *qp)
+{
+ return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
+}
+
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
struct sk_buff *prev_tail, struct net_device *dev)
if (len > 65535)
goto out_oversize;
- inet_frag_reasm_finish(&qp->q, skb, reasm_data);
+ inet_frag_reasm_finish(&qp->q, skb, reasm_data,
+ ip_frag_coalesce_ok(qp));
skb->dev = dev;
IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
struct rtable *rt, u32 table_id, struct flowi4 *fl4,
- struct sk_buff *skb, u32 portid, u32 seq)
+ struct sk_buff *skb, u32 portid, u32 seq,
+ unsigned int flags)
{
struct rtmsg *r;
struct nlmsghdr *nlh;
u32 error;
u32 metrics[RTAX_MAX];
- nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
+ nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
if (!nlh)
return -EMSGSIZE;
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
struct netlink_callback *cb, u32 table_id,
struct fnhe_hash_bucket *bucket, int genid,
- int *fa_index, int fa_start)
+ int *fa_index, int fa_start, unsigned int flags)
{
int i;
err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
table_id, NULL, skb,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
+ cb->nlh->nlmsg_seq, flags);
if (err)
return err;
next:
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
u32 table_id, struct fib_info *fi,
- int *fa_index, int fa_start)
+ int *fa_index, int fa_start, unsigned int flags)
{
struct net *net = sock_net(cb->skb->sk);
int nhsel, genid = fnhe_genid(net);
err = 0;
if (bucket)
err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
- genid, fa_index, fa_start);
+ genid, fa_index, fa_start,
+ flags);
rcu_read_unlock();
if (err)
return err;
fl4.flowi4_tos, res.fi, 0);
} else {
err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
}
if (err < 0)
goto errout_rcu;
return mss_now;
}
+/* In some cases, both sendpage() and sendmsg() could have added
+ * an skb to the write queue, but failed adding payload on it.
+ * We need to remove it to consume less memory, but more
+ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
+ * users.
+ */
+static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (skb && !skb->len) {
+ tcp_unlink_write_queue(skb, sk);
+ if (tcp_write_queue_empty(sk))
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+ sk_wmem_free_skb(sk, skb);
+ }
+}
+
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
if (!skb)
goto wait_for_memory;
+#ifdef CONFIG_TLS_DEVICE
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
skb_entail(sk, skb);
copy = size_goal;
}
return copied;
do_error:
+ tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
if (copied)
goto out;
out_err:
sock_zerocopy_put(uarg);
return copied + copied_syn;
+do_error:
+ skb = tcp_write_queue_tail(sk);
do_fault:
- if (!skb->len) {
- tcp_unlink_write_queue(skb, sk);
- /* It is the one place in all of TCP, except connection
- * reset, where we can be unlinking the send_head.
- */
- if (tcp_write_queue_empty(sk))
- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
- sk_wmem_free_skb(sk, skb);
- }
+ tcp_remove_empty_skb(sk, skb);
-do_error:
if (copied + copied_syn)
goto out;
out_err:
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_msg tmp, *msg_tx = NULL;
- int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
int copied = 0, err = 0;
struct sk_psock *psock;
long timeo;
+ int flags;
+
+ /* Don't let internal do_tcp_sendpages() flags through */
+ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
+ flags |= MSG_NO_SHARED_FRAGS;
psock = sk_psock_get(sk);
if (unlikely(!psock))
static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}
static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
if (!buff)
return -ENOMEM; /* We'll just try again later. */
+ skb_copy_decrypted(buff, skb);
sk->sk_wmem_queued += buff->truesize;
sk_mem_charge(sk, buff->truesize);
buff = sk_stream_alloc_skb(sk, 0, gfp, true);
if (unlikely(!buff))
return -ENOMEM;
+ skb_copy_decrypted(buff, skb);
sk->sk_wmem_queued += buff->truesize;
sk_mem_charge(sk, buff->truesize);
if (len <= skb->len)
break;
- if (unlikely(TCP_SKB_CB(skb)->eor))
+ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
return false;
len -= skb->len;
sk_mem_charge(sk, nskb->truesize);
skb = tcp_send_head(sk);
+ skb_copy_decrypted(nskb, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
* we need to propagate it to the new skb.
*/
TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+ tcp_skb_collapse_tstamp(nskb, skb);
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
} else {
if (!idev) {
idev = ipv6_add_dev(dev);
if (IS_ERR(idev))
- return NULL;
+ return idev;
}
if (dev->flags&IFF_UP)
int err = 0;
if (addr_type == IPV6_ADDR_ANY ||
- addr_type & IPV6_ADDR_MULTICAST ||
+ (addr_type & IPV6_ADDR_MULTICAST &&
+ !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
(!(idev->dev->flags & IFF_LOOPBACK) &&
!netif_is_l3_master(idev->dev) &&
addr_type & IPV6_ADDR_LOOPBACK))
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
- if (!idev)
- return ERR_PTR(-ENOBUFS);
+ if (IS_ERR(idev))
+ return idev;
if (idev->cnf.disable_ipv6)
return ERR_PTR(-EACCES);
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
- if (!idev) {
+ if (IS_ERR(idev)) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
*/
idev = ipv6_find_idev(dev);
- if (!idev) {
+ if (IS_ERR(idev)) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
- if (!idev) {
+ if (IS_ERR(idev)) {
pr_debug("%s: add_dev failed\n", __func__);
return;
}
IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
idev = ipv6_find_idev(dev);
- if (!idev)
- return -ENOBUFS;
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
if (!ipv6_allow_optimistic_dad(net, idev))
cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
if (pmc) {
im->idev = pmc->idev;
if (im->mca_sfmode == MCAST_INCLUDE) {
- im->mca_tomb = pmc->mca_tomb;
- im->mca_sources = pmc->mca_sources;
+ swap(im->mca_tomb, pmc->mca_tomb);
+ swap(im->mca_sources, pmc->mca_sources);
for (psf = im->mca_sources; psf; psf = psf->sf_next)
psf->sf_crcount = idev->mc_qrv;
} else {
im->mca_crcount = idev->mc_qrv;
}
in6_dev_put(pmc->idev);
+ ip6_mc_clear_src(pmc);
kfree(pmc);
}
spin_unlock_bh(&im->mca_lock);
skb_reset_transport_header(skb);
- inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb->ignore_df = 1;
skb->dev = dev;
return 0;
}
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
{
remove_proc_entry("icmp6", net->proc_net);
}
skb_reset_transport_header(skb);
- inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
skb->dev = dev;
ipv6_hdr(skb)->payload_len = htons(payload_len);
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
.fc_ifindex = idev->dev->ifindex,
- .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
+ .fc_flags = RTF_UP | RTF_NONEXTHOP,
.fc_dst = *addr,
.fc_dst_len = 128,
.fc_protocol = RTPROT_KERNEL,
.fc_nlinfo.nl_net = net,
.fc_ignore_dev_down = true,
};
+ struct fib6_info *f6i;
if (anycast) {
cfg.fc_type = RTN_ANYCAST;
cfg.fc_flags |= RTF_LOCAL;
}
- return ip6_route_info_create(&cfg, gfp_flags, NULL);
+ f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
+ if (!IS_ERR(f6i))
+ f6i->dst_nocount = true;
+ return f6i;
}
/* remove deleted ip from prefsrc entries */
if (nexthop_is_multipath(nh)) {
struct nlattr *mp;
- mp = nla_nest_start(skb, RTA_MULTIPATH);
+ mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp)
goto nla_put_failure;
- if (nexthop_mpath_fill_node(skb, nh))
+ if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
goto nla_put_failure;
nla_nest_end(skb, mp);
struct fib6_nh *fib6_nh;
fib6_nh = nexthop_fib6_nh(nh);
- if (fib_nexthop_info(skb, &fib6_nh->nh_common,
+ if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
flags, false) < 0)
goto nla_put_failure;
}
goto nla_put_failure;
if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
- rt->fib6_nh->fib_nh_weight) < 0)
+ rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
goto nla_put_failure;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings, fib6_siblings) {
if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
- sibling->fib6_nh->fib_nh_weight) < 0)
+ sibling->fib6_nh->fib_nh_weight,
+ AF_INET6) < 0)
goto nla_put_failure;
}
rtm->rtm_flags |= nh_flags;
} else {
- if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
+ if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
&nh_flags, false) < 0)
goto nla_put_failure;
struct sta_info *sta;
struct ieee80211_sub_if_data *sdata;
int err;
- int layer2_update;
if (params->vlan) {
sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
if (is_multicast_ether_addr(mac))
return -EINVAL;
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
if (!sta)
return -ENOMEM;
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
- if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated)
- return -EINVAL;
-
err = sta_apply_parameters(local, sta, params);
if (err) {
sta_info_free(local, sta);
test_sta_flag(sta, WLAN_STA_ASSOC))
rate_control_rate_init(sta);
- layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_AP;
-
err = sta_info_insert_rcu(sta);
if (err) {
rcu_read_unlock();
return err;
}
- if (layer2_update)
- cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
-
rcu_read_unlock();
return 0;
sta->sdata = vlansdata;
ieee80211_check_fast_xmit(sta);
- if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
ieee80211_vif_inc_num_mcast(sta->sdata);
-
- cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
+ cfg80211_send_layer2_update(sta->sdata->dev,
+ sta->sta.addr);
+ }
}
err = sta_apply_parameters(local, sta, params);
skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
sdata->control_port_over_nl80211)) {
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
+ bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
cfg80211_rx_control_port(dev, skb, noencrypt);
dev_kfree_skb(skb);
} else {
+ memset(skb->cb, 0, sizeof(skb->cb));
+
/* deliver to local stack */
if (rx->napi)
napi_gro_receive(rx->napi, skb);
if (skb) {
skb->protocol = eth_type_trans(skb, dev);
- memset(skb->cb, 0, sizeof(skb->cb));
-
ieee80211_deliver_skb_to_local_stack(skb, rx);
}
ieee80211_check_fast_xmit(sta);
ieee80211_check_fast_rx(sta);
}
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sta->sdata->vif.type == NL80211_IFTYPE_AP)
+ cfg80211_send_layer2_update(sta->sdata->dev,
+ sta->sta.addr);
break;
default:
break;
mpls_stats_inc_outucastpkts(out_dev, skb);
if (rt) {
- if (rt->rt_gw_family == AF_INET)
- err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
- skb);
- else if (rt->rt_gw_family == AF_INET6)
+ if (rt->rt_gw_family == AF_INET6)
err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
skb);
+ else
+ err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+ skb);
} else if (rt6) {
if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
/* 6PE (RFC 4798) */
checksum = ncsi_calculate_checksum((unsigned char *)h,
sizeof(*h) + nca->payload);
pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
- nca->payload);
+ ALIGN(nca->payload, 4));
*pchecksum = htonl(checksum);
}
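Both ALIGN() fixes in this file follow the same rule: the NC-SI checksum field sits after the payload padded to a 4-byte boundary, so the offset arithmetic on both the transmit and receive paths must round the payload length up. A self-contained check of the rounding (plain C, illustrative constants):

#include <assert.h>

#define ALIGN4(x)	(((x) + 3u) & ~3u)	/* round up to a 4-byte boundary */

int main(void)
{
	assert(ALIGN4(26) == 28);	/* odd payload: checksum lands after padding */
	assert(ALIGN4(28) == 28);	/* already aligned: no padding inserted */
	return 0;
}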
int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
{
+ struct ncsi_cmd_handler *nch = NULL;
struct ncsi_request *nr;
+ unsigned char type;
struct ethhdr *eh;
- struct ncsi_cmd_handler *nch = NULL;
int i, ret;
+ /* Use OEM generic handler for Netlink request */
+ if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
+ type = NCSI_PKT_CMD_OEM;
+ else
+ type = nca->type;
+
/* Search for the handler */
for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
- if (ncsi_cmd_handlers[i].type == nca->type) {
+ if (ncsi_cmd_handlers[i].type == type) {
if (ncsi_cmd_handlers[i].handler)
nch = &ncsi_cmd_handlers[i];
else
if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
netdev_dbg(nr->ndp->ndev.dev,
- "NCSI: non zero response/reason code\n");
+ "NCSI: non zero response/reason code %04xh, %04xh\n",
+ ntohs(h->code), ntohs(h->reason));
return -EPERM;
}
* sender doesn't support checksum according to NCSI
* specification.
*/
- pchecksum = (__be32 *)((void *)(h + 1) + payload - 4);
+ pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
if (ntohl(*pchecksum) == 0)
return 0;
sizeof(*h) + payload - 4);
if (*pchecksum != htonl(checksum)) {
- netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n");
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
+ *pchecksum, htonl(checksum));
return -EINVAL;
}
* table location, we assume id gets exposed to userspace.
*
* Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
*
* 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
a = (unsigned long)ct;
- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
- c = (unsigned long)ct->ext;
- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+ b = (unsigned long)ct->master;
+ c = (unsigned long)nf_ct_net(ct);
+ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
&ct_id_seed);
#ifdef CONFIG_64BIT
return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
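The point of the rework above is that a conntrack ID must be derived only from fields that never change over the entry's lifetime; ct->ext can be reallocated as extensions are added, so hashing its address made IDs unstable. A userspace sketch of the same discipline, using an unkeyed splitmix64-style mixer as a stand-in for the keyed siphash() (the kernel version is keyed; this sketch is not collision-resistant):

#include <stdint.h>

struct obj {
	const void *addr;	/* invariant: the object's own address */
	uint64_t cookie;	/* invariant: assigned once at creation */
	uint32_t state;		/* mutable: deliberately NOT hashed */
};

/* splitmix64 finalizer, standing in for a keyed hash */
static uint64_t mix64(uint64_t x)
{
	x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
	x ^= x >> 27; x *= 0x94d049bb133111ebULL;
	return x ^ (x >> 31);
}

static uint64_t obj_id(const struct obj *o)
{
	/* only invariant inputs, so the ID is stable for the lifetime */
	return mix64((uint64_t)(uintptr_t)o->addr ^ mix64(o->cookie));
}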
i++;
}
- pr_debug("Skipped up to `%c'!\n", skip);
+ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
*numoff = i;
*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0 ||
- ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_acct(skb, ct, type) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0 ||
- ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_ct_synproxy(skb, ct) < 0)
goto nla_put_failure;
+ if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
+ (ctnetlink_dump_timeout(skb, ct) < 0 ||
+ ctnetlink_dump_protoinfo(skb, ct) < 0))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return skb->len;
table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
+ table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
+ table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
#endif
table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - (u32)jiffies);
+}
+
+static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
+ int l4num = nf_ct_protonum(ct);
unsigned int timeout;
- int l4num;
-
- l4num = nf_ct_protonum(ct);
- if (l4num == IPPROTO_TCP)
- flow_offload_fixup_tcp(&ct->proto.tcp);
l4proto = nf_ct_l4proto_find(l4num);
if (!l4proto)
else
return;
- ct->timeout = nfct_time_stamp + timeout;
+ if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+ ct->timeout = nfct_time_stamp + timeout;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+ if (nf_ct_protonum(ct) == IPPROTO_TCP)
+ flow_offload_fixup_tcp(&ct->proto.tcp);
+}
+
+static void flow_offload_fixup_ct(struct nf_conn *ct)
+{
+ flow_offload_fixup_ct_state(ct);
+ flow_offload_fixup_ct_timeout(ct);
}
void flow_offload_free(struct flow_offload *flow)
return err;
}
- flow->timeout = (u32)jiffies;
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+ return nf_flow_timeout_delta(flow->timeout) <= 0;
+}
+
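nf_flow_timeout_delta() is the usual signed-difference idiom for jiffies comparisons: casting the unsigned difference to a signed type orders two timestamps correctly even across 32-bit wraparound, provided they are within about 2^31 ticks of each other. A standalone sketch of the idiom:

#include <stdbool.h>
#include <stdint.h>

static bool deadline_passed(uint32_t deadline, uint32_t now)
{
	/* Correct even when 'now' has wrapped past UINT32_MAX, as long
	 * as the two values are less than 2^31 ticks apart. */
	return (int32_t)(deadline - now) <= 0;
}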
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
e = container_of(flow, struct flow_offload_entry, flow);
clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+ if (nf_flow_has_expired(flow))
+ flow_offload_fixup_ct(e->ct);
+ else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+ flow_offload_fixup_ct_timeout(e->ct);
+
flow_offload_free(flow);
}
return err;
}
-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
-{
- return (__s32)(flow->timeout - (u32)jiffies) <= 0;
-}
-
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
return true;
}
+static int nf_flow_offload_dst_check(struct dst_entry *dst)
+{
+ if (unlikely(dst_xfrm(dst)))
+ return dst_check(dst, 0) ? 0 : -1;
+
+ return 0;
+}
+
+static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ struct dst_entry *dst)
+{
+ skb_orphan(skb);
+ skb_dst_set_noref(skb, dst);
+ dst_output(state->net, state->sk, skb);
+ return NF_STOLEN;
+}
+
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
return NF_ACCEPT;
+ if (nf_flow_offload_dst_check(&rt->dst)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
+ skb->tstamp = 0;
+
+ if (unlikely(dst_xfrm(&rt->dst))) {
+ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+ IPCB(skb)->iif = skb->dev->ifindex;
+ IPCB(skb)->flags = IPSKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
sizeof(*ip6h)))
return NF_ACCEPT;
+ if (nf_flow_offload_dst_check(&rt->dst)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+
if (skb_try_make_writable(skb, sizeof(*ip6h)))
return NF_DROP;
flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
+ skb->tstamp = 0;
+
+ if (unlikely(dst_xfrm(&rt->dst))) {
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->iif = skb->dev->ifindex;
+ IP6CB(skb)->flags = IP6SKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
return;
list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
- if (trans->msg_type == NFT_MSG_NEWSET &&
- nft_trans_set(trans) == set) {
- set->bound = true;
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWSET:
+ if (nft_trans_set(trans) == set)
+ nft_trans_set_bound(trans) = true;
+ break;
+ case NFT_MSG_NEWSETELEM:
+ if (nft_trans_elem_set(trans) == set)
+ nft_trans_elem_set_bound(trans) = true;
break;
}
}
chain->flags |= NFT_BASE_CHAIN | flags;
basechain->policy = NF_ACCEPT;
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+ nft_chain_offload_priority(basechain) < 0)
+ return -EOPNOTSUPP;
+
flow_block_init(&basechain->flow_block);
} else {
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
- if (nft_trans_set(trans)->bound) {
+ if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
- if (nft_trans_elem_set(trans)->bound) {
+ if (nft_trans_elem_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
}
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
- __be16 proto,
- struct netlink_ext_ack *extack)
+ __be16 proto, int priority,
+ struct netlink_ext_ack *extack)
{
common->protocol = proto;
+ common->prio = priority;
common->extack = extack;
}
return 0;
}
+int nft_chain_offload_priority(struct nft_base_chain *basechain)
+{
+ if (basechain->ops.priority <= 0 ||
+ basechain->ops.priority > USHRT_MAX)
+ return -1;
+
+ return 0;
+}
+
static int nft_flow_offload_rule(struct nft_trans *trans,
enum flow_cls_command command)
{
if (flow)
proto = flow->proto;
- nft_flow_offload_common_init(&cls_flow.common, proto, &extack);
+ nft_flow_offload_common_init(&cls_flow.common, proto,
+ basechain->ops.priority, &extack);
cls_flow.command = command;
cls_flow.cookie = (unsigned long) rule;
if (flow)
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
+#include <net/ipv6.h>
#include <net/netfilter/nft_fib.h>
}
break;
case ETH_P_IPV6:
+ if (!ipv6_mod_enabled())
+ break;
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ struct tcphdr _tcph, *tcph = NULL;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
enum ip_conntrack_dir dir;
- bool is_tcp = false;
struct nf_conn *ct;
int ret;
switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
case IPPROTO_TCP:
- is_tcp = true;
+ tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+ sizeof(_tcph), &_tcph);
+ if (unlikely(!tcph || tcph->fin || tcph->rst))
+ goto out;
break;
case IPPROTO_UDP:
break;
if (!flow)
goto err_flow_alloc;
- if (is_tcp) {
+ if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
return nft_chain_validate_hooks(ctx->chain, hook_mask);
}
+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+ [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
+ .len = NFT_NAME_MAXLEN - 1 },
+};
+
static int nft_flow_offload_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
static struct nft_expr_type nft_flow_offload_type __read_mostly = {
.name = "flow_offload",
.ops = &nft_flow_offload_ops,
+ .policy = nft_flow_offload_policy,
.maxattr = NFTA_FLOW_MAX,
.owner = THIS_MODULE,
};
return;
}
- /* So that subsequent socket matching not to require other lookups. */
- skb->sk = sk;
-
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
nft_reg_store8(dest, inet_sk_transparent(sk));
WARN_ON(1);
regs->verdict.code = NFT_BREAK;
}
+
+ if (sk != skb->sk)
+ sock_gen_put(sk);
}
static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
nfnl_acct_put(info->nfacct);
}
-static struct xt_match nfacct_mt_reg __read_mostly = {
- .name = "nfacct",
- .family = NFPROTO_UNSPEC,
- .checkentry = nfacct_mt_checkentry,
- .match = nfacct_mt,
- .destroy = nfacct_mt_destroy,
- .matchsize = sizeof(struct xt_nfacct_match_info),
- .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
- .me = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+ {
+ .name = "nfacct",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = nfacct_mt_checkentry,
+ .match = nfacct_mt,
+ .destroy = nfacct_mt_destroy,
+ .matchsize = sizeof(struct xt_nfacct_match_info),
+ .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "nfacct",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = nfacct_mt_checkentry,
+ .match = nfacct_mt,
+ .destroy = nfacct_mt_destroy,
+ .matchsize = sizeof(struct xt_nfacct_match_info_v1),
+ .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+ .me = THIS_MODULE,
+ },
};
static int __init nfacct_mt_init(void)
{
- return xt_register_match(&nfacct_mt_reg);
+ return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
}
static void __exit nfacct_mt_exit(void)
{
- xt_unregister_match(&nfacct_mt_reg);
+ xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
}
module_init(nfacct_mt_init);
if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
(!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
- par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
- (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
+ par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
- if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
- return -EINVAL;
+ return -EINVAL;
}
if (!brnf_probed) {
struct md_mark mark;
struct md_labels labels;
char timeout[CTNL_TIMEOUT_NAME_MAX];
+ struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
#endif
return -EPFNOSUPPORT;
}
+ /* The key extracted from the fragment that completed this datagram
+ * likely didn't have an L4 header, so regenerate it.
+ */
+ ovs_flow_key_update_l3l4(skb, key);
+
key->ip.frag = OVS_FRAG_TYPE_NONE;
skb_clear_hash(skb);
skb->ignore_df = 1;
if (help && rcu_access_pointer(help->helper) != info->helper)
return false;
}
+ if (info->nf_ct_timeout) {
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_ext = nf_ct_timeout_find(ct);
+ if (!timeout_ext || info->nf_ct_timeout !=
+ rcu_dereference(timeout_ext->timeout))
+ return false;
+ }
/* Force conntrack entry direction to the current packet? */
if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
/* Delete the conntrack entry if confirmed, else just release
case OVS_CT_ATTR_TIMEOUT:
memcpy(info->timeout, nla_data(a), nla_len(a));
if (!memchr(info->timeout, '\0', nla_len(a))) {
- OVS_NLERR(log, "Invalid conntrack helper");
+ OVS_NLERR(log, "Invalid conntrack timeout");
return -EINVAL;
}
break;
ct_info.timeout))
pr_info_ratelimited("Failed to associated timeout "
"policy `%s'\n", ct_info.timeout);
+ else
+ ct_info.nf_ct_timeout = rcu_dereference(
+ nf_ct_timeout_find(ct_info.ct)->timeout);
+
}
if (helper) {
}
/**
- * key_extract - extracts a flow key from an Ethernet frame.
+ * key_extract_l3l4 - extracts L3/L4 header information.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
- * Ethernet header
+ * L3 header
* @key: output flow key
*
- * The caller must ensure that skb->len >= ETH_HLEN.
- *
- * Returns 0 if successful, otherwise a negative errno value.
- *
- * Initializes @skb header fields as follows:
- *
- * - skb->mac_header: the L2 header.
- *
- * - skb->network_header: just past the L2 header, or just past the
- * VLAN header, to the first byte of the L2 payload.
- *
- * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
- * on output, then just past the IP header, if one is present and
- * of a correct length, otherwise the same as skb->network_header.
- * For other key->eth.type values it is left untouched.
- *
- * - skb->protocol: the type of the data starting at skb->network_header.
- * Equals to key->eth.type.
*/
-static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
int error;
- struct ethhdr *eth;
-
- /* Flags are always used as part of stats */
- key->tp.flags = 0;
-
- skb_reset_mac_header(skb);
-
- /* Link layer. */
- clear_vlan(key);
- if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
- if (unlikely(eth_type_vlan(skb->protocol)))
- return -EINVAL;
-
- skb_reset_network_header(skb);
- key->eth.type = skb->protocol;
- } else {
- eth = eth_hdr(skb);
- ether_addr_copy(key->eth.src, eth->h_source);
- ether_addr_copy(key->eth.dst, eth->h_dest);
-
- __skb_pull(skb, 2 * ETH_ALEN);
- /* We are going to push all headers that we pull, so no need to
- * update skb->csum here.
- */
-
- if (unlikely(parse_vlan(skb, key)))
- return -ENOMEM;
-
- key->eth.type = parse_ethertype(skb);
- if (unlikely(key->eth.type == htons(0)))
- return -ENOMEM;
-
- /* Multiple tagged packets need to retain TPID to satisfy
- * skb_vlan_pop(), which will later shift the ethertype into
- * skb->protocol.
- */
- if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
- skb->protocol = key->eth.cvlan.tpid;
- else
- skb->protocol = key->eth.type;
-
- skb_reset_network_header(skb);
- __skb_push(skb, skb->data - skb_mac_header(skb));
- }
- skb_reset_mac_len(skb);
/* Network layer. */
if (key->eth.type == htons(ETH_P_IP)) {
offset = nh->frag_off & htons(IP_OFFSET);
if (offset) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
+ memset(&key->tp, 0, sizeof(key->tp));
return 0;
}
if (nh->frag_off & htons(IP_MF) ||
return error;
}
- if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+ if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
+ memset(&key->tp, 0, sizeof(key->tp));
return 0;
+ }
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
return 0;
}
+/**
+ * key_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @key: output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header fields as follows:
+ *
+ * - skb->mac_header: the L2 header.
+ *
+ * - skb->network_header: just past the L2 header, or just past the
+ * VLAN header, to the first byte of the L2 payload.
+ *
+ * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ * on output, then just past the IP header, if one is present and
+ * of a correct length, otherwise the same as skb->network_header.
+ * For other key->eth.type values it is left untouched.
+ *
+ * - skb->protocol: the type of the data starting at skb->network_header.
+ * Equals to key->eth.type.
+ */
+static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ struct ethhdr *eth;
+
+ /* Flags are always used as part of stats */
+ key->tp.flags = 0;
+
+ skb_reset_mac_header(skb);
+
+ /* Link layer. */
+ clear_vlan(key);
+ if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
+ if (unlikely(eth_type_vlan(skb->protocol)))
+ return -EINVAL;
+
+ skb_reset_network_header(skb);
+ key->eth.type = skb->protocol;
+ } else {
+ eth = eth_hdr(skb);
+ ether_addr_copy(key->eth.src, eth->h_source);
+ ether_addr_copy(key->eth.dst, eth->h_dest);
+
+ __skb_pull(skb, 2 * ETH_ALEN);
+ /* We are going to push all headers that we pull, so no need to
+ * update skb->csum here.
+ */
+
+ if (unlikely(parse_vlan(skb, key)))
+ return -ENOMEM;
+
+ key->eth.type = parse_ethertype(skb);
+ if (unlikely(key->eth.type == htons(0)))
+ return -ENOMEM;
+
+ /* Multiple tagged packets need to retain TPID to satisfy
+ * skb_vlan_pop(), which will later shift the ethertype into
+ * skb->protocol.
+ */
+ if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
+ skb->protocol = key->eth.cvlan.tpid;
+ else
+ skb->protocol = key->eth.type;
+
+ skb_reset_network_header(skb);
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+ }
+
+ skb_reset_mac_len(skb);
+
+ /* Fill out L3/L4 key info, if any */
+ return key_extract_l3l4(skb, key);
+}
+
+/* Conntrack fragment handling expects skbs whose data starts at the
+ * L3 header, so provide a helper for that case.
+ */
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ return key_extract_l3l4(skb, key);
+}
+
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
int res;
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
struct sk_buff *skb,
struct sw_flow_key *key);
mutex_lock(&po->pg_vec_lock);
+ /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+ * we need to confirm it under protection of pg_vec_lock.
+ */
+ if (unlikely(!po->tx_ring.pg_vec)) {
+ err = -EBUSY;
+ goto out;
+ }
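This is the classic recheck-under-lock pattern: the earlier lockless test can race with ring teardown, so the observation must be confirmed after taking pg_vec_lock and before any dereference. A userspace analogue using pthreads (hypothetical structure, for illustration only):

#include <errno.h>
#include <pthread.h>

struct tx_ring {
	pthread_mutex_t lock;
	void *pg_vec;		/* set up and torn down under 'lock' */
};

static int tx_ring_send(struct tx_ring *r)
{
	int err = 0;

	pthread_mutex_lock(&r->lock);
	if (!r->pg_vec) {	/* re-validate the earlier lockless check */
		err = -EBUSY;
		goto out;
	}
	/* ... safe to use r->pg_vec while the lock is held ... */
out:
	pthread_mutex_unlock(&r->lock);
	return err;
}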
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
{
psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
list_del(&group->list);
- kfree(group);
+ kfree_rcu(group, rcu);
}
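kfree_rcu(group, rcu) defers the free past an RCU grace period, so lockless readers that looked the group up under rcu_read_lock() can never touch freed memory. It requires a struct rcu_head member named 'rcu' in struct psample_group; that member is implied by the call above but its hunk is not shown in this excerpt. The shape of the pattern (fields abridged, as a sketch):

struct psample_group {
	struct list_head list;	/* traversed by RCU readers */
	u32 group_num;
	struct rcu_head rcu;	/* hook for the deferred kfree_rcu() */
	/* ... */
};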
static struct psample_group *
if (!kbuf)
return -ENOMEM;
- if (!copy_from_iter_full(kbuf, len, from))
+ if (!copy_from_iter_full(kbuf, len, from)) {
+ kfree(kbuf);
return -EFAULT;
+ }
ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
+ kfree(kbuf);
return ret < 0 ? ret : len;
}
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
goto out;
}
- sock_set_flag(sk, SOCK_RCU_FREE);
- ret = rds_add_bound(rs, binding_addr, &port, scope_id);
- if (ret)
- goto out;
-
- if (rs->rs_transport) { /* previously bound */
+	/* The transport can be set using the SO_RDS_TRANSPORT socket option
+	 * before the socket is bound.
+ */
+ if (rs->rs_transport) {
trans = rs->rs_transport;
if (trans->laddr_check(sock_net(sock->sk),
binding_addr, scope_id) != 0) {
ret = -ENOPROTOOPT;
- rds_remove_bound(rs);
- } else {
- ret = 0;
+ goto out;
}
- goto out;
- }
- trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
- scope_id);
- if (!trans) {
- ret = -EADDRNOTAVAIL;
- rds_remove_bound(rs);
- pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
- __func__, binding_addr);
- goto out;
+ } else {
+ trans = rds_trans_get_preferred(sock_net(sock->sk),
+ binding_addr, scope_id);
+ if (!trans) {
+ ret = -EADDRNOTAVAIL;
+ pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+ __func__, binding_addr);
+ goto out;
+ }
+ rs->rs_transport = trans;
}
- rs->rs_transport = trans;
- ret = 0;
+ sock_set_flag(sk, SOCK_RCU_FREE);
+ ret = rds_add_bound(rs, binding_addr, &port, scope_id);
out:
release_sock(sk);
void *buffer)
{
struct rds_info_rdma_connection *iinfo = buffer;
- struct rds_ib_connection *ic;
+ struct rds_ib_connection *ic = conn->c_transport_data;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
iinfo->src_addr = conn->c_laddr.s6_addr32[3];
iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
- iinfo->tos = conn->c_tos;
+ if (ic) {
+ iinfo->tos = conn->c_tos;
+ iinfo->sl = ic->i_sl;
+ }
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- ic = conn->c_transport_data;
-
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
(union ib_gid *)&iinfo->dst_gid);
void *buffer)
{
struct rds6_info_rdma_connection *iinfo6 = buffer;
- struct rds_ib_connection *ic;
+ struct rds_ib_connection *ic = conn->c_transport_data;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
iinfo6->src_addr = conn->c_laddr;
iinfo6->dst_addr = conn->c_faddr;
+ if (ic) {
+ iinfo6->tos = conn->c_tos;
+ iinfo6->sl = ic->i_sl;
+ }
memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- ic = conn->c_transport_data;
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
(union ib_gid *)&iinfo6->dst_gid);
rds_ibdev = ic->rds_ibdev;
/* Send/Recv vectors */
int i_scq_vector;
int i_rcq_vector;
+ u8 i_sl;
};
/* This assumes that atomic_t is at least 32 bits */
RDS_PROTOCOL_MINOR(conn->c_version),
ic->i_flowctl ? ", flow control" : "");
+ /* receive sl from the peer */
+ ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+
atomic_set(&ic->i_cq_quiesce, 0);
/* Init rings and fill recv. this needs to wait until protocol
static struct rdma_cm_id *rds6_rdma_listen_id;
#endif
+/* Per IB specification 7.7.3, service level is a 4-bit field. */
+#define TOS_TO_SL(tos) ((tos) & 0xF)
+
static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event,
bool isv6)
struct rds_ib_connection *ibic;
ibic = conn->c_transport_data;
- if (ibic && ibic->i_cm_id == cm_id)
+ if (ibic && ibic->i_cm_id == cm_id) {
+ cm_id->route.path_rec[0].sl =
+ TOS_TO_SL(conn->c_tos);
ret = trans->cm_initiate_connect(cm_id, isv6);
- else
+ } else {
rds_conn_drop(conn);
+ }
}
break;
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+ minfo6.tos = inc->i_conn->c_tos;
if (flip) {
minfo6.laddr = *daddr;
minfo6.fport = inc->i_hdr.h_dport;
}
+ minfo6.flags = 0;
+
rds_info_copy(iter, &minfo6, sizeof(minfo6));
}
#endif
service_in_use:
write_unlock(&local->services_lock);
- rxrpc_put_local(local);
+ rxrpc_unuse_local(local);
ret = -EADDRINUSE;
error_unlock:
release_sock(&rx->sk);
*/
void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
{
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
rxrpc_propose_ack_ping_for_check_life);
rxrpc_send_ack_packet(call, true, NULL);
}
static int rxrpc_release_sock(struct sock *sk)
{
struct rxrpc_sock *rx = rxrpc_sk(sk);
- struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
- rxrpc_queue_work(&rxnet->service_conn_reaper);
- rxrpc_queue_work(&rxnet->client_conn_reaper);
- rxrpc_put_local(rx->local);
+ rxrpc_unuse_local(rx->local);
rx->local = NULL;
key_put(rx->key);
rx->key = NULL;
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- union {
- u8 nr_jumbo; /* Number of jumbo subpackets */
- };
+ atomic_t nr_ring_pins; /* Number of rxtx ring pins */
+ u8 nr_subpackets; /* Number of subpackets */
+ u8 rx_flags; /* Received packet flags */
+#define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */
+#define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */
union {
int remain; /* amount of space remaining for next write */
+
+ /* List of requested ACKs on subpackets */
+ unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
+ BITS_PER_LONG];
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
*/
struct rxrpc_local {
struct rcu_head rcu;
- atomic_t usage;
+ atomic_t active_users; /* Number of users of the local endpoint */
+ atomic_t usage; /* Number of references to the structure */
struct rxrpc_net *rxnet; /* The network ns in which this resides */
struct list_head link;
struct socket *socket; /* my UDP socket */
#define RXRPC_TX_ANNO_LAST 0x04
#define RXRPC_TX_ANNO_RESENT 0x08
-#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */
-#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
+#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */
#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
* not hard-ACK'd packet follows this.
/* receive-phase ACK management */
u8 ackr_reason; /* reason to ACK */
- u16 ackr_skew; /* skew on packet being ACK'd */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
rxrpc_serial_t ackr_first_seq; /* first sequence number received */
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
/*
* call_event.c
*/
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_clean_up_local_conns(struct rxrpc_local *);
/*
* conn_event.c
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
* propose an ACK be sent
*/
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u16 skew, u32 serial, bool immediate,
- bool background,
+ u32 serial, bool immediate, bool background,
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
outcome = rxrpc_propose_ack_update;
call->ackr_serial = serial;
- call->ackr_skew = skew;
}
if (!immediate)
goto trace;
} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
call->ackr_reason = ack_reason;
call->ackr_serial = serial;
- call->ackr_skew = skew;
} else {
outcome = rxrpc_propose_ack_subsume;
}
* propose an ACK be sent, locking the call structure
*/
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u16 skew, u32 serial, bool immediate, bool background,
+ u32 serial, bool immediate, bool background,
enum rxrpc_propose_ack_trace why)
{
spin_lock_bh(&call->lock);
- __rxrpc_propose_ACK(call, ack_reason, skew, serial,
+ __rxrpc_propose_ACK(call, ack_reason, serial,
immediate, background, why);
spin_unlock_bh(&call->lock);
}
continue;
skb = call->rxtx_buffer[ix];
- rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
if (anno_type == RXRPC_TX_ANNO_UNACK) {
if (ktime_after(skb->tstamp, max_age)) {
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
goto out;
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
rxrpc_send_ack_packet(call, true, NULL);
goto out;
continue;
skb = call->rxtx_buffer[ix];
- rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+ rxrpc_get_skb(skb, rxrpc_skb_got);
spin_unlock_bh(&call->lock);
if (rxrpc_send_data_packet(call, skb, true) < 0) {
- rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
return;
}
if (rxrpc_is_client_call(call))
rxrpc_expose_client_call(call);
- rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
spin_lock_bh(&call->lock);
/* We need to clear the retransmit state, but there are two
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
rxrpc_propose_ack_ping_for_keepalive);
set_bit(RXRPC_CALL_EV_PING, &call->events);
}
send_ack = NULL;
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
call->acks_lost_top = call->tx_top;
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
send_ack = &call->acks_lost_ping;
}
trace_rxrpc_call(call, op, n, here, NULL);
}
+/*
+ * Clean up the RxTx skb ring.
+ */
+static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+{
+ int i;
+
+ for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+ rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
+ call->rxtx_buffer[i] = NULL;
+ }
+}
+
/*
* Detach a call from its owning socket.
*/
const void *here = __builtin_return_address(0);
struct rxrpc_connection *conn = call->conn;
bool put = false;
- int i;
_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
if (conn)
rxrpc_disconnect_call(call);
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
- rxrpc_free_skb(call->rxtx_buffer[i],
- (call->tx_phase ? rxrpc_skb_tx_cleaned :
- rxrpc_skb_rx_cleaned));
- call->rxtx_buffer[i] = NULL;
- }
-
+ rxrpc_cleanup_ring(call);
_leave("");
}
*/
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
- int i;
-
_net("DESTROY CALL %d", call->debug_id);
memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
ASSERTCMP(call->conn, ==, NULL);
- /* Clean up the Rx/Tx buffer */
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
- rxrpc_free_skb(call->rxtx_buffer[i],
- (call->tx_phase ? rxrpc_skb_tx_cleaned :
- rxrpc_skb_rx_cleaned));
-
- rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
+ rxrpc_cleanup_ring(call);
+ rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
_leave("");
}
+
+/*
+ * Clean up the client connections on a local endpoint.
+ */
+void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
+{
+ struct rxrpc_connection *conn, *tmp;
+ struct rxrpc_net *rxnet = local->rxnet;
+ unsigned int nr_active;
+ LIST_HEAD(graveyard);
+
+ _enter("");
+
+ spin_lock(&rxnet->client_conn_cache_lock);
+ nr_active = rxnet->nr_active_client_conns;
+
+ list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
+ cache_link) {
+ if (conn->params.local == local) {
+ ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
+
+ trace_rxrpc_client(conn, -1, rxrpc_client_discard);
+ if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
+ BUG();
+ conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
+ list_move(&conn->cache_link, &graveyard);
+ nr_active--;
+ }
+ }
+
+ rxnet->nr_active_client_conns = nr_active;
+ spin_unlock(&rxnet->client_conn_cache_lock);
+ ASSERTCMP(nr_active, >=, 0);
+
+ while (!list_empty(&graveyard)) {
+ conn = list_entry(graveyard.next,
+ struct rxrpc_connection, cache_link);
+ list_del_init(&conn->cache_link);
+
+ rxrpc_put_connection(conn);
+ }
+
+ _leave(" [culled]");
+}
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
- rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
ret = rxrpc_process_event(conn, skb, &abort_code);
switch (ret) {
case -EPROTO:
goto requeue_and_leave;
case -ECONNABORTED:
default:
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
break;
}
}
protocol_error:
if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
goto requeue_and_leave;
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
goto out;
}
if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
continue;
- if (rxnet->live) {
+ if (rxnet->live && !conn->params.local->dead) {
idle_timestamp = READ_ONCE(conn->idle_timestamp);
expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
if (conn->params.local->service_closed)
* Ping the other end to fill our RTT cache and to retrieve the rwind
* and MTU parameters.
*/
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
- int skew)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
ktime_t now = skb->tstamp;
if (call->peer->rtt_usage < 3 ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
true, true,
rxrpc_propose_ack_ping_for_params);
}
ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
skb = call->rxtx_buffer[ix];
annotation = call->rxtx_annotations[ix];
- rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
+ rxrpc_see_skb(skb, rxrpc_skb_rotated);
call->rxtx_buffer[ix] = NULL;
call->rxtx_annotations[ix] = 0;
skb->next = list;
skb = list;
list = skb->next;
skb_mark_not_on_list(skb);
- rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
}
return rot_last;
}
/*
- * Scan a jumbo packet to validate its structure and to work out how many
+ * Scan a data packet to validate its structure and to work out how many
* subpackets it contains.
*
* A jumbo packet is a collection of consecutive packets glued together with
* the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
* size.
*/
-static bool rxrpc_validate_jumbo(struct sk_buff *skb)
+static bool rxrpc_validate_data(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len = skb->len;
- int nr_jumbo = 1;
u8 flags = sp->hdr.flags;
- do {
- nr_jumbo++;
+ for (;;) {
+ if (flags & RXRPC_REQUEST_ACK)
+ __set_bit(sp->nr_subpackets, sp->rx_req_ack);
+ sp->nr_subpackets++;
+
+ if (!(flags & RXRPC_JUMBO_PACKET))
+ break;
+
if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
goto protocol_error;
if (flags & RXRPC_LAST_PACKET)
if (skb_copy_bits(skb, offset, &flags, 1) < 0)
goto protocol_error;
offset += sizeof(struct rxrpc_jumbo_header);
- } while (flags & RXRPC_JUMBO_PACKET);
+ }
- sp->nr_jumbo = nr_jumbo;
+ if (flags & RXRPC_LAST_PACKET)
+ sp->rx_flags |= RXRPC_SKB_INCL_LAST;
return true;
protocol_error:
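For reference, the jumbo layout being validated: every non-final subpacket is exactly RXRPC_JUMBO_DATALEN bytes and is followed by a small jumbo header whose flags byte says whether another subpacket follows. A standalone walk over that layout (constants mirror the rxrpc wire format and are shown for illustration; error handling is reduced to a sentinel):

#include <stddef.h>
#include <stdint.h>

#define JUMBO_DATALEN	1412	/* RXRPC_JUMBO_DATALEN */
#define JUMBO_HDRLEN	4	/* sizeof(struct rxrpc_jumbo_header) */
#define FLAG_JUMBO	0x20	/* RXRPC_JUMBO_PACKET */

/* Count the subpackets in a (possibly jumbo) data payload; 'payload'
 * points just past the wire header, 'flags' is the wire header's
 * flags byte.  Returns -1 on a malformed layout. */
static int count_subpackets(const uint8_t *payload, size_t len, uint8_t flags)
{
	size_t offset = 0;
	int n = 1;

	while (flags & FLAG_JUMBO) {
		if (len - offset < JUMBO_DATALEN + JUMBO_HDRLEN)
			return -1;
		offset += JUMBO_DATALEN;
		flags = payload[offset];	/* flags byte of the jumbo header */
		offset += JUMBO_HDRLEN;
		n++;
	}
	return n;
}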
* (that information is encoded in the ACK packet).
*/
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
- u8 annotation, bool *_jumbo_bad)
+ bool is_jumbo, bool *_jumbo_bad)
{
/* Discard normal packets that are duplicates. */
- if (annotation == 0)
+ if (is_jumbo)
return;
/* Skip jumbo subpackets that are duplicates. When we've had three or
}
/*
- * Process a DATA packet, adding the packet to the Rx ring.
+ * Process a DATA packet, adding the packet to the Rx ring. The caller's
+ * packet ref must be passed on or discarded.
*/
-static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
- u16 skew)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
enum rxrpc_call_state state;
- unsigned int offset = sizeof(struct rxrpc_wire_header);
- unsigned int ix;
+ unsigned int j;
rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
- rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
- bool immediate_ack = false, jumbo_bad = false, queued;
- u16 len;
- u8 ack = 0, flags, annotation = 0;
+ rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
+ bool immediate_ack = false, jumbo_bad = false;
+ u8 ack = 0;
_enter("{%u,%u},{%u,%u}",
- call->rx_hard_ack, call->rx_top, skb->len, seq);
+ call->rx_hard_ack, call->rx_top, skb->len, seq0);
- _proto("Rx DATA %%%u { #%u f=%02x }",
- sp->hdr.serial, seq, sp->hdr.flags);
+ _proto("Rx DATA %%%u { #%u f=%02x n=%u }",
+ sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
state = READ_ONCE(call->state);
- if (state >= RXRPC_CALL_COMPLETE)
+ if (state >= RXRPC_CALL_COMPLETE) {
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
return;
+ }
if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
unsigned long timo = READ_ONCE(call->next_req_timo);
!rxrpc_receiving_reply(call))
goto unlock;
- call->ackr_prev_seq = seq;
-
+ call->ackr_prev_seq = seq0;
hard_ack = READ_ONCE(call->rx_hard_ack);
- if (after(seq, hard_ack + call->rx_winsize)) {
- ack = RXRPC_ACK_EXCEEDS_WINDOW;
- ack_serial = serial;
- goto ack;
- }
- flags = sp->hdr.flags;
- if (flags & RXRPC_JUMBO_PACKET) {
+ if (sp->nr_subpackets > 1) {
if (call->nr_jumbo_bad > 3) {
ack = RXRPC_ACK_NOSPACE;
ack_serial = serial;
goto ack;
}
- annotation = 1;
}
-next_subpacket:
- queued = false;
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- len = skb->len;
- if (flags & RXRPC_JUMBO_PACKET)
- len = RXRPC_JUMBO_DATALEN;
-
- if (flags & RXRPC_LAST_PACKET) {
- if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- seq != call->rx_top) {
- rxrpc_proto_abort("LSN", call, seq);
- goto unlock;
- }
- } else {
- if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- after_eq(seq, call->rx_top)) {
- rxrpc_proto_abort("LSA", call, seq);
- goto unlock;
+ for (j = 0; j < sp->nr_subpackets; j++) {
+ rxrpc_serial_t serial = sp->hdr.serial + j;
+ rxrpc_seq_t seq = seq0 + j;
+ unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
+ bool terminal = (j == sp->nr_subpackets - 1);
+ bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
+ u8 flags, annotation = j;
+
+ _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
+ j, serial, seq, terminal, last);
+
+ if (last) {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ seq != call->rx_top) {
+ rxrpc_proto_abort("LSN", call, seq);
+ goto unlock;
+ }
+ } else {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ after_eq(seq, call->rx_top)) {
+ rxrpc_proto_abort("LSA", call, seq);
+ goto unlock;
+ }
}
- }
-
- trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
- if (before_eq(seq, hard_ack)) {
- ack = RXRPC_ACK_DUPLICATE;
- ack_serial = serial;
- goto skip;
- }
- if (flags & RXRPC_REQUEST_ACK && !ack) {
- ack = RXRPC_ACK_REQUESTED;
- ack_serial = serial;
- }
+ flags = 0;
+ if (last)
+ flags |= RXRPC_LAST_PACKET;
+ if (!terminal)
+ flags |= RXRPC_JUMBO_PACKET;
+ if (test_bit(j, sp->rx_req_ack))
+ flags |= RXRPC_REQUEST_ACK;
+ trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
- if (call->rxtx_buffer[ix]) {
- rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
- if (ack != RXRPC_ACK_DUPLICATE) {
+ if (before_eq(seq, hard_ack)) {
ack = RXRPC_ACK_DUPLICATE;
ack_serial = serial;
+ continue;
}
- immediate_ack = true;
- goto skip;
- }
-
- /* Queue the packet. We use a couple of memory barriers here as need
- * to make sure that rx_top is perceived to be set after the buffer
- * pointer and that the buffer pointer is set after the annotation and
- * the skb data.
- *
- * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
- * and also rxrpc_fill_out_ack().
- */
- rxrpc_get_skb(skb, rxrpc_skb_rx_got);
- call->rxtx_annotations[ix] = annotation;
- smp_wmb();
- call->rxtx_buffer[ix] = skb;
- if (after(seq, call->rx_top)) {
- smp_store_release(&call->rx_top, seq);
- } else if (before(seq, call->rx_top)) {
- /* Send an immediate ACK if we fill in a hole */
- if (!ack) {
- ack = RXRPC_ACK_DELAY;
- ack_serial = serial;
- }
- immediate_ack = true;
- }
- if (flags & RXRPC_LAST_PACKET) {
- set_bit(RXRPC_CALL_RX_LAST, &call->flags);
- trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
- } else {
- trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
- }
- queued = true;
- if (after_eq(seq, call->rx_expect_next)) {
- if (after(seq, call->rx_expect_next)) {
- _net("OOS %u > %u", seq, call->rx_expect_next);
- ack = RXRPC_ACK_OUT_OF_SEQUENCE;
- ack_serial = serial;
+ if (call->rxtx_buffer[ix]) {
+ rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
+ &jumbo_bad);
+ if (ack != RXRPC_ACK_DUPLICATE) {
+ ack = RXRPC_ACK_DUPLICATE;
+ ack_serial = serial;
+ }
+ immediate_ack = true;
+ continue;
}
- call->rx_expect_next = seq + 1;
- }
-skip:
- offset += len;
- if (flags & RXRPC_JUMBO_PACKET) {
- if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
- rxrpc_proto_abort("XJF", call, seq);
- goto unlock;
- }
- offset += sizeof(struct rxrpc_jumbo_header);
- seq++;
- serial++;
- annotation++;
- if (flags & RXRPC_JUMBO_PACKET)
- annotation |= RXRPC_RX_ANNO_JLAST;
if (after(seq, hard_ack + call->rx_winsize)) {
ack = RXRPC_ACK_EXCEEDS_WINDOW;
ack_serial = serial;
- if (!jumbo_bad) {
- call->nr_jumbo_bad++;
- jumbo_bad = true;
+ if (flags & RXRPC_JUMBO_PACKET) {
+ if (!jumbo_bad) {
+ call->nr_jumbo_bad++;
+ jumbo_bad = true;
+ }
}
+
goto ack;
}
- _proto("Rx DATA Jumbo %%%u", serial);
- goto next_subpacket;
- }
+ if (flags & RXRPC_REQUEST_ACK && !ack) {
+ ack = RXRPC_ACK_REQUESTED;
+ ack_serial = serial;
+ }
+
+		/* Queue the packet. We use a couple of memory barriers here as we
+		 * need to make sure that rx_top is perceived to be set after the buffer
+ * pointer and that the buffer pointer is set after the annotation and
+ * the skb data.
+ *
+ * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
+ * and also rxrpc_fill_out_ack().
+ */
+ if (!terminal)
+ rxrpc_get_skb(skb, rxrpc_skb_got);
+ call->rxtx_annotations[ix] = annotation;
+ smp_wmb();
+ call->rxtx_buffer[ix] = skb;
+ if (after(seq, call->rx_top)) {
+ smp_store_release(&call->rx_top, seq);
+ } else if (before(seq, call->rx_top)) {
+ /* Send an immediate ACK if we fill in a hole */
+ if (!ack) {
+ ack = RXRPC_ACK_DELAY;
+ ack_serial = serial;
+ }
+ immediate_ack = true;
+ }
- if (queued && flags & RXRPC_LAST_PACKET && !ack) {
- ack = RXRPC_ACK_DELAY;
- ack_serial = serial;
+ if (terminal) {
+ /* From this point on, we're not allowed to touch the
+ * packet any longer as its ref now belongs to the Rx
+ * ring.
+ */
+ skb = NULL;
+ }
+
+ if (last) {
+ set_bit(RXRPC_CALL_RX_LAST, &call->flags);
+ if (!ack) {
+ ack = RXRPC_ACK_DELAY;
+ ack_serial = serial;
+ }
+ trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
+ } else {
+ trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
+ }
+
+ if (after_eq(seq, call->rx_expect_next)) {
+ if (after(seq, call->rx_expect_next)) {
+ _net("OOS %u > %u", seq, call->rx_expect_next);
+ ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+ ack_serial = serial;
+ }
+ call->rx_expect_next = seq + 1;
+ }
}
ack:
if (ack)
- rxrpc_propose_ACK(call, ack, skew, ack_serial,
+ rxrpc_propose_ACK(call, ack, ack_serial,
immediate_ack, true,
rxrpc_propose_ack_input_data);
else
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
false, true,
rxrpc_propose_ack_input_data);
- if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) {
+ if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
}
unlock:
spin_unlock(&call->input_lock);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
_leave(" [queued]");
}
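The barrier comment above describes a publish/consume pairing: the slot contents must become visible before the new ring top does. In portable C11 terms the same guarantee is a release store on the index paired with an acquire load, a simplified analogue of the smp_wmb()/smp_store_release() usage here:

#include <stdatomic.h>

struct rx_ring {
	void *buf[64];
	_Atomic unsigned int top;	/* highest published sequence */
};

/* Writer: fill the slot, then publish the new top with release
 * semantics so the slot store is visible before the index moves. */
static void ring_publish(struct rx_ring *r, unsigned int seq, void *item)
{
	r->buf[seq & 63] = item;
	atomic_store_explicit(&r->top, seq, memory_order_release);
}

/* Reader: the acquire load pairs with the release store, so a slot
 * at or below top is guaranteed visible.  (Slot reuse is ignored;
 * this only mirrors the publish ordering.) */
static void *ring_peek(struct rx_ring *r, unsigned int seq)
{
	unsigned int top = atomic_load_explicit(&r->top, memory_order_acquire);

	if ((int)(top - seq) < 0)
		return NULL;		/* not yet published */
	return r->buf[seq & 63];
}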
* soft-ACK means that the packet may be discarded and retransmission
* requested. A phase is complete when all packets are hard-ACK'd.
*/
-static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
- u16 skew)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
if (buf.ack.reason == RXRPC_ACK_PING) {
_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- skew, sp->hdr.serial, true, true,
+ sp->hdr.serial, true, true,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- skew, sp->hdr.serial, true, true,
+ sp->hdr.serial, true, true,
rxrpc_propose_ack_respond_to_ack);
}
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
* Process an incoming call packet.
*/
static void rxrpc_input_call_packet(struct rxrpc_call *call,
- struct sk_buff *skb, u16 skew)
+ struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long timo;
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
- rxrpc_input_data(call, skb, skew);
- break;
+ rxrpc_input_data(call, skb);
+ goto no_free;
case RXRPC_PACKET_TYPE_ACK:
- rxrpc_input_ack(call, skb, skew);
+ rxrpc_input_ack(call, skb);
break;
case RXRPC_PACKET_TYPE_BUSY:
break;
}
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+no_free:
_leave("");
}
{
_enter("%p,%p", local, skb);
- skb_queue_tail(&local->event_queue, skb);
- rxrpc_queue_local(local);
+ if (rxrpc_get_local_maybe(local)) {
+ skb_queue_tail(&local->event_queue, skb);
+ rxrpc_queue_local(local);
+ } else {
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ }
}
/*
{
CHECK_SLAB_OKAY(&local->usage);
- skb_queue_tail(&local->reject_queue, skb);
- rxrpc_queue_local(local);
+ if (rxrpc_get_local_maybe(local)) {
+ skb_queue_tail(&local->reject_queue, skb);
+ rxrpc_queue_local(local);
+ } else {
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ }
}
/*
struct rxrpc_peer *peer = NULL;
struct rxrpc_sock *rx = NULL;
unsigned int channel;
- int skew = 0;
_enter("%p", udp_sk);
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
- rxrpc_new_skb(skb, rxrpc_skb_rx_received);
+ rxrpc_new_skb(skb, rxrpc_skb_received);
skb_pull(skb, sizeof(struct udphdr));
static int lose;
if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp);
- rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+ rxrpc_free_skb(skb, rxrpc_skb_lost);
return 0;
}
}
if (sp->hdr.callNumber == 0 ||
sp->hdr.seq == 0)
goto bad_message;
- if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
- !rxrpc_validate_jumbo(skb))
+ if (!rxrpc_validate_data(skb))
goto bad_message;
+
+ /* Unshare the packet so that it can be modified for in-place
+ * decryption.
+ */
+ if (sp->hdr.securityIndex != 0) {
+ struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
+ if (!nskb) {
+ rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+ goto out;
+ }
+
+ if (nskb != skb) {
+ rxrpc_eaten_skb(skb, rxrpc_skb_received);
+ skb = nskb;
+ rxrpc_new_skb(skb, rxrpc_skb_unshared);
+ sp = rxrpc_skb(skb);
+ }
+ }
break;
case RXRPC_PACKET_TYPE_CHALLENGE:
goto out;
}
- /* Note the serial number skew here */
- skew = (int)sp->hdr.serial - (int)conn->hi_serial;
- if (skew >= 0) {
- if (skew > 0)
- conn->hi_serial = sp->hdr.serial;
- } else {
- skew = -skew;
- skew = min(skew, 65535);
- }
+ if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
+ conn->hi_serial = sp->hdr.serial;
/* Call-bound packets are routed by connection channel. */
channel = sp->hdr.cid & RXRPC_CHANNELMASK;
call = rxrpc_new_incoming_call(local, rx, skb);
if (!call)
goto reject_packet;
- rxrpc_send_ping(call, skb, skew);
+ rxrpc_send_ping(call, skb);
mutex_unlock(&call->user_mutex);
}
- rxrpc_input_call_packet(call, skb, skew);
- goto discard;
+ /* Process a call packet; this either discards or passes on the ref
+ * elsewhere.
+ */
+ rxrpc_input_call_packet(call, skb);
+ goto out;
discard:
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
out:
trace_rxrpc_rx_done(0, 0);
return 0;
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
_debug("{%d},{%u}", local->debug_id, sp->hdr.type);
switch (sp->hdr.type) {
break;
}
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
}
_leave("");
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
atomic_set(&local->usage, 1);
+ atomic_set(&local->active_users, 1);
local->rxnet = rxnet;
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
}
_leave(" = %p", local);
* bind the transport socket may still fail if we're attempting
* to use a local address that the dying object is still using.
*/
- if (!rxrpc_get_local_maybe(local)) {
- cursor = cursor->next;
- list_del_init(&local->link);
+ if (!rxrpc_use_local(local))
break;
- }
age = "old";
goto found;
if (ret < 0)
goto sock_error;
- list_add_tail(&local->link, cursor);
+ if (cursor != &rxnet->local_endpoints)
+ list_replace_init(cursor, &local->link);
+ else
+ list_add_tail(&local->link, cursor);
age = "new";
found:
int n;
n = atomic_inc_return(&local->usage);
- trace_rxrpc_local(local, rxrpc_local_got, n, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
return local;
}
if (local) {
int n = atomic_fetch_add_unless(&local->usage, 1, 0);
if (n > 0)
- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+ n + 1, here);
else
local = NULL;
}
}
/*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
*/
void rxrpc_queue_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = local->debug_id;
+ int n = atomic_read(&local->usage);
if (rxrpc_queue_work(&local->processor))
- trace_rxrpc_local(local, rxrpc_local_queued,
- atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
- _enter("%d", local->debug_id);
- rxrpc_queue_work(&local->processor);
+ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+ else
+ rxrpc_put_local(local);
}
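
Because the caller's reference now travels with the work item, the failure path of queue_work() (the item was already pending) must consume that reference itself or it leaks, which is exactly what the rxrpc_put_local() in the else branch does. The hand-off in the abstract, with a hypothetical obj type and put_obj() helper:

#include <linux/workqueue.h>

struct obj {
	struct work_struct work;
	/* refcount elided */
};

void put_obj(struct obj *o);

/* Consumes the caller's reference: the worker drops it when it runs;
 * if the work was already queued, nothing else would drop it, so do
 * that here. */
static void queue_obj(struct obj *o)
{
	if (!queue_work(system_wq, &o->work))
		put_obj(o);
}
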
/*
if (local) {
n = atomic_dec_return(&local->usage);
- trace_rxrpc_local(local, rxrpc_local_put, n, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
if (n == 0)
- __rxrpc_put_local(local);
+ call_rcu(&local->rcu, rxrpc_local_rcu);
+ }
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+ unsigned int au;
+
+ local = rxrpc_get_local_maybe(local);
+ if (!local)
+ return NULL;
+
+ au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+ if (au == 0) {
+ rxrpc_put_local(local);
+ return NULL;
+ }
+
+ return local;
+}
+
+/*
+ * Cease using a local endpoint. Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+ unsigned int au;
+
+ if (local) {
+ au = atomic_dec_return(&local->active_users);
+ if (au == 0)
+ rxrpc_queue_local(local);
+ else
+ rxrpc_put_local(local);
}
}
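
rxrpc_use_local()/rxrpc_unuse_local() layer an active-user count on top of the plain refcount: new work is admitted only while the count is non-zero, and the transition to zero queues teardown instead of freeing. The add-unless-zero step is the crux; a standalone C11 sketch of it with generic names:

#include <stdatomic.h>
#include <stdbool.h>

/* Increment 'active' only if it is currently non-zero; mirrors the
 * atomic_fetch_add_unless(&local->active_users, 1, 0) call above. */
static bool get_active(atomic_int *active)
{
	int a = atomic_load(active);

	while (a != 0)
		if (atomic_compare_exchange_weak(active, &a, a + 1))
			return true;	/* joined the active users */

	return false;			/* object already shutting down */
}
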
_enter("%d", local->debug_id);
- /* We can get a race between an incoming call packet queueing the
- * processor again and the work processor starting the destruction
- * process which will shut down the UDP socket.
- */
- if (local->dead) {
- _leave(" [already dead]");
- return;
- }
local->dead = true;
mutex_lock(&rxnet->local_mutex);
list_del_init(&local->link);
mutex_unlock(&rxnet->local_mutex);
- ASSERT(RB_EMPTY_ROOT(&local->client_conns));
+ rxrpc_clean_up_local_conns(local);
+ rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
ASSERT(!local->service);
if (socket) {
*/
rxrpc_purge_queue(&local->reject_queue);
rxrpc_purge_queue(&local->event_queue);
-
- _debug("rcu local %d", local->debug_id);
- call_rcu(&local->rcu, rxrpc_local_rcu);
}
/*
- * Process events on an endpoint
+ * Process events on an endpoint. The work item carries a ref which
+ * we must release.
*/
static void rxrpc_local_processor(struct work_struct *work)
{
container_of(work, struct rxrpc_local, processor);
bool again;
- trace_rxrpc_local(local, rxrpc_local_processing,
+ trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
atomic_read(&local->usage), NULL);
do {
again = false;
- if (atomic_read(&local->usage) == 0)
- return rxrpc_local_destroyer(local);
+ if (atomic_read(&local->active_users) == 0) {
+ rxrpc_local_destroyer(local);
+ break;
+ }
if (!skb_queue_empty(&local->reject_queue)) {
rxrpc_reject_packets(local);
again = true;
}
} while (again);
+
+ rxrpc_put_local(local);
}
/*
*_top = top;
pkt->ack.bufferSpace = htons(8);
- pkt->ack.maxSkew = htons(call->ackr_skew);
+ pkt->ack.maxSkew = htons(0);
pkt->ack.firstPacket = htonl(hard_ack + 1);
pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
pkt->ack.serial = htonl(serial);
if (ping)
clear_bit(RXRPC_CALL_PINGING, &call->flags);
rxrpc_propose_ACK(call, pkt->ack.reason,
- ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
false, true,
rxrpc_propose_ack_retry_tx);
memset(&whdr, 0, sizeof(whdr));
while ((skb = skb_dequeue(&local->reject_queue))) {
- rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
sp = rxrpc_skb(skb);
switch (skb->mark) {
ioc = 2;
break;
default:
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
continue;
}
rxrpc_tx_point_reject);
}
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
}
_leave("");
_leave("UDP socket errqueue empty");
return;
}
- rxrpc_new_skb(skb, rxrpc_skb_rx_received);
+ rxrpc_new_skb(skb, rxrpc_skb_received);
serr = SKB_EXT_ERR(skb);
if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
return;
}
peer = NULL;
if (!peer) {
rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
_leave(" [no peer]");
return;
}
serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
rxrpc_adjust_mtu(peer, serr);
rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
rxrpc_put_peer(peer);
_leave(" [MTU update]");
return;
rxrpc_store_error(peer, serr);
rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
rxrpc_put_peer(peer);
_leave("");
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
+/*
+ * The maximum number of subpackets that can possibly fit in a UDP packet is:
+ *
+ * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1
+ * = ((65535 - 28 - 28) / 1416) + 1
+ * = 46 non-terminal packets and 1 terminal packet.
+ */
+#define RXRPC_MAX_NR_JUMBO 47
+
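
A quick standalone check of the arithmetic in the comment above, taking the header sizes from the comment and RXRPC_JUMBO_SUBPKTLEN = 1412 + 4:

#include <assert.h>

int main(void)
{
	/* ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1 */
	int nr = (65535 - 28 - 28) / 1416 + 1;

	assert(nr == 47);	/* matches RXRPC_MAX_NR_JUMBO */
	return 0;
}
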
/*****************************************************************************/
/*
* on-the-wire Rx ACK packet data payload
ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
- rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true,
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
rxrpc_propose_ack_terminal_ack);
//rxrpc_send_ack_packet(call, false, NULL);
}
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock);
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
rxrpc_propose_ack_processing_op);
break;
default:
struct sk_buff *skb;
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
- u8 flags;
+ bool last = false;
+ u8 subpacket;
int ix;
_enter("%d", call->debug_id);
hard_ack++;
ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
skb = call->rxtx_buffer[ix];
- rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
+ rxrpc_see_skb(skb, rxrpc_skb_rotated);
sp = rxrpc_skb(skb);
- flags = sp->hdr.flags;
- serial = sp->hdr.serial;
- if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
- serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;
+
+ subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
+ serial = sp->hdr.serial + subpacket;
+
+ if (subpacket == sp->nr_subpackets - 1 &&
+ sp->rx_flags & RXRPC_SKB_INCL_LAST)
+ last = true;
call->rxtx_buffer[ix] = NULL;
call->rxtx_annotations[ix] = 0;
/* Barrier against rxrpc_input_data(). */
smp_store_release(&call->rx_hard_ack, hard_ack);
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
- _debug("%u,%u,%02x", hard_ack, top, flags);
trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
- if (flags & RXRPC_LAST_PACKET) {
+ if (last) {
rxrpc_end_rx_phase(call, serial);
} else {
/* Check to see if there's an ACK that needs sending. */
if (after_eq(hard_ack, call->ackr_consumed + 2) ||
after_eq(top, call->ackr_seen + 2) ||
(hard_ack == top && after(hard_ack, call->ackr_consumed)))
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
true, true,
rxrpc_propose_ack_rotate_rx);
if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
rxrpc_seq_t seq = sp->hdr.seq;
u16 cksum = sp->hdr.cksum;
+ u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
_enter("");
/* For all but the head jumbo subpacket, the security checksum is in a
* jumbo header immediately prior to the data.
*/
- if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
+ if (subpacket > 0) {
__be16 tmp;
if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
BUG();
cksum = ntohs(tmp);
- seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
+ seq += subpacket;
}
return call->conn->security->verify_packet(call, skb, offset, len,
u8 *_annotation,
unsigned int *_offset, unsigned int *_len)
{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len;
int ret;
u8 annotation = *_annotation;
+ u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
/* Locate the subpacket */
+ offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
len = skb->len - offset;
- if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
- offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
- RXRPC_JUMBO_SUBPKTLEN);
- len = (annotation & RXRPC_RX_ANNO_JLAST) ?
- skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
- }
+ if (subpacket < sp->nr_subpackets - 1)
+ len = RXRPC_JUMBO_DATALEN;
if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
+ rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top, seq;
size_t remain;
bool last;
break;
}
smp_rmb();
- rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
sp = rxrpc_skb(skb);
- if (!(flags & MSG_PEEK))
+ if (!(flags & MSG_PEEK)) {
+ serial = sp->hdr.serial;
+ serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
trace_rxrpc_receive(call, rxrpc_receive_front,
- sp->hdr.serial, seq);
+ serial, seq);
+ }
if (msg)
sock_recv_timestamp(msg, sock->sk, skb);
struct rxrpc_skb_priv *sp;
struct rxrpc_crypt iv;
struct scatterlist sg[16];
- struct sk_buff *trailer;
unsigned int len;
u16 check;
- int nsg;
int err;
sp = rxrpc_skb(skb);
crypto_skcipher_encrypt(req);
/* we want to encrypt the skbuff in-place */
- nsg = skb_cow_data(skb, 0, &trailer);
- err = -ENOMEM;
- if (nsg < 0 || nsg > 16)
+ err = -EMSGSIZE;
+ if (skb_shinfo(skb)->nr_frags > 16)
goto out;
len = data_size + call->conn->size_align - 1;
len &= ~(call->conn->size_align - 1);
- sg_init_table(sg, nsg);
+ sg_init_table(sg, ARRAY_SIZE(sg));
err = skb_to_sgvec(skb, sg, 0, len);
if (unlikely(err < 0))
goto out;
struct rxkad_level1_hdr sechdr;
struct rxrpc_crypt iv;
struct scatterlist sg[16];
- struct sk_buff *trailer;
bool aborted;
u32 data_size, buf;
u16 check;
- int nsg, ret;
+ int ret;
_enter("");
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
* directly into the target buffer.
*/
- nsg = skb_cow_data(skb, 0, &trailer);
- if (nsg < 0 || nsg > 16)
- goto nomem;
-
- sg_init_table(sg, nsg);
+ sg_init_table(sg, ARRAY_SIZE(sg));
ret = skb_to_sgvec(skb, sg, offset, 8);
if (unlikely(ret < 0))
return ret;
if (aborted)
rxrpc_send_abort_packet(call);
return -EPROTO;
-
-nomem:
- _leave(" = -ENOMEM");
- return -ENOMEM;
}
/*
struct rxkad_level2_hdr sechdr;
struct rxrpc_crypt iv;
struct scatterlist _sg[4], *sg;
- struct sk_buff *trailer;
bool aborted;
u32 data_size, buf;
u16 check;
/* Decrypt the skbuff in-place. TODO: We really want to decrypt
* directly into the target buffer.
*/
- nsg = skb_cow_data(skb, 0, &trailer);
- if (nsg < 0)
- goto nomem;
-
sg = _sg;
- if (unlikely(nsg > 4)) {
+ nsg = skb_shinfo(skb)->nr_frags;
+ if (nsg <= 4) {
+ nsg = 4;
+ } else {
sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
if (!sg)
goto nomem;
skb->tstamp = ktime_get_real();
ix = seq & RXRPC_RXTX_BUFF_MASK;
- rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+ rxrpc_get_skb(skb, rxrpc_skb_got);
call->rxtx_annotations[ix] = annotation;
smp_wmb();
call->rxtx_buffer[ix] = skb;
}
out:
- rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
_leave(" = %d", ret);
return ret;
}
skb = call->tx_pending;
call->tx_pending = NULL;
- rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+ rxrpc_see_skb(skb, rxrpc_skb_seen);
copied = 0;
do {
if (!skb)
goto maybe_error;
- rxrpc_new_skb(skb, rxrpc_skb_tx_new);
+ sp = rxrpc_skb(skb);
+ sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
+ rxrpc_new_skb(skb, rxrpc_skb_new);
_debug("ALLOC SEND %p", skb);
skb_reserve(skb, call->conn->security_size);
skb->len += call->conn->security_size;
- sp = rxrpc_skb(skb);
sp->remain = chunk;
if (sp->remain > skb_tailroom(skb))
sp->remain = skb_tailroom(skb);
return ret;
call_terminated:
- rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
_leave(" = %d", call->error);
return call->error;
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
+#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
+#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
/*
* Note the allocation or reception of a socket buffer.
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+ int n = atomic_inc_return(select_skb_count(skb));
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+ rxrpc_skb(skb)->rx_flags, here);
}
/*
{
const void *here = __builtin_return_address(0);
if (skb) {
- int n = atomic_read(select_skb_count(op));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+ int n = atomic_read(select_skb_count(skb));
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+ rxrpc_skb(skb)->rx_flags, here);
}
}
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+ int n = atomic_inc_return(select_skb_count(skb));
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+ rxrpc_skb(skb)->rx_flags, here);
skb_get(skb);
}
+/*
+ * Note the dropping of a ref on a socket buffer by the core.
+ */
+void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+{
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&rxrpc_n_rx_skbs);
+ trace_rxrpc_skb(skb, op, 0, n, 0, here);
+}
+
/*
* Note the destruction of a socket buffer.
*/
if (skb) {
int n;
CHECK_SLAB_OKAY(&skb->users);
- n = atomic_dec_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
+ n = atomic_dec_return(select_skb_count(skb));
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
+ rxrpc_skb(skb)->rx_flags, here);
kfree_skb(skb);
}
}
const void *here = __builtin_return_address(0);
struct sk_buff *skb;
while ((skb = skb_dequeue((list))) != NULL) {
- int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
- trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
- refcount_read(&skb->users), n, here);
+ int n = atomic_dec_return(select_skb_count(skb));
+ trace_rxrpc_skb(skb, rxrpc_skb_purged,
+ refcount_read(&skb->users), n,
+ rxrpc_skb(skb)->rx_flags, here);
kfree_skb(skb);
}
}
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
- return tc_action_net_init(tn, &act_bpf_ops);
+ return tc_action_net_init(net, tn, &act_bpf_ops);
}
static void __net_exit bpf_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
- return tc_action_net_init(tn, &act_connmark_ops);
+ return tc_action_net_init(net, tn, &act_connmark_ops);
}
static void __net_exit connmark_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
- return tc_action_net_init(tn, &act_csum_ops);
+ return tc_action_net_init(net, tn, &act_csum_ops);
}
static void __net_exit csum_exit_net(struct list_head *net_list)
tn->labels = true;
}
- return tc_action_net_init(&tn->tn, &act_ct_ops);
+ return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}
static void __net_exit ct_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
- return tc_action_net_init(tn, &act_ctinfo_ops);
+ return tc_action_net_init(net, tn, &act_ctinfo_ops);
}
static void __net_exit ctinfo_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
- return tc_action_net_init(tn, &act_gact_ops);
+ return tc_action_net_init(net, tn, &act_gact_ops);
}
static void __net_exit gact_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
- return tc_action_net_init(tn, &act_ife_ops);
+ return tc_action_net_init(net, tn, &act_ife_ops);
}
static void __net_exit ife_exit_net(struct list_head *net_list)
return 0;
}
-static void ipt_destroy_target(struct xt_entry_target *t)
+static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
struct xt_tgdtor_param par = {
.target = t->u.kernel.target,
.targinfo = t->data,
.family = NFPROTO_IPV4,
+ .net = net,
};
if (par.target->destroy != NULL)
par.target->destroy(&par);
struct tcf_ipt *ipt = to_ipt(a);
if (ipt->tcfi_t) {
- ipt_destroy_target(ipt->tcfi_t);
+ ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
kfree(ipt->tcfi_t);
}
kfree(ipt->tcfi_tname);
spin_lock_bh(&ipt->tcf_lock);
if (ret != ACT_P_CREATED) {
- ipt_destroy_target(ipt->tcfi_t);
+ ipt_destroy_target(ipt->tcfi_t, net);
kfree(ipt->tcfi_tname);
kfree(ipt->tcfi_t);
}
{
struct tc_action_net *tn = net_generic(net, ipt_net_id);
- return tc_action_net_init(tn, &act_ipt_ops);
+ return tc_action_net_init(net, tn, &act_ipt_ops);
}
static void __net_exit ipt_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, xt_net_id);
- return tc_action_net_init(tn, &act_xt_ops);
+ return tc_action_net_init(net, tn, &act_xt_ops);
}
static void __net_exit xt_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
- return tc_action_net_init(tn, &act_mirred_ops);
+ return tc_action_net_init(net, tn, &act_mirred_ops);
}
static void __net_exit mirred_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, mpls_net_id);
- return tc_action_net_init(tn, &act_mpls_ops);
+ return tc_action_net_init(net, tn, &act_mpls_ops);
}
static void __net_exit mpls_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
- return tc_action_net_init(tn, &act_nat_ops);
+ return tc_action_net_init(net, tn, &act_nat_ops);
}
static void __net_exit nat_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
- return tc_action_net_init(tn, &act_pedit_ops);
+ return tc_action_net_init(net, tn, &act_pedit_ops);
}
static void __net_exit pedit_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, police_net_id);
- return tc_action_net_init(tn, &act_police_ops);
+ return tc_action_net_init(net, tn, &act_police_ops);
}
static void __net_exit police_exit_net(struct list_head *net_list)
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
s->rate = rate;
s->psample_group_num = psample_group_num;
- RCU_INIT_POINTER(s->psample_group, psample_group);
+ rcu_swap_protected(s->psample_group, psample_group,
+ lockdep_is_held(&s->tcf_lock));
if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
s->truncate = true;
s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
}
spin_unlock_bh(&s->tcf_lock);
+
+ if (psample_group)
+ psample_group_put(psample_group);
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
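
The sample fix follows the standard RCU update shape: swap the new group into the RCU-visible pointer under the lock, keep the old value, and release it only after the lock is dropped so readers under rcu_read_lock() never see a freed group. A sketch with hypothetical types (rcu_swap_protected() leaves the old pointer in its second argument):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct grp { struct rcu_head rcu; };

struct sampler {
	spinlock_t lock;
	struct grp __rcu *group;
};

/* Install 'newg'; afterwards 'newg' holds the OLD group, which is
 * freed only after a grace period has elapsed. */
static void sampler_set_group(struct sampler *s, struct grp *newg)
{
	spin_lock_bh(&s->lock);
	rcu_swap_protected(s->group, newg, lockdep_is_held(&s->lock));
	spin_unlock_bh(&s->lock);

	if (newg)
		kfree_rcu(newg, rcu);
}
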
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
- return tc_action_net_init(tn, &act_sample_ops);
+ return tc_action_net_init(net, tn, &act_sample_ops);
}
static void __net_exit sample_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
- return tc_action_net_init(tn, &act_simp_ops);
+ return tc_action_net_init(net, tn, &act_simp_ops);
}
static void __net_exit simp_exit_net(struct list_head *net_list)
return tcf_idr_search(tn, a, index);
}
+static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
+{
+ return nla_total_size(sizeof(struct tc_skbedit))
+ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
+ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
+ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
+ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
+ + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
+}
+
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.id = TCA_ID_SKBEDIT,
.init = tcf_skbedit_init,
.cleanup = tcf_skbedit_cleanup,
.walk = tcf_skbedit_walker,
+ .get_fill_size = tcf_skbedit_get_fill_size,
.lookup = tcf_skbedit_search,
.size = sizeof(struct tcf_skbedit),
};
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
- return tc_action_net_init(tn, &act_skbedit_ops);
+ return tc_action_net_init(net, tn, &act_skbedit_ops);
}
static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
- return tc_action_net_init(tn, &act_skbmod_ops);
+ return tc_action_net_init(net, tn, &act_skbmod_ops);
}
static void __net_exit skbmod_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
- return tc_action_net_init(tn, &act_tunnel_key_ops);
+ return tc_action_net_init(net, tn, &act_tunnel_key_ops);
}
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
- return tc_action_net_init(tn, &act_vlan_ops);
+ return tc_action_net_init(net, tn, &act_vlan_ops);
}
static void __net_exit vlan_exit_net(struct list_head *net_list)
cl = cops->find(q, portid);
if (!cl)
return;
+ if (!cops->tcf_block)
+ return;
block = cops->tcf_block(q, cl, NULL);
if (!block)
return;
s64 credits;
int len;
- if (atomic64_read(&q->port_rate) == -1) {
- WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
- return NULL;
- }
-
if (q->credits < 0) {
credits = timediff_to_credits(now - q->last, q->idleslope);
static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
struct ethtool_link_ksettings ecmd;
+ int speed = SPEED_10;
int port_rate = -1;
+ int err;
+
+ err = __ethtool_get_link_ksettings(dev, &ecmd);
+ if (err < 0)
+ goto skip;
+
+ if (ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
- if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
- ecmd.base.speed != SPEED_UNKNOWN)
- port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
+skip:
+ port_rate = speed * 1000 * BYTES_PER_KBIT;
atomic64_set(&q->port_rate, port_rate);
netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
+#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
+
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
const struct netdev_queue *txq = q->dev_queue;
q->q.qlen--;
}
} else {
- skb = NULL;
+ skb = SKB_XOFF_MAGIC;
}
}
return skb;
skb = qdisc_dequeue_skb_bad_txq(q);
- if (unlikely(skb))
+ if (unlikely(skb)) {
+ if (skb == SKB_XOFF_MAGIC)
+ return NULL;
goto bulk;
+ }
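
SKB_XOFF_MAGIC is a small integer cast to a pointer, letting the dequeue path distinguish "txq stopped" from the ordinary NULL "queue empty" result without widening the return type. The sentinel-pointer idiom in isolation, with hypothetical names:

struct item;

struct queue {
	int stopped;
	struct item *head;
};

#define QUEUE_STOPPED ((struct item *)1UL)	/* never a real pointer */

/* Three-way result in one pointer: an item, NULL (empty), or the
 * sentinel (stopped). */
static struct item *try_dequeue(struct queue *q)
{
	if (q->stopped)
		return QUEUE_STOPPED;

	return q->head;		/* may be NULL when empty */
}
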
skb = q->dequeue(q);
if (skb) {
bulk:
err = skb_array_produce(q, skb);
- if (unlikely(err))
- return qdisc_drop_cpu(skb, qdisc, to_free);
+ if (unlikely(err)) {
+ if (qdisc_is_percpu_stats(qdisc))
+ return qdisc_drop_cpu(skb, qdisc, to_free);
+ else
+ return qdisc_drop(skb, qdisc, to_free);
+ }
qdisc_update_stats_at_enqueue(qdisc, pkt_len);
return NET_XMIT_SUCCESS;
kfree_skb(skb);
}
- for_each_possible_cpu(i) {
- struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+ if (qdisc_is_percpu_stats(qdisc)) {
+ for_each_possible_cpu(i) {
+ struct gnet_stats_queue *q;
- q->backlog = 0;
- q->qlen = 0;
+ q = per_cpu_ptr(qdisc->cpu_qstats, i);
+ q->backlog = 0;
+ q->qlen = 0;
+ }
}
}
new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
- if (non_hh_quantum > INT_MAX)
+ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
return -EINVAL;
sch_tree_lock(sch);
u32 gate_mask;
int i;
- if (atomic64_read(&q->picos_per_byte) == -1) {
- WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
- return NULL;
- }
-
rcu_read_lock();
entry = rcu_dereference(q->current_entry);
/* if there's no entry, it means that the schedule didn't
struct taprio_sched *q)
{
struct ethtool_link_ksettings ecmd;
- int picos_per_byte = -1;
+ int speed = SPEED_10;
+ int picos_per_byte;
+ int err;
- if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
- ecmd.base.speed != SPEED_UNKNOWN)
- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
- ecmd.base.speed * 1000 * 1000);
+ err = __ethtool_get_link_ksettings(dev, &ecmd);
+ if (err < 0)
+ goto skip;
+
+ if (ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+skip:
+ picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+ speed * 1000 * 1000);
atomic64_set(&q->picos_per_byte, picos_per_byte);
netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
spin_unlock_bh(qdisc_lock(sch));
free_sched:
- kfree(new_admin);
+ if (new_admin)
+ call_rcu(&new_admin->rcu, taprio_free_sched_cb);
return err;
}
*/
q->clockid = -1;
+ spin_lock(&taprio_list_lock);
+ list_add(&q->taprio_list, &taprio_list);
+ spin_unlock(&taprio_list_lock);
+
if (sch->parent != TC_H_ROOT)
return -EOPNOTSUPP;
if (!opt)
return -EINVAL;
- spin_lock(&taprio_list_lock);
- list_add(&q->taprio_list, &taprio_list);
- spin_unlock(&taprio_list_lock);
-
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
struct Qdisc *qdisc;
return status;
}
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
{
/* Free the control endpoint. */
inet_ctl_sock_destroy(net->sctp.ctl_sock);
*/
if (net->sctp.pf_enable &&
(transport->state == SCTP_ACTIVE) &&
- (asoc->pf_retrans < transport->pathmaxrxt) &&
- (transport->error_count > asoc->pf_retrans)) {
+ (transport->error_count < transport->pathmaxrxt) &&
+ (transport->error_count > transport->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
SCTP_TRANSPORT_PF,
return retval;
}
-static long sctp_get_port_local(struct sock *, union sctp_addr *);
+static int sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
* detection.
*/
addr->v4.sin_port = htons(snum);
- if ((ret = sctp_get_port_local(sk, addr))) {
+ if (sctp_get_port_local(sk, addr))
return -EADDRINUSE;
- }
/* Refresh ephemeral port. */
if (!bp->port)
ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
SCTP_ADDR_SRC, GFP_ATOMIC);
- /* Copy back into socket for getsockname() use. */
- if (!ret) {
- inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
- sp->pf->to_sk_saddr(addr, sk);
+ if (ret) {
+ sctp_put_port(sk);
+ return ret;
}
+ /* Copy back into socket for getsockname() use. */
+ inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
+ sp->pf->to_sk_saddr(addr, sk);
return ret;
}
val.spt_pathmaxrxt = trans->pathmaxrxt;
val.spt_pathpfthld = trans->pf_retrans;
- return 0;
+ goto out;
}
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
val.spt_pathmaxrxt = sp->pathmaxrxt;
}
+out:
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
return -EFAULT;
static struct sctp_bind_bucket *sctp_bucket_create(
struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
-static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
+static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_sock *sp = sctp_sk(sk);
bool reuse = (sk->sk_reuse || sp->reuse);
if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
addr, sp2, sp)) {
- ret = (long)sk2;
+ ret = 1;
goto fail_unlock;
}
}
addr.v4.sin_port = htons(snum);
/* Note: sk->sk_num gets filled in if ephemeral port request. */
- return !!sctp_get_port_local(sk, &addr);
+ return sctp_get_port_local(sk, &addr);
}
/*
nstr_list[i] = htons(str_list[i]);
if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+ kfree(nstr_list);
retval = -EAGAIN;
goto out;
}
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct smc_connection *conn = &smc->conn;
struct sock *sk = &smc->sk;
- bool noblock;
long timeo;
int rc = 0;
/* similar to sk_stream_wait_memory */
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- noblock = timeo ? false : true;
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
break;
}
if (!timeo) {
- if (noblock)
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ /* ensure EPOLLOUT is subsequently generated */
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
rc = -EAGAIN;
break;
}
static void
call_bind_status(struct rpc_task *task)
{
+ struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
int status = -EIO;
if (rpc_task_transmitted(task)) {
return;
}
- if (task->tk_status >= 0) {
- dprint_status(task);
+ dprint_status(task);
+ trace_rpc_bind_status(task);
+ if (task->tk_status >= 0)
+ goto out_next;
+ if (xprt_bound(xprt)) {
task->tk_status = 0;
- task->tk_action = call_connect;
- return;
+ goto out_next;
}
- trace_rpc_bind_status(task);
switch (task->tk_status) {
case -ENOMEM:
dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
+ case -ENOBUFS:
+ rpc_delay(task, HZ >> 2);
+ goto retry_timeout;
case -EAGAIN:
goto retry_timeout;
case -ETIMEDOUT:
case -ENETDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
- case -ENOBUFS:
case -EPIPE:
dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
task->tk_pid, task->tk_status);
rpc_call_rpcerror(task, status);
return;
-
+out_next:
+ task->tk_action = call_connect;
+ return;
retry_timeout:
task->tk_status = 0;
task->tk_action = call_bind;
static void
call_connect_status(struct rpc_task *task)
{
+ struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
}
dprint_status(task);
-
trace_rpc_connect_status(task);
+
+ if (task->tk_status == 0) {
+ clnt->cl_stats->netreconn++;
+ goto out_next;
+ }
+ if (xprt_connected(xprt)) {
+ task->tk_status = 0;
+ goto out_next;
+ }
+
task->tk_status = 0;
switch (status) {
case -ECONNREFUSED:
case -ENETDOWN:
case -ENETUNREACH:
case -EHOSTUNREACH:
- case -EADDRINUSE:
- case -ENOBUFS:
case -EPIPE:
xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
task->tk_rqstp->rq_connect_cookie);
/* retry with existing socket, after a delay */
rpc_delay(task, 3*HZ);
/* fall through */
+ case -EADDRINUSE:
case -ENOTCONN:
case -EAGAIN:
case -ETIMEDOUT:
goto out_retry;
- case 0:
- clnt->cl_stats->netreconn++;
- task->tk_action = call_transmit;
- return;
+ case -ENOBUFS:
+ rpc_delay(task, HZ >> 2);
+ goto out_retry;
}
rpc_call_rpcerror(task, status);
return;
+out_next:
+ task->tk_action = call_transmit;
+ return;
out_retry:
/* Check for timeouts before looping back to call_bind */
task->tk_action = call_bind;
case -ECONNABORTED:
case -ENOTCONN:
rpc_force_rebind(clnt);
- /* fall through */
+ break;
case -EADDRINUSE:
rpc_delay(task, 3*HZ);
/* fall through */
status = -EBADMSG;
goto out_dequeue;
}
- if (task->tk_ops->rpc_call_prepare_transmit) {
- task->tk_ops->rpc_call_prepare_transmit(task,
- task->tk_calldata);
- status = task->tk_status;
- if (status < 0)
- goto out_dequeue;
- }
if (RPC_SIGNALLED(task)) {
status = -ERESTARTSYS;
goto out_dequeue;
tipc_set_node_id(net, node_id);
}
tn->trial_addr = addr;
+ tn->addr_trial_end = jiffies;
pr_info("32-bit node address hash set to %x\n", addr);
}
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
- * @prev_from: sequence number of most previous retransmission request
- * @stale_limit: time when repeated identical retransmits must force link reset
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
u16 limit;
} backlog[5];
u16 snd_nxt;
- u16 prev_from;
u16 window;
- unsigned long stale_limit;
/* Reception */
u16 rcv_nxt;
* link_retransmit_failure() - Detect repeated retransmit failures
* @l: tipc link sender
* @r: tipc link receiver (= l in case of unicast)
- * @from: seqno of the 1st packet in retransmit request
* @rc: returned code
*
 * Return: true if repeated retransmit failures have happened, otherwise
 * false
*/
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
- u16 from, int *rc)
+ int *rc)
{
struct sk_buff *skb = skb_peek(&l->transmq);
struct tipc_msg *hdr;
if (!skb)
return false;
- hdr = buf_msg(skb);
- /* Detect repeated retransmit failures on same packet */
- if (r->prev_from != from) {
- r->prev_from = from;
- r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
- } else if (time_after(jiffies, r->stale_limit)) {
- pr_warn("Retransmission failure on link <%s>\n", l->name);
- link_print(l, "State of link ");
- pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
- msg_user(hdr), msg_type(hdr), msg_size(hdr),
- msg_errcode(hdr));
- pr_info("sqno %u, prev: %x, src: %x\n",
- msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
-
- trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
- trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
- trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+ if (!TIPC_SKB_CB(skb)->retr_cnt)
+ return false;
- if (link_is_bc_sndlink(l))
- *rc = TIPC_LINK_DOWN_EVT;
+ if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
+ msecs_to_jiffies(r->tolerance)))
+ return false;
+
+ hdr = buf_msg(skb);
+ if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
+ return false;
+ pr_warn("Retransmission failure on link <%s>\n", l->name);
+ link_print(l, "State of link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+ pr_info("sqno %u, prev: %x, dest: %x\n",
+ msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
+ pr_info("retr_stamp %d, retr_cnt %d\n",
+ jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
+ TIPC_SKB_CB(skb)->retr_cnt);
+
+ trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
+ trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+
+ if (link_is_bc_sndlink(l)) {
+ r->state = LINK_RESET;
+ *rc = TIPC_LINK_DOWN_EVT;
+ } else {
*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
- return true;
}
- return false;
+ return true;
}
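
The rewritten check keys off per-packet state: a failure is declared only for a packet that has already been retransmitted (retr_cnt non-zero) and whose first retransmission (retr_stamp) is older than the link tolerance. The staleness test in isolation, as a hypothetical helper:

#include <linux/jiffies.h>
#include <linux/types.h>

/* True if the packet's first retransmission is older than the link
 * tolerance; a packet never retransmitted cannot be stale. */
static bool retrans_is_stale(unsigned long retr_stamp, u16 retr_cnt,
			     u32 tolerance_ms)
{
	if (!retr_cnt)
		return false;

	return time_after(jiffies, retr_stamp + msecs_to_jiffies(tolerance_ms));
}
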
/* tipc_link_bc_retrans() - retransmit zero or more packets
trace_tipc_link_retrans(r, from, to, &l->transmq);
- if (link_retransmit_failure(l, r, from, &rc))
+ if (link_retransmit_failure(l, r, &rc))
return rc;
skb_queue_walk(&l->transmq, skb) {
continue;
if (more(msg_seqno(hdr), to))
break;
- if (link_is_bc_sndlink(l)) {
- if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
- continue;
- TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
- }
+
+ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+ continue;
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
if (!_skb)
return 0;
_skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, _skb);
l->stats.retransmitted++;
+
+ /* Increase actual retrans counter & mark first time */
+ if (!TIPC_SKB_CB(skb)->retr_cnt++)
+ TIPC_SKB_CB(skb)->retr_stamp = jiffies;
}
return 0;
}
struct tipc_msg *hdr;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
+ bool passed = false;
u16 seqno, n = 0;
int rc = 0;
- if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
- return rc;
-
skb_queue_walk_safe(&l->transmq, skb, tmp) {
seqno = buf_seqno(skb);
__skb_unlink(skb, &l->transmq);
kfree_skb(skb);
} else if (less_eq(seqno, acked + gap)) {
- /* retransmit skb */
+			/* First, check whether repeated retransmit failures have occurred */
+ if (!passed && link_retransmit_failure(l, l, &rc))
+ return rc;
+ passed = true;
+
+			/* retransmit skb if unrestricted */
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
-
- _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+ _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
+ GFP_ATOMIC);
if (!_skb)
continue;
hdr = buf_msg(_skb);
_skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, _skb);
l->stats.retransmitted++;
+
+ /* Increase actual retrans counter & mark first time */
+ if (!TIPC_SKB_CB(skb)->retr_cnt++)
+ TIPC_SKB_CB(skb)->retr_stamp = jiffies;
} else {
/* retry with Gap ACK blocks if any */
if (!ga || n >= ga->gack_cnt)
i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
- i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
+ i += scnprintf(buf + i, sz - i, " %u", 0);
i += scnprintf(buf + i, sz - i, " %u", 0);
i += scnprintf(buf + i, sz - i, " %u", l->acked);
#define TIPC_MEDIA_INFO_OFFSET 5
struct tipc_skb_cb {
- u32 bytes_read;
- u32 orig_member;
struct sk_buff *tail;
unsigned long nxt_retr;
- bool validated;
+ unsigned long retr_stamp;
+ u32 bytes_read;
+ u32 orig_member;
u16 chain_imp;
u16 ackers;
+ u16 retr_cnt;
+ bool validated;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
publ->key);
}
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
/**
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
- int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
struct tls_record_info *record = ctx->open_record;
+ int tls_push_record_flags;
struct page_frag *pfrag;
size_t orig_size = size;
u32 max_open_record_len;
if (sk->sk_err)
return -sk->sk_err;
+ flags |= MSG_SENDPAGE_DECRYPTED;
+ tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
if (tls_is_partially_sent_record(tls_ctx)) {
rc = tls_push_partial_record(sk, tls_ctx, flags);
gfp_t sk_allocation = sk->sk_allocation;
sk->sk_allocation = GFP_ATOMIC;
- tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
+ tls_push_partial_record(sk, ctx,
+ MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_DECRYPTED);
sk->sk_allocation = sk_allocation;
}
}
if (free_ctx)
icsk->icsk_ulp_data = NULL;
sk->sk_prot = ctx->sk_proto;
+ if (sk->sk_write_space == tls_write_space)
+ sk->sk_write_space = ctx->sk_write_space;
write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
if (ctx->tx_conf == TLS_SW)
/* When last_request->processed becomes true this will be rescheduled */
if (lr && !lr->processed) {
- reg_process_hint(lr);
+ pr_debug("Pending regulatory request, waiting for it to be processed...\n");
return;
}
switch (params->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
+ /* Extended Key ID can only be used with CCMP/GCMP ciphers */
+ if ((pairwise && key_idx) ||
+ params->mode != NL80211_KEY_RX_TX)
+ return -EINVAL;
+ break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- /* IEEE802.11-2016 allows only 0 and - when using Extended Key
- * ID - 1 as index for pairwise keys.
+		/* IEEE802.11-2016 allows only key index 0 for pairwise keys
+		 * and, when Extended Key ID is supported, also index 1.
* @NL80211_KEY_NO_TX is only allowed for pairwise keys when
* the driver supports Extended Key ID.
* @NL80211_KEY_SET_TX can't be set when installing and
* validating a key.
*/
- if (params->mode == NL80211_KEY_NO_TX) {
- if (!wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_EXT_KEY_ID))
- return -EINVAL;
- else if (!pairwise || key_idx < 0 || key_idx > 1)
+ if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
+ params->mode == NL80211_KEY_SET_TX)
+ return -EINVAL;
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_EXT_KEY_ID)) {
+ if (pairwise && (key_idx < 0 || key_idx > 1))
return -EINVAL;
- } else if ((pairwise && key_idx) ||
- params->mode == NL80211_KEY_SET_TX) {
+ } else if (pairwise && key_idx) {
return -EINVAL;
}
break;
umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
if (!umem->pages) {
err = -ENOMEM;
- goto out_account;
+ goto out_pin;
}
for (i = 0; i < umem->npgs; i++)
return 0;
+out_pin:
+ xdp_umem_unpin_pages(umem);
out_account:
xdp_umem_unaccount_pages(umem);
return err;
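
The added out_pin label restores the usual layered-goto shape: each label undoes exactly the steps that succeeded before the failure, in reverse order, so the kcalloc() failure no longer leaks the pinned pages. The pattern in the abstract, with hypothetical step functions:

int pin_pages(void);
int alloc_table(void);
int account_pages(void);
void free_table(void);
void unpin_pages(void);

static int setup(void)
{
	int err;

	err = pin_pages();
	if (err)
		return err;

	err = alloc_table();
	if (err)
		goto out_pin;		/* undo pin_pages() only */

	err = account_pages();
	if (err)
		goto out_table;		/* undo both earlier steps */

	return 0;

out_table:
	free_table();
out_pin:
	unpin_pages();
	return err;
}
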
if (err < 0)
goto out;
- strcpy(xi->p.name, dev->name);
-
dev_hold(dev);
xfrmi_link(xfrmn, xi);
struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
xfrmi_unlink(xfrmn, xi);
- dev_put(xi->phydev);
dev_put(dev);
}
if (tdev == dev) {
stats->collisions++;
net_warn_ratelimited("%s: Local routing loop detected!\n",
- xi->p.name);
+ dev->name);
goto tx_err_dst_release;
}
goto tx_err;
}
- fl.flowi_oif = xi->phydev->ifindex;
+ fl.flowi_oif = xi->p.link;
ret = xfrmi_xmit2(skb, dev, &fl);
if (ret < 0)
static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
- struct net *net = dev_net(xi->dev);
+ struct net *net = xi->net;
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
int err;
{
struct xfrm_if *xi = netdev_priv(dev);
- return xi->phydev->ifindex;
+ return xi->p.link;
}
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
netif_keep_dst(dev);
+
+ eth_broadcast_addr(dev->broadcast);
}
static int xfrmi_dev_init(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device *phydev = xi->phydev;
+ struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
int err;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
dev->features |= NETIF_F_LLTX;
- dev->needed_headroom = phydev->needed_headroom;
- dev->needed_tailroom = phydev->needed_tailroom;
+ if (phydev) {
+ dev->needed_headroom = phydev->needed_headroom;
+ dev->needed_tailroom = phydev->needed_tailroom;
- if (is_zero_ether_addr(dev->dev_addr))
- eth_hw_addr_inherit(dev, phydev);
- if (is_zero_ether_addr(dev->broadcast))
- memcpy(dev->broadcast, phydev->broadcast, dev->addr_len);
+ if (is_zero_ether_addr(dev->dev_addr))
+ eth_hw_addr_inherit(dev, phydev);
+ if (is_zero_ether_addr(dev->broadcast))
+ memcpy(dev->broadcast, phydev->broadcast,
+ dev->addr_len);
+ } else {
+ eth_hw_addr_random(dev);
+ eth_broadcast_addr(dev->broadcast);
+ }
return 0;
}
int err;
xfrmi_netlink_parms(data, &p);
-
- if (!tb[IFLA_IFNAME])
- return -EINVAL;
-
- nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
-
xi = xfrmi_locate(net, &p);
if (xi)
return -EEXIST;
xi->p = p;
xi->net = net;
xi->dev = dev;
- xi->phydev = dev_get_by_index(net, p.link);
- if (!xi->phydev)
- return -ENODEV;
err = xfrmi_create(dev);
- if (err < 0)
- dev_put(xi->phydev);
return err;
}
struct netlink_ext_ack *extack)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net *net = dev_net(dev);
-
- xfrmi_netlink_parms(data, &xi->p);
+ struct net *net = xi->net;
+ struct xfrm_if_parms p;
- xi = xfrmi_locate(net, &xi->p);
+ xfrmi_netlink_parms(data, &p);
+ xi = xfrmi_locate(net, &p);
if (!xi) {
xi = netdev_priv(dev);
} else {
return -EEXIST;
}
- return xfrmi_update(xi, &xi->p);
+ return xfrmi_update(xi, &p);
}
static size_t xfrmi_get_size(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- return dev_net(xi->phydev);
+ return xi->net;
}
static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
} else if (delta > 0) {
p = &parent->rb_right;
} else {
+ bool same_prefixlen = node->prefixlen == n->prefixlen;
struct xfrm_policy *tmp;
hlist_for_each_entry(tmp, &n->hhead, bydst) {
hlist_del_rcu(&tmp->bydst);
}
+ node->prefixlen = prefixlen;
+
xfrm_policy_inexact_list_reinsert(net, node, family);
- if (node->prefixlen == n->prefixlen) {
+ if (same_prefixlen) {
kfree_rcu(n, rcu);
return;
}
rb_erase(*p, new);
kfree_rcu(n, rcu);
n = node;
- n->prefixlen = prefixlen;
goto restart;
}
}
struct flowi4 *fl4 = &fl->u.ip4;
int oif = 0;
- if (skb_dst(skb))
+ if (skb_dst(skb) && skb_dst(skb)->dev)
oif = skb_dst(skb)->dev->ifindex;
memset(fl4, 0, sizeof(struct flowi4));
nexthdr = nh[nhoff];
- if (skb_dst(skb))
+ if (skb_dst(skb) && skb_dst(skb)->dev)
oif = skb_dst(skb)->dev->ifindex;
memset(fl6, 0, sizeof(struct flowi6));
if (argc != 2) {
printf(
- "Sintax: %s fbdev\n"
+ "Syntax: %s fbdev\n"
"Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
return -1;
}
+// SPDX-License-Identifier: GPL-2.0-only
// Check if refcount_t type and API should be used
// instead of atomic_t type when dealing with refcounters
//
key = check_cached_key(&ctx);
if (key)
- return key;
+ goto error_free;
/* search all the process keyrings for a key */
rcu_read_lock();
{
struct request_key_auth *rka = dereference_key_rcu(key);
+ if (!rka)
+ return;
+
seq_puts(m, "key:");
seq_puts(m, key->description);
if (key_is_positive(key))
size_t datalen;
long ret;
+ if (!rka)
+ return -EKEYREVOKED;
+
datalen = rka->callout_len;
ret = datalen;
static int __init init_digests(void)
{
- u8 digest[TPM_MAX_DIGEST_SIZE];
- int ret;
- int i;
-
- ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
- if (ret < 0)
- return ret;
- if (ret < TPM_MAX_DIGEST_SIZE)
- return -EFAULT;
-
digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
GFP_KERNEL);
if (!digests)
return -ENOMEM;
- for (i = 0; i < chip->nr_allocated_banks; i++)
- memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
-
return 0;
}
if (cptr->type == USER_CLIENT) {
info->input_pool = cptr->data.user.fifo_pool_size;
info->input_free = info->input_pool;
- if (cptr->data.user.fifo)
- info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+ info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
} else {
info->input_pool = 0;
info->input_free = 0;
return 0;
}
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+ unsigned long flags;
+ int cells;
+
+ if (!f)
+ return 0;
+
+ snd_use_lock_use(&f->use_lock);
+ spin_lock_irqsave(&f->lock, flags);
+ cells = snd_seq_unused_cells(f->pool);
+ spin_unlock_irqrestore(&f->lock, flags);
+ snd_use_lock_free(&f->use_lock);
+ return cells;
+}
/* resize pool in fifo */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
#endif
unsigned int channels = params_channels(hw_params);
mutex_lock(&oxfw->mutex);
- err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream,
+ err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
rate, channels);
if (err >= 0)
++oxfw->substreams_count;
while (id >= 0) {
const struct hda_fixup *fix = codec->fixup_list + id;
+ if (++depth > 10)
+ break;
if (fix->chained_before)
apply_fixup(codec, fix->chain_id, action, depth + 1);
}
if (!fix->chained || fix->chained_before)
break;
- if (++depth > 10)
- break;
id = fix->chain_id;
}
}
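
Hoisting the depth check to the top of the loop enforces the bound before any fixup is applied, so a cycle in chain_id terminates immediately instead of after one extra application. The bounded-walk guard in the abstract:

struct node { const struct node *next; };

void visit(const struct node *n);

/* Walk a chained list that may contain cycles; check the hop count
 * BEFORE doing any work on the current node. */
static void walk_chain(const struct node *n)
{
	int depth = 0;

	while (n) {
		if (++depth > 10)	/* cycle or absurdly long chain */
			break;
		visit(n);
		n = n->next;
	}
}
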
if (spec->init_hook)
spec->init_hook(codec);
- snd_hda_apply_verbs(codec);
+ if (!spec->skip_verbs)
+ snd_hda_apply_verbs(codec);
init_multi_out(codec);
init_extra_out(codec);
}
EXPORT_SYMBOL_GPL(snd_hda_gen_free);
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+ /* Make the codec enter D3 to avoid spurious noises from the internal
+ * speaker during (and after) reboot
+ */
+ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
#ifdef CONFIG_PM
/**
* snd_hda_gen_check_power_status - check the loopback power save state
.init = snd_hda_gen_init,
.free = snd_hda_gen_free,
.unsol_event = snd_hda_jack_unsol_event,
+ .reboot_notify = snd_hda_gen_reboot_notify,
#ifdef CONFIG_PM
.check_power_status = snd_hda_gen_check_power_status,
#endif
err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
if (err < 0)
- return err;
+ goto error;
err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
if (err < 0)
unsigned int indep_hp_enabled:1; /* independent HP enabled */
unsigned int have_aamix_ctl:1;
unsigned int hp_mic_jack_modes:1;
+ unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
/* additional mute flags (only effective with auto_mute_via_amp=1) */
u64 mute_bits;
struct auto_pin_cfg *cfg);
int snd_hda_gen_build_controls(struct hda_codec *codec);
int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
/* standard jack event callbacks */
void snd_hda_gen_hp_automute(struct hda_codec *codec,
/* AMD, X370 & co */
{ PCI_DEVICE(0x1022, 0x1457),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* AMD, X570 & co */
+ { PCI_DEVICE(0x1022, 0x1487),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
/* AMD Stoney */
{ PCI_DEVICE(0x1022, 0x157a),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
+ SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
{
struct conexant_spec *spec = codec->spec;
- switch (codec->core.vendor_id) {
- case 0x14f12008: /* CX8200 */
- case 0x14f150f2: /* CX20722 */
- case 0x14f150f4: /* CX20724 */
- break;
- default:
- return;
- }
-
/* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
+ snd_hda_gen_reboot_notify(codec);
}
static void cx_auto_free(struct hda_codec *codec)
/* update LED status via GPIO */
static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
- bool enabled)
+ bool led_on)
{
struct conexant_spec *spec = codec->spec;
unsigned int oldval = spec->gpio_led;
if (spec->mute_led_polarity)
- enabled = !enabled;
+ led_on = !led_on;
- if (enabled)
- spec->gpio_led &= ~mask;
- else
+ if (led_on)
spec->gpio_led |= mask;
+ else
+ spec->gpio_led &= ~mask;
+	codec_dbg(codec, "mask:%d led_on:%d gpio_led:%d\n",
+		  mask, led_on, spec->gpio_led);
if (spec->gpio_led != oldval)
snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
spec->gpio_led);
{
struct hda_codec *codec = private_data;
struct conexant_spec *spec = codec->spec;
-
- cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+ /* muted -> LED on */
+ cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
}
/* turn on/off mic-mute LED via GPIO per capture hook */
{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
{}
};
- codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
if (spec->init_hook)
spec->init_hook(codec);
+	spec->gen.skip_verbs = 1; /* applied below */
snd_hda_gen_init(codec);
alc_fix_pll(codec);
alc_auto_init_amp(codec, spec->init_amp);
+	snd_hda_apply_verbs(codec); /* apply verbs after our own init */
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
alc_shutup(codec);
}
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
-}
-
#define alc_free snd_hda_gen_free
#ifdef CONFIG_PM
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
codec->power_save_node = 0; /* avoid click noises */
snd_hda_apply_pincfgs(codec, pincfgs);
ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC299_FIXUP_PREDATOR_SPK,
+ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
{ }
}
},
+ [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
+ { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
line6pcm->volume_monitor = 255;
line6pcm->line6 = line6;
+ spin_lock_init(&line6pcm->out.lock);
+ spin_lock_init(&line6pcm->in.lock);
+ line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+ line6->line6pcm = line6pcm;
+
+ pcm->private_data = line6pcm;
+ pcm->private_free = line6_cleanup_pcm;
+
line6pcm->max_packet_size_in =
usb_maxpacket(line6->usbdev,
usb_rcvisocpipe(line6->usbdev, ep_read), 0);
return -EINVAL;
}
- spin_lock_init(&line6pcm->out.lock);
- spin_lock_init(&line6pcm->in.lock);
- line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-
- line6->line6pcm = line6pcm;
-
- pcm->private_data = line6pcm;
- pcm->private_free = line6_cleanup_pcm;
-
err = line6_create_audio_out_urbs(line6pcm);
if (err < 0)
return err;
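The reordering above matters for cleanup: pcm->private_free is what ultimately frees line6pcm, so it (and the fields it relies on, such as the spinlocks and the line6 back-pointer) must be set before the first error return; with the old ordering, an early failure leaked the freshly allocated structure.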
unsigned char *buffer;
unsigned int buflen;
DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
struct usb_audio_term oterm;
const struct usbmix_name_map *map;
const struct usbmix_selector_map *selector_map;
struct uac_mixer_unit_descriptor *desc)
{
int mu_channels;
- void *c;
if (desc->bLength < sizeof(*desc))
return -EINVAL;
if (!desc->bNrInPins)
return -EINVAL;
+ if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+ return -EINVAL;
switch (state->mixer->protocol) {
case UAC_VERSION_1:
break;
}
- if (!mu_channels)
- return 0;
-
- c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
- if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
- return 0; /* no bmControls -> skip */
-
return mu_channels;
}
 * parse the source unit recursively until it reaches a terminal
* or a branched unit.
*/
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
struct usb_audio_term *term)
{
int protocol = state->mixer->protocol;
int err;
void *p1;
+ unsigned char *hdr;
memset(term, 0, sizeof(*term));
- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
- unsigned char *hdr = p1;
+ for (;;) {
+ /* a loop in the terminal chain? */
+ if (test_and_set_bit(id, state->termbitmap))
+ return -EINVAL;
+
+ p1 = find_audio_control_unit(state, id);
+ if (!p1)
+ break;
+
+ hdr = p1;
term->id = id;
if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
case UAC3_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
return -EINVAL;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
return -ENODEV;
}
+
+static int check_input_term(struct mixer_build *state, int id,
+ struct usb_audio_term *term)
+{
+ memset(term, 0, sizeof(*term));
+ memset(state->termbitmap, 0, sizeof(state->termbitmap));
+ return __check_input_term(state, id, term);
+}
+
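The split above is classic cycle detection: check_input_term() clears the visited bitmap once at the top level, and __check_input_term() marks each unit id before following its source link, failing as soon as an id repeats. A minimal userspace sketch of the same pattern, with an invented next_unit[] link table standing in for the descriptor walk:

    #define MAX_ID_ELEMS 256

    /* hypothetical link table: each unit records the unit it sources
     * from (0 = reached a terminal) */
    static int next_unit[MAX_ID_ELEMS];

    static int follow_chain(int id)
    {
            unsigned char visited[MAX_ID_ELEMS / 8] = { 0 };

            while (id) {
                    /* a loop in the chain? (test_and_set_bit() equivalent) */
                    if (visited[id / 8] & (1 << (id % 8)))
                            return -1;      /* the driver returns -EINVAL */
                    visited[id / 8] |= 1 << (id % 8);
                    id = next_unit[id];
            }
            return 0;
    }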
/*
* Feature Unit
*/
* Mixer Unit
*/
+/* check whether the given in/out counts overflow the bmMixerControls matrix */
+static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
+ int protocol, int num_ins, int num_outs)
+{
+ u8 *hdr = (u8 *)desc;
+ u8 *c = uac_mixer_unit_bmControls(desc, protocol);
+ size_t rest; /* remaining bytes after bmMixerControls */
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ default:
+ rest = 1; /* iMixer */
+ break;
+ case UAC_VERSION_2:
+ rest = 2; /* bmControls + iMixer */
+ break;
+ case UAC_VERSION_3:
+ rest = 6; /* bmControls + wMixerDescrStr */
+ break;
+ }
+
+ /* overflow? */
+ return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
+}
+
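Concretely, for a UAC1 mixer with 4 inputs and 2 outputs the bitmap needs (4 * 2 + 7) / 8 = 1 byte, and rest = 1 for the trailing iMixer byte, so bmMixerControls plus 2 bytes must still lie within bLength. The same bound restated as a standalone sketch, with an explicit offset parameter standing in for uac_mixer_unit_bmControls():

    #include <stdbool.h>
    #include <stddef.h>

    static bool mixer_bitmap_fits(size_t bmctrl_off, size_t desc_len,
                                  unsigned int num_ins, unsigned int num_outs,
                                  size_t rest)
    {
            /* one control bit per (input, output) pair, rounded up to bytes */
            size_t bitmap_bytes = ((size_t)num_ins * num_outs + 7) / 8;

            return bmctrl_off + bitmap_bytes + rest <= desc_len;
    }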
/*
* build a mixer unit control
*
if (err < 0)
return err;
num_ins += iterm.channels;
+ if (mixer_bitmap_overflow(desc, state->mixer->protocol,
+ num_ins, num_outs))
+ break;
for (; ich < num_ins; ich++) {
int och, ich_has_controls = 0;
{
struct usb_mixer_interface *mixer;
struct usb_mixer_elem_info *cval;
- int unitid = 12; /* SamleRate ExtensionUnit ID */
+ int unitid = 12; /* SampleRate ExtensionUnit ID */
list_for_each_entry(mixer, &chip->mixer_list, list) {
- cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
- if (cval) {
+ if (mixer->id_elems[unitid]) {
+ cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
cval->control << 8,
samplerate_id);
snd_usb_mixer_notify_id(mixer, unitid);
+ break;
}
- break;
}
}
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
ep = 0x81;
ifnum = 1;
if (err)
return err;
- return bpf_obj_pin(fd, name);
+ err = bpf_obj_pin(fd, name);
+ if (err)
+ p_err("can't pin the object (%s): %s", name, strerror(errno));
+
+ return err;
}
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
fd = get_fd_by_id(id);
if (fd < 0) {
- p_err("can't get prog by id (%u): %s", id, strerror(errno));
+ p_err("can't open object by id (%u): %s", id, strerror(errno));
return -1;
}
if (fd < 0)
return -1;
- return show_prog(fd);
+ err = show_prog(fd);
+ close(fd);
+ return err;
}
if (argc)
# the script prints the string "Disabled" to stdout.
#
# Each Distro is expected to implement this script in a distro specific
-# fashion. For instance on Distros that ship with Network Manager enabled,
+# fashion. For instance, on Distros that ship with Network Manager enabled,
# this script can be based on the Network Manager APIs for retrieving DHCP
# information.
/*
- * Gather the DNS state.
+ * Gather the DNS state.
* Since there is no standard way to get this information
* across various distributions of interest; we just invoke
* an external script that needs to be ported across distros
int sn_offset = 0;
int error = 0;
char *buffer;
- struct hv_kvp_ipaddr_value *ip_buffer;
+ struct hv_kvp_ipaddr_value *ip_buffer = NULL;
char cidr_mask[5]; /* /xyz */
int weight;
int i;
char *start;
/*
- * in_buf has sequence of characters that are seperated by
+ * in_buf has a sequence of characters that are separated by
* the character ';'. The last sequence does not have the
* terminating ";" character.
*/
daemonize = 0;
break;
case 'h':
+ print_usage(argv);
+ exit(0);
default:
print_usage(argv);
exit(EXIT_FAILURE);
case KVP_OP_GET_IP_INFO:
kvp_ip_val = &hv_msg->body.kvp_ip_val;
- error = kvp_mac_to_ip(kvp_ip_val);
+ error = kvp_mac_to_ip(kvp_ip_val);
if (error)
hv_msg->error = error;
# be used to configure the interface.
#
# Each Distro is expected to implement this script in a distro specific
-# fashion. For instance on Distros that ship with Network Manager enabled,
+# fashion. For instance, on Distros that ship with Network Manager enabled,
# this script can be based on the Network Manager APIs for configuring the
# interface.
#
* If a partition is mounted more than once, only the first
* FREEZE/THAW can succeed and the later ones will get
* EBUSY/EINVAL respectively: there could be 2 cases:
- * 1) a user may mount the same partition to differnt directories
+ * 1) a user may mount the same partition to different directories
* by mistake or on purpose;
* 2) The subvolume of btrfs appears to have the same partition
* mounted more than once.
daemonize = 0;
break;
case 'h':
+ print_usage(argv);
+ exit(0);
default:
print_usage(argv);
exit(EXIT_FAILURE);
import os
from optparse import OptionParser
+help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages"
parser = OptionParser()
-parser.add_option("-v", "--verbose", dest="verbose",
- help="print verbose messages. Try -vv, -vvv for \
- more verbose messages", action="count")
+parser.add_option(
+ "-v", "--verbose", dest="verbose", help=help_msg, action="count")
(options, args) = parser.parse_args()
exit(-1)
vmbus_dev_dict = {
- '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]',
- '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]',
- '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]',
- '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]',
- '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]',
- '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]',
- '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]',
- '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse',
- '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard',
- '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter',
- '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter',
- '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller',
- '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller',
- '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter',
- '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter',
- '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through',
- '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]',
- '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]',
- '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]',
+ '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
+ '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
+ '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
+ '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
+ '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
+ '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
+ '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
+ '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
+ '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
+ '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
+ '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
+ '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
+ '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
+ '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
+ '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
+ '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
+ '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
+ '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
+ '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
}
+
def get_vmbus_dev_attr(dev_name, attr):
try:
f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
return lines
+
class VMBus_Dev:
pass
chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
- chn_vp_mapping = sorted(chn_vp_mapping,
- key = lambda c : int(c.split(':')[0]))
+ chn_vp_mapping = sorted(
+ chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
- chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' %
- (c.split(':')[0], c.split(':')[1])
- for c in chn_vp_mapping]
+ chn_vp_mapping = [
+ '\tRel_ID=%s, target_cpu=%s' %
+ (c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
+ ]
d = VMBus_Dev()
d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
d.vmbus_id = vmbus_id
vmbus_dev_list.append(d)
-vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id))
+vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
format0 = '%2s: %s'
format1 = '%2s: Class_ID = %s - %s\n%s'
if verbose == 0:
print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
elif verbose == 1:
- print (('VMBUS ID ' + format1) % \
- (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping))
+ print(
+ ('VMBUS ID ' + format1) %
+ (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
+ )
else:
- print (('VMBUS ID ' + format2) % \
- (d.vmbus_id, d.class_id, d.dev_desc, \
- d.device_id, d.sysfs_path, d.chn_vp_mapping))
+ print(
+ ('VMBUS ID ' + format2) %
+ (
+ d.vmbus_id, d.class_id, d.dev_desc,
+ d.device_id, d.sysfs_path, d.chn_vp_mapping
+ )
+ )
* If no cookie has been set yet, generate a new cookie. Once
* generated, the socket cookie remains stable for the life of the
* socket. This helper can be useful for monitoring per socket
- * networking traffic statistics as it provides a unique socket
- * identifier per namespace.
+ * networking traffic statistics as it provides a global socket
+ * identifier that can be assumed unique.
* Return
 * An 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
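As a usage illustration (not part of the patch), the cookie is typically used as a stable map key. A sketch of per-socket byte accounting in a cgroup skb program; the map and program names are invented:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u64);     /* socket cookie */
            __type(value, __u64);   /* bytes seen */
    } sock_bytes SEC(".maps");

    SEC("cgroup_skb/egress")
    int count_bytes(struct __sk_buff *skb)
    {
            __u64 cookie = bpf_get_socket_cookie(skb);
            __u64 len = skb->len, *val;

            if (!cookie)
                    return 1;       /* no socket attached; allow */
            val = bpf_map_lookup_elem(&sock_bytes, &cookie);
            if (val)
                    __sync_fetch_and_add(val, len);
            else
                    bpf_map_update_elem(&sock_bytes, &cookie, &len, BPF_ANY);
            return 1;               /* allow the packet */
    }

    char _license[] SEC("license") = "GPL";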
* but this is only implemented for native XDP (with driver
* support) as of this writing).
*
- * All values for *flags* are reserved for future usage, and must
- * be left at zero.
+ * The lower two bits of *flags* are used as the return code if
+ * the map lookup fails. This is so that the return value can be
+ * one of the XDP program return codes up to XDP_TX, as chosen by
+ * the caller. Any higher bits in the *flags* argument must be
+ * unset.
*
* When used to redirect packets to net devices, this helper
* provides a high performance increase over **bpf_redirect**\ ().
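A minimal sketch of the new semantics (map name and type are illustrative): the program redirects when the map has an entry for the receive queue and otherwise falls back to XDP_PASS, without any explicit lookup:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_XSKMAP);
            __uint(max_entries, 64);
            __uint(key_size, sizeof(int));
            __uint(value_size, sizeof(int));
    } xsks_map SEC(".maps");

    SEC("xdp")
    int redirect_or_pass(struct xdp_md *ctx)
    {
            /* the lower two bits of flags (XDP_PASS here) become the
             * return code when the map lookup fails */
            return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
    }

    char _license[] SEC("license") = "GPL";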
bpf_program_clear_priv_t clear_priv;
enum bpf_attach_type expected_attach_type;
- int btf_fd;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
prog->instances.nr = -1;
zfree(&prog->instances.fds);
- zclose(prog->btf_fd);
zfree(&prog->func_info);
zfree(&prog->line_info);
}
prog->instances.fds = NULL;
prog->instances.nr = -1;
prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->btf_fd = -1;
return 0;
errout:
prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
}
- if (!insn_offset)
- prog->btf_fd = btf__fd(obj->btf);
-
return 0;
}
char *cp, errmsg[STRERR_BUFSIZE];
int log_buf_size = BPF_LOG_BUF_SIZE;
char *log_buf;
- int ret;
+ int btf_fd, ret;
if (!insns || !insns_cnt)
return -EINVAL;
load_attr.license = license;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
- load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
+ /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
+ if (prog->obj->btf_ext)
+ btf_fd = bpf_object__btf_fd(prog->obj);
+ else
+ btf_fd = -1;
+ load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
static const char *fcpu = "/sys/devices/system/cpu/possible";
int len = 0, n = 0, il = 0, ir = 0;
unsigned int start = 0, end = 0;
+ int tmp_cpus = 0;
static int cpus;
char buf[128];
int error = 0;
int fd = -1;
- if (cpus > 0)
- return cpus;
+ tmp_cpus = READ_ONCE(cpus);
+ if (tmp_cpus > 0)
+ return tmp_cpus;
fd = open(fcpu, O_RDONLY);
if (fd < 0) {
}
buf[len] = '\0';
- for (ir = 0, cpus = 0; ir <= len; ir++) {
+ for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
if (buf[ir] == ',' || buf[ir] == '\0') {
buf[ir] = '\0';
} else if (n == 1) {
end = start;
}
- cpus += end - start + 1;
+ tmp_cpus += end - start + 1;
il = ir + 1;
}
}
- if (cpus <= 0) {
- pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu);
+ if (tmp_cpus <= 0) {
+ pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
return -EINVAL;
}
- return cpus;
+
+ WRITE_ONCE(cpus, tmp_cpus);
+ return tmp_cpus;
}
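The tmp_cpus local plus the single WRITE_ONCE() makes the cached count safe for concurrent callers: the total is accumulated privately and published once, so a racing reader either sees 0 and re-parses the sysfs file or sees the final value, never a half-computed one.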
endif
turbostat : turbostat.c
-override CFLAGS += -Wall -I../../../include
+override CFLAGS += -O2 -Wall -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+override CFLAGS += -D_FORTIFY_SOURCE=2
%: %.c
@mkdir -p $(BUILD_OUTPUT)
int *fd_percpu;
struct timeval interval_tv = {5, 0};
struct timespec interval_ts = {5, 0};
-struct timespec one_msec = {0, 1000000};
unsigned int num_iterations;
unsigned int debug;
unsigned int quiet;
unsigned int units = 1000000; /* MHz etc */
unsigned int genuine_intel;
unsigned int authentic_amd;
+unsigned int hygon_genuine;
unsigned int max_level, max_extended_level;
unsigned int has_invariant_tsc;
unsigned int do_nhm_platform_info;
unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
unsigned int has_misc_feature_control;
unsigned int first_counter_read = 1;
+int ignore_stdin;
#define RAPL_PKG (1 << 0)
/* 0x610 MSR_PKG_POWER_LIMIT */
struct thread_data {
struct timeval tv_begin;
struct timeval tv_end;
+ struct timeval tv_delta;
unsigned long long tsc;
unsigned long long aperf;
unsigned long long mperf;
unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
+#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
- outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
if (DO_BIC(BIC_TOD))
outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
- interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
+ interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
tsc = t->tsc * tsc_tweak;
}
}
+int soft_c1_residency_display(int bic)
+{
+ if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
+ return 0;
+
+ return DO_BIC_READ(bic);
+}
+
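The new soft_c1_residency_display() helper reflects how turbostat derives C1 when no dedicated C1 residency MSR is available: C1 is what remains of the interval after mperf and the other core C-state counters are subtracted, so those counters must still be read even when their own columns are disabled; the extra || soft_c1_residency_display(...) checks below arrange exactly that.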
/*
* old = new - old
*/
* over-write old w/ new so we can print end of interval values
*/
+ timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
old->tv_begin = new->tv_begin;
old->tv_end = new->tv_end;
old->c1 = new->c1 - old->c1;
- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
+ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
+ soft_c1_residency_display(BIC_Avg_MHz)) {
if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
old->aperf = new->aperf - old->aperf;
old->mperf = new->mperf - old->mperf;
t->tv_begin.tv_usec = 0;
t->tv_end.tv_sec = 0;
t->tv_end.tv_usec = 0;
+ t->tv_delta.tv_sec = 0;
+ t->tv_delta.tv_usec = 0;
t->tsc = 0;
t->aperf = 0;
for_all_cpus(sum_counters, t, c, p);
+ /* Use the global time delta for the average. */
+ average.threads.tv_delta = tv_delta;
+
average.threads.tsc /= topo.num_cpus;
average.threads.aperf /= topo.num_cpus;
average.threads.mperf /= topo.num_cpus;
if (!DO_BIC(BIC_X2APIC))
return;
- if (authentic_amd) {
+ if (authentic_amd || hygon_genuine) {
unsigned int topology_extensions;
if (max_extended_level < 0x8000001e)
struct msr_counter *mp;
int i;
- gettimeofday(&t->tv_begin, (struct timezone *)NULL);
-
if (cpu_migrate(cpu)) {
fprintf(outf, "Could not migrate to CPU %d\n", cpu);
return -1;
}
+ gettimeofday(&t->tv_begin, (struct timezone *)NULL);
+
if (first_counter_read)
get_apic_id(t);
retry:
t->tsc = rdtsc(); /* we are running on local CPU of interest */
- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
+ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
+ soft_c1_residency_display(BIC_Avg_MHz)) {
unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
/*
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (DO_BIC(BIC_CPU_c3)) {
+ if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
return -6;
}
- if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) {
+ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
return -7;
- } else if (do_knl_cstates) {
+ } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
return -7;
}
- if (DO_BIC(BIC_CPU_c7))
+ if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
return -8;
if (retval != 1) {
fprintf(stderr, "Disabling Low Power Idle CPU output\n");
BIC_NOT_PRESENT(BIC_CPU_LPI);
+ fclose(fp);
return -1;
}
if (retval != 1) {
fprintf(stderr, "Disabling Low Power Idle System output\n");
BIC_NOT_PRESENT(BIC_SYS_LPI);
+ fclose(fp);
return -1;
}
fclose(fp);
fprintf(stderr, "SIGUSR1\n");
break;
}
- /* make sure this manually-invoked interval is at least 1ms long */
- nanosleep(&one_msec, NULL);
}
void setup_signal_handler(void)
void do_sleep(void)
{
- struct timeval select_timeout;
+ struct timeval tout;
+ struct timespec rest;
fd_set readfds;
int retval;
FD_ZERO(&readfds);
FD_SET(0, &readfds);
- if (!isatty(fileno(stdin))) {
+ if (ignore_stdin) {
nanosleep(&interval_ts, NULL);
return;
}
- select_timeout = interval_tv;
- retval = select(1, &readfds, NULL, NULL, &select_timeout);
+ tout = interval_tv;
+ retval = select(1, &readfds, NULL, NULL, &tout);
if (retval == 1) {
switch (getc(stdin)) {
case 'q':
exit_requested = 1;
break;
+ case EOF:
+ /*
+ * 'stdin' is a pipe closed on the other end. There
+ * won't be any further input.
+ */
+ ignore_stdin = 1;
+ /* Sleep the rest of the time */
+ rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
+ rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
+ nanosleep(&rest, NULL);
}
- /* make sure this manually-invoked interval is at least 1ms long */
- nanosleep(&one_msec, NULL);
}
}
break;
case INTEL_FAM6_HASWELL_CORE: /* HSW */
case INTEL_FAM6_HASWELL_X: /* HSX */
+ case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
case INTEL_FAM6_IVYBRIDGE: /* IVB */
case INTEL_FAM6_HASWELL_CORE: /* HSW */
case INTEL_FAM6_HASWELL_X: /* HSX */
+ case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
{
switch (family) {
case 0x17:
+ case 0x18:
default:
/* This is the max stock TDP of HEDT/Server Fam17h chips */
return 250.0;
case INTEL_FAM6_SANDYBRIDGE:
case INTEL_FAM6_IVYBRIDGE:
case INTEL_FAM6_HASWELL_CORE: /* HSW */
+ case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
switch (family) {
case 0x17: /* Zen, Zen+ */
+ case 0x18: /* Hygon Dhyana */
do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
if (rapl_joules) {
BIC_PRESENT(BIC_Pkg_J);
rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
rapl_power_units = ldexp(1.0, -(msr & 0xf));
- tdp = get_tdp_amd(model);
+ tdp = get_tdp_amd(family);
rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
if (!quiet)
{
if (genuine_intel)
rapl_probe_intel(family, model);
- if (authentic_amd)
+ if (authentic_amd || hygon_genuine)
rapl_probe_amd(family, model);
}
switch (model) {
case INTEL_FAM6_HASWELL_CORE: /* HSW */
+ case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
do_gfx_perf_limit_reasons = 1;
case INTEL_FAM6_HASWELL_X: /* HSX */
case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
case INTEL_FAM6_HASWELL_CORE: /* HSW */
case INTEL_FAM6_HASWELL_X: /* HSW */
+ case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
}
/*
- * HSW adds support for additional MSRs:
+ * HSW ULT added support for C8/C9/C10 MSRs:
*
* MSR_PKG_C8_RESIDENCY 0x00000630
* MSR_PKG_C9_RESIDENCY 0x00000631
* MSR_PKGC10_IRTL 0x00000635
*
*/
-int has_hsw_msrs(unsigned int family, unsigned int model)
+int has_c8910_msrs(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return 0;
switch (model) {
- case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
case INTEL_FAM6_XEON_PHI_KNM:
return INTEL_FAM6_XEON_PHI_KNL;
- case INTEL_FAM6_HASWELL_ULT:
- return INTEL_FAM6_HASWELL_CORE;
-
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
return INTEL_FAM6_BROADWELL_X;
return INTEL_FAM6_SKYLAKE_MOBILE;
case INTEL_FAM6_ICELAKE_MOBILE:
+ case INTEL_FAM6_ICELAKE_NNPI:
return INTEL_FAM6_CANNONLAKE_MOBILE;
+
+ case INTEL_FAM6_ATOM_TREMONT_X:
+ return INTEL_FAM6_ATOM_GOLDMONT_X;
}
return model;
}
genuine_intel = 1;
else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
authentic_amd = 1;
+ else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
+ hygon_genuine = 1;
if (!quiet)
fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
BIC_NOT_PRESENT(BIC_CPU_c7);
BIC_NOT_PRESENT(BIC_Pkgpc7);
}
- if (has_hsw_msrs(family, model)) {
+ if (has_c8910_msrs(family, model)) {
BIC_PRESENT(BIC_Pkgpc8);
BIC_PRESENT(BIC_Pkgpc9);
BIC_PRESENT(BIC_Pkgpc10);
}
- do_irtl_hsw = has_hsw_msrs(family, model);
+ do_irtl_hsw = has_c8910_msrs(family, model);
if (has_skl_msrs(family, model)) {
BIC_PRESENT(BIC_Totl_c0);
BIC_PRESENT(BIC_Any_c0);
void allocate_output_buffer()
{
- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
outp = output_buffer;
if (outp == NULL)
err(-1, "calloc output buffer");
}
void print_version() {
- fprintf(outf, "turbostat version 19.03.20"
+ fprintf(outf, "turbostat version 19.08.31"
" - Len Brown <lenb@kernel.org>\n");
}
endif
x86_energy_perf_policy : x86_energy_perf_policy.c
-override CFLAGS += -Wall -I../../../include
+override CFLAGS += -O2 -Wall -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+override CFLAGS += -D_FORTIFY_SOURCE=2
%: %.c
@mkdir -p $(BUILD_OUTPUT)
Hardware P-States (HWP) are effectively an expansion of hardware
P-state control from the opportunistic turbo-mode P-state range
to include the entire range of available P-states.
-On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP.
+On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
That influence was removed in subsequent generations,
where it was moved to the
Energy_Performance_Preference (EPP) field in
progname = argv[0];
- while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
+ while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
long_options, &option_index)) != -1) {
switch (opt) {
case 'a':
if (system("/sbin/modprobe msr > /dev/null 2>&1"))
err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
}
+
+static void get_cpuid_or_exit(unsigned int leaf,
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
+ errx(1, "Processor not supported\n");
+}
+
/*
* early_cpuid()
* initialize turbo_is_enabled, has_hwp, has_epb
*/
void early_cpuid(void)
{
- unsigned int eax, ebx, ecx, edx, max_level;
+ unsigned int eax, ebx, ecx, edx;
unsigned int fms, family, model;
- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
-
- if (max_level < 6)
- errx(1, "Processor not supported\n");
-
- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
if (family == 6 || family == 0xf)
bdx_highest_ratio = msr & 0xFF;
}
- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
turbo_is_enabled = (eax >> 1) & 1;
has_hwp = (eax >> 7) & 1;
has_epb = (ecx >> 3) & 1;
eax = ebx = ecx = edx = 0;
- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+ get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
genuine_intel = 1;
fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
(char *)&ebx, (char *)&edx, (char *)&ecx);
- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
stepping = fms & 0xf;
errx(1, "CPUID: no MSR");
- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
/* turbo_is_enabled already set */
/* has_hwp already set */
has_hwp_notify = eax & (1 << 8);
BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
TEST_GEN_FILES = $(BPF_OBJ_FILES)
+BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
+TEST_FILES = $(BTF_C_FILES)
+
# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
# contains both ALU32 and JMP32 instructions.
SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
tcp_client.py \
- tcp_server.py
+ tcp_server.py \
+ test_xdp_vlan.sh
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_IPV6_SIT=m
+CONFIG_BPF_JIT=y
}
snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
+ if (access(test_file, R_OK) == -1)
+ /*
+ * When the test is run with O=, kselftest copies TEST_FILES
+ * without preserving the directory structure.
+ */
+ snprintf(test_file, sizeof(test_file), "%s.c",
+ test_case->name);
/*
* Diff test output and expected test output, contained between
* START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
#include <bpf/bpf.h>
#include "cgroup_helpers.h"
+#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "bpf_util.h"
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip6[3])),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x00000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
/* if (ip == expected && port == expected) */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_ip4)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+ __bpf_constant_ntohl(0x7F000001), 4),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock, src_port)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
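The change is a byte-order fix: the bpf_sock address fields hold network byte order, so on a little-endian host __bpf_constant_ntohl(0x7F000001) (127.0.0.1) evaluates to 0x0100007F, the very literal the old code hard-coded, while on big-endian it stays 0x7F000001; the comparison is now correct on both.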
.errstr = "loop detected",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
+{
+ "not-taken loop with back jump to 1st insn",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 123),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 123,
+},
+{
+ "taken loop with back jump to 1st insn",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 55,
+},
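For the expected retvals: the not-taken case never branches back since r0 = 123 != 4, so 123 is returned unchanged, while the taken case's subprogram accumulates r2 = 10 + 9 + ... + 1 = 10 * 11 / 2 = 55 before the conditional back jump finally falls through.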
return ret;
}
+/*
+ * The test creates a cgroup and freezes it. Then it creates a child cgroup
+ * and populates it with a task. After that it checks that the child cgroup
+ * is frozen and the parent cgroup remains frozen too.
+ */
+static int test_cgfreezer_mkdir(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *parent, *child = NULL;
+ int pid;
+
+ parent = cg_name(root, "cg_test_mkdir_A");
+ if (!parent)
+ goto cleanup;
+
+ child = cg_name(parent, "cg_test_mkdir_B");
+ if (!child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_freeze_wait(parent, true))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ pid = cg_run_nowait(child, child_fn, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_wait_for_proc_count(child, 1))
+ goto cleanup;
+
+ if (cg_check_frozen(child, true))
+ goto cleanup;
+
+ if (cg_check_frozen(parent, true))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ free(child);
+ if (parent)
+ cg_destroy(parent);
+ free(parent);
+ return ret;
+}
+
/*
* The test creates two nested cgroups, freezes the parent
* and removes the child. Then it checks that the parent cgroup
T(test_cgfreezer_simple),
T(test_cgfreezer_tree),
T(test_cgfreezer_forkbomb),
+ T(test_cgfreezer_mkdir),
T(test_cgfreezer_rmdir),
T(test_cgfreezer_migrate),
T(test_cgfreezer_ptrace),
struct hv_enlightened_vmcs *current_evmcs;
struct hv_vp_assist_page *current_vp_assist;
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
+
static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
{
u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
r);
- r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
- r);
+ if (kvm_check_cap(KVM_CAP_XCRS)) {
+ r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+ r);
+ }
r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
r);
- r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
- r);
+ if (kvm_check_cap(KVM_CAP_XCRS)) {
+ r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+ r);
+ }
r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
bool enable_evmcs;
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+{
+ uint16_t evmcs_ver;
+
+ struct kvm_enable_cap enable_evmcs_cap = {
+ .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ .args[0] = (unsigned long)&evmcs_ver
+ };
+
+ vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+ /* KVM should return supported EVMCS version range */
+ TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+ (evmcs_ver & 0xff) > 0,
+ "Incorrect EVMCS version range: %x:%x\n",
+ evmcs_ver & 0xff, evmcs_ver >> 8);
+
+ return evmcs_ver;
+}
+
/* Allocate memory regions for nested VMX tests.
*
* Input Args:
struct kvm_x86_state *state;
struct ucall uc;
int stage;
- uint16_t evmcs_ver;
- struct kvm_enable_cap enable_evmcs_cap = {
- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
- .args[0] = (unsigned long)&evmcs_ver
- };
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
exit(KSFT_SKIP);
}
- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
- /* KVM should return supported EVMCS version range */
- TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
- (evmcs_ver & 0xff) > 0,
- "Incorrect EVMCS version range: %x:%x\n",
- evmcs_ver & 0xff, evmcs_ver >> 8);
+ vcpu_enable_evmcs(vm, VCPU_ID);
run = vcpu_state(vm, VCPU_ID);
kvm_vm_restart(vm, O_RDWR);
vm_vcpu_add(vm, VCPU_ID);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+ vcpu_enable_evmcs(vm, VCPU_ID);
vcpu_load_state(vm, VCPU_ID, state);
run = vcpu_state(vm, VCPU_ID);
free(state);
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
+#include "vmx.h"
#define VCPU_ID 0
{
struct kvm_vm *vm;
int rv;
- uint16_t evmcs_ver;
struct kvm_cpuid2 *hv_cpuid_entries;
- struct kvm_enable_cap enable_evmcs_cap = {
- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
- .args[0] = (unsigned long)&evmcs_ver
- };
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
free(hv_cpuid_entries);
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
- if (rv) {
+ if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
fprintf(stderr,
"Enlightened VMCS is unsupported, skip related test\n");
goto vm_free;
}
+ vcpu_enable_evmcs(vm, VCPU_ID);
+
hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
if (!hv_cpuid_entries)
return 1;
msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
- test_msr_platform_info_disabled(vm);
test_msr_platform_info_enabled(vm);
+ test_msr_platform_info_disabled(vm);
vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
kvm_vm_free(vm);
#define VMCS12_REVISION 0x11e57ed0
#define VCPU_ID 5
+bool have_evmcs;
+
void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
{
- volatile struct kvm_run *run;
-
vcpu_nested_state_set(vm, VCPU_ID, state, false);
- run = vcpu_state(vm, VCPU_ID);
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
- "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
}
void test_nested_state_expect_errno(struct kvm_vm *vm,
struct kvm_nested_state *state,
int expected_errno)
{
- volatile struct kvm_run *run;
int rv;
rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
strerror(expected_errno), expected_errno, rv, strerror(errno),
errno);
- run = vcpu_state(vm, VCPU_ID);
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
- "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
}
void test_nested_state_expect_einval(struct kvm_vm *vm,
{
memset(state, 0, size);
state->flags = KVM_STATE_NESTED_GUEST_MODE |
- KVM_STATE_NESTED_RUN_PENDING |
- KVM_STATE_NESTED_EVMCS;
+ KVM_STATE_NESTED_RUN_PENDING;
+ if (have_evmcs)
+ state->flags |= KVM_STATE_NESTED_EVMCS;
state->format = 0;
state->size = size;
state->hdr.vmx.vmxon_pa = 0x1000;
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
* setting the nested state but flags other than eVMCS must be clear.
+ * The eVMCS flag can be set if the enlightened VMCS capability has
+ * been enabled.
*/
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
test_nested_state_expect_einval(vm, state);
- state->flags = KVM_STATE_NESTED_EVMCS;
+ state->flags &= KVM_STATE_NESTED_EVMCS;
+ if (have_evmcs) {
+ test_nested_state_expect_einval(vm, state);
+ vcpu_enable_evmcs(vm, VCPU_ID);
+ }
test_nested_state(vm, state);
/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
struct kvm_nested_state state;
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+ have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
+
if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
exit(KSFT_SKIP);
printf " ${out}\n"
printf " Expected:\n"
printf " ${expected}\n\n"
+ else
+ echo " WARNING: Unexpected route entry"
fi
fi
run_cmd "$IP nexthop get id 52"
log_test $? 0 "Get nexthop by id"
- check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1"
+ check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1 scope link"
run_cmd "$IP nexthop del id 52"
log_test $? 0 "Delete nexthop by id"
run_cmd "$IP -6 nexthop add id 85 dev veth1"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 85"
log_test $? 0 "IPv6 route with device only nexthop"
- check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1"
+ check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1 metric 1024 pref medium"
run_cmd "$IP nexthop add id 123 group 81/85"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 123"
log_test $? 0 "IPv6 multipath route with nexthop mix - dev only + gw"
- check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 nexthop via 2001:db8:91::2 dev veth1 nexthop dev veth1"
+ check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 123 metric 1024 nexthop via 2001:db8:91::2 dev veth1 weight 1 nexthop dev veth1 weight 1 pref medium"
#
# IPv6 route with v4 nexthop - not allowed
run_cmd "$IP nexthop get id 12"
log_test $? 0 "Get nexthop by id"
- check_nexthop "id 12" "id 12 via 172.16.1.2 src 172.16.1.1 dev veth1 scope link"
+ check_nexthop "id 12" "id 12 via 172.16.1.2 dev veth1 scope link"
run_cmd "$IP nexthop del id 12"
log_test $? 0 "Delete nexthop by id"
set +e
run_cmd "$IP ro add 172.16.101.1/32 nhid 11"
log_test $? 0 "IPv6 nexthop with IPv4 route"
- check_route "172.16.101.1" "172.16.101.1 nhid 11 via ${lladdr} dev veth1"
+ check_route "172.16.101.1" "172.16.101.1 nhid 11 via inet6 ${lladdr} dev veth1"
set -e
run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
log_test $? 0 "IPv6 nexthop with IPv4 route"
- check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
+ check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
log_test $? 0 "IPv4 route with IPv6 gateway"
- check_route "172.16.101.1" "172.16.101.1 via ${lladdr} dev veth1"
+ check_route "172.16.101.1" "172.16.101.1 via inet6 ${lladdr} dev veth1"
run_cmd "$IP ro replace 172.16.101.1/32 via inet6 2001:db8:50::1 dev veth1"
log_test $? 2 "IPv4 route with invalid IPv6 gateway"
log_test $? 0 "IPv4 route with device only nexthop"
check_route "172.16.101.1" "172.16.101.1 nhid 85 dev veth1"
- run_cmd "$IP nexthop add id 122 group 21/85"
- run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
+ run_cmd "$IP nexthop add id 123 group 21/85"
+ run_cmd "$IP ro replace 172.16.101.1/32 nhid 123"
log_test $? 0 "IPv4 multipath route with nexthop mix - dev only + gw"
- check_route "172.16.101.1" "172.16.101.1 nhid 85 nexthop via 172.16.1.2 dev veth1 nexthop dev veth1"
+ check_route "172.16.101.1" "172.16.101.1 nhid 123 nexthop via 172.16.1.2 dev veth1 weight 1 nexthop dev veth1 weight 1"
#
# IPv4 with IPv6
run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
log_test $? 0 "IPv4 route with mixed v4-v6 multipath route"
- check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
+ check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
log_test $? 0 "IPv6 nexthop with IPv4 route"
ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1"
val=$(ip netns exec "${NETNS}" nstat -az | \
grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}')
- if [ $val -ne 0 ]; then
+ if [ "$val" != 0 ]; then
echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero"
return 1
fi
#
# 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
+
+ # similar to above: add policies (with partially random address), with shrinking prefixes.
+ for p in 29 28 27;do
+ for k in $(seq 1 32); do
+ ip -net $ns xfrm policy add src 10.253.1.$((RANDOM%255))/$p dst 10.254.1.$((RANDOM%255))/$p dir fwd priority $((200+k)) action block 2>/dev/null
+ done
+ done
}
do_esp_policy_get_check() {
ip netns exec nsr1 nft list ruleset
fi
+KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
+KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
+SPI1=$RANDOM
+SPI2=$RANDOM
+
+if [ $SPI1 -eq $SPI2 ]; then
+ SPI2=$((SPI2+1))
+fi
+
+do_esp() {
+ local ns=$1
+ local me=$2
+ local remote=$3
+ local lnet=$4
+ local rnet=$5
+ local spi_out=$6
+ local spi_in=$7
+
+ ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
+ ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
+
+ # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
+ ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow
+ # to fwd decrypted packets after esp processing:
+ ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 1 action allow
+
+}
+
+do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
+
+do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
+
+ip netns exec nsr1 nft delete table ip nat
+
+# restore default routes
+ip -net ns2 route del 192.168.10.1 via 10.0.2.1
+ip -net ns2 route add default via 10.0.2.1
+ip -net ns2 route add default via dead:2::1
+
+test_tcp_forwarding ns1 ns2
+if [ $? -eq 0 ] ;then
+ echo "PASS: ipsec tunnel mode for ns1/ns2"
+else
+ echo "FAIL: ipsec tunnel mode for ns1/ns2"
+ ip netns exec nsr1 nft list ruleset 1>&2
+ ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
+fi
+
exit $ret
pidfd_open_test
+pidfd_poll_test
pidfd_test
+pidfd_wait
# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -g -I../../../../usr/include/ -lpthread
-TEST_GEN_PROGS := pidfd_test pidfd_open_test
+TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait
include ../lib.mk
#include "../kselftest.h"
+#ifndef P_PIDFD
+#define P_PIDFD 3
+#endif
+
+#ifndef CLONE_PIDFD
+#define CLONE_PIDFD 0x00001000
+#endif
+
+#ifndef __NR_pidfd_open
+#define __NR_pidfd_open -1
+#endif
+
+#ifndef __NR_pidfd_send_signal
+#define __NR_pidfd_send_signal -1
+#endif
+
+#ifndef __NR_clone3
+#define __NR_clone3 -1
+#endif
+
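These fallback defines let the tests build against older UAPI headers: a missing syscall number degrades to -1, so the raw syscall() wrappers fail cleanly with ENOSYS at run time instead of breaking the build, and P_PIDFD (3) is the waitid() idtype that accepts a pidfd.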
/*
* The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c
 * That means that, when it wraps around, any pid < 300 will be skipped.
return WEXITSTATUS(status);
}
+static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
+{
+ return syscall(__NR_pidfd_open, pid, flags);
+}
+
+static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
+ unsigned int flags)
+{
+ return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
+}
#endif /* __PIDFD_H */
#include "pidfd.h"
#include "../kselftest.h"
-static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(__NR_pidfd_open, pid, flags);
-}
-
static int safe_int(const char *numstr, int *converted)
{
char *err = NULL;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "pidfd.h"
+#include "../kselftest.h"
+
+static bool timeout;
+
+static void handle_alarm(int sig)
+{
+ timeout = true;
+}
+
+int main(int argc, char **argv)
+{
+ struct pollfd fds;
+ int iter, nevents;
+ int nr_iterations = 10000;
+
+ fds.events = POLLIN;
+
+ if (argc > 2)
+ ksft_exit_fail_msg("Unexpected command line argument\n");
+
+ if (argc == 2) {
+ nr_iterations = atoi(argv[1]);
+ if (nr_iterations <= 0)
+ ksft_exit_fail_msg("invalid input parameter %s\n",
+ argv[1]);
+ }
+
+ ksft_print_msg("running pidfd poll test for %d iterations\n",
+ nr_iterations);
+
+ for (iter = 0; iter < nr_iterations; iter++) {
+ int pidfd;
+ int child_pid = fork();
+
+ if (child_pid < 0) {
+ if (errno == EAGAIN) {
+ iter--;
+ continue;
+ }
+ ksft_exit_fail_msg(
+ "%s - failed to fork a child process\n",
+ strerror(errno));
+ }
+
+ if (child_pid == 0) {
+ /* Child process just sleeps for a minute and exits */
+ sleep(60);
+ exit(EXIT_SUCCESS);
+ }
+
+ /* Parent kills the child and waits for its death */
+ pidfd = sys_pidfd_open(child_pid, 0);
+ if (pidfd < 0)
+ ksft_exit_fail_msg("%s - pidfd_open failed\n",
+ strerror(errno));
+
+ /* Set up a 3 sec alarm - plenty of time */
+ if (signal(SIGALRM, handle_alarm) == SIG_ERR)
+ ksft_exit_fail_msg("%s - signal failed\n",
+ strerror(errno));
+ alarm(3);
+
+ /* Send SIGKILL to the child */
+ if (sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0))
+ ksft_exit_fail_msg("%s - pidfd_send_signal failed\n",
+ strerror(errno));
+
+ /* Wait for the death notification */
+ fds.fd = pidfd;
+ nevents = poll(&fds, 1, -1);
+
+ /* Check for error conditions */
+ if (nevents < 0)
+ ksft_exit_fail_msg("%s - poll failed\n",
+ strerror(errno));
+
+ if (nevents != 1)
+ ksft_exit_fail_msg("unexpected poll result: %d\n",
+ nevents);
+
+ if (!(fds.revents & POLLIN))
+ ksft_exit_fail_msg(
+ "unexpected event type received: 0x%x\n",
+ fds.revents);
+
+ if (timeout)
+ ksft_exit_fail_msg(
+ "death notification wait timeout\n");
+
+ close(pidfd);
+ /* Wait for child to prevent zombies */
+ if (waitpid(child_pid, NULL, 0) < 0)
+ ksft_exit_fail_msg("%s - waitpid failed\n",
+ strerror(errno));
+
+ }
+
+ ksft_test_result_pass("pidfd poll test: pass\n");
+ return ksft_exit_pass();
+}
#include "pidfd.h"
#include "../kselftest.h"
-#ifndef __NR_pidfd_send_signal
-#define __NR_pidfd_send_signal -1
-#endif
-
#define str(s) _str(s)
#define _str(s) #s
#define CHILD_THREAD_MIN_WAIT 3 /* seconds */
#define MAX_EVENTS 5
-#ifndef CLONE_PIDFD
-#define CLONE_PIDFD 0x00001000
-#endif
-
static pid_t pidfd_clone(int flags, int *pidfd, int (*fn)(void *))
{
size_t stack_size = 1024;
#endif
}
-static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
- unsigned int flags)
-{
- return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
-}
-
static int signal_received;
static void set_signal_received_on_sigusr1(int sig)
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "pidfd.h"
+#include "../kselftest.h"
+
+#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
+
+static pid_t sys_clone3(struct clone_args *args)
+{
+ return syscall(__NR_clone3, args, sizeof(struct clone_args));
+}
+
+static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
+ struct rusage *ru)
+{
+ return syscall(__NR_waitid, which, pid, info, options, ru);
+}
+
+static int test_pidfd_wait_simple(void)
+{
+ const char *test_name = "pidfd wait simple";
+ int pidfd = -1, status = 0;
+ pid_t parent_tid = -1;
+ struct clone_args args = {
+ .parent_tid = ptr_to_u64(&parent_tid),
+ .pidfd = ptr_to_u64(&pidfd),
+ .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
+ .exit_signal = SIGCHLD,
+ };
+ int ret;
+ pid_t pid;
+ siginfo_t info = {
+ .si_signo = 0,
+ };
+
+ pidfd = open("/proc/self", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
+ if (pidfd < 0)
+ ksft_exit_fail_msg("%s test: failed to open /proc/self %s\n",
+ test_name, strerror(errno));
+
+ pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ if (pid == 0)
+ ksft_exit_fail_msg(
+ "%s test: succeeded to wait on invalid pidfd %s\n",
+ test_name, strerror(errno));
+ close(pidfd);
+ pidfd = -1;
+
+ pidfd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+ if (pidfd < 0)
+ ksft_exit_fail_msg("%s test: failed to open /dev/null %s\n",
+ test_name, strerror(errno));
+
+ pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ if (pid == 0)
+ ksft_exit_fail_msg(
+ "%s test: succeeded to wait on invalid pidfd %s\n",
+ test_name, strerror(errno));
+ close(pidfd);
+ pidfd = -1;
+
+ pid = sys_clone3(&args);
+ if (pid < 0)
+ ksft_exit_fail_msg("%s test: failed to create new process %s\n",
+ test_name, strerror(errno));
+
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+
+ pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ if (pid < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to wait on process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ if (!WIFEXITED(info.si_status) || WEXITSTATUS(info.si_status))
+ ksft_exit_fail_msg(
+ "%s test: unexpected status received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+ close(pidfd);
+
+ if (info.si_signo != SIGCHLD)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_signo, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_code != CLD_EXITED)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_code, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_pid != parent_tid)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_pid, parent_tid, pidfd,
+ strerror(errno));
+
+ ksft_test_result_pass("%s test: Passed\n", test_name);
+ return 0;
+}
+
+static int test_pidfd_wait_states(void)
+{
+ const char *test_name = "pidfd wait states";
+	int pidfd = -1;
+ pid_t parent_tid = -1;
+ struct clone_args args = {
+ .parent_tid = ptr_to_u64(&parent_tid),
+ .pidfd = ptr_to_u64(&pidfd),
+ .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
+ .exit_signal = SIGCHLD,
+ };
+ int ret;
+ pid_t pid;
+ siginfo_t info = {
+ .si_signo = 0,
+ };
+
+ pid = sys_clone3(&args);
+ if (pid < 0)
+ ksft_exit_fail_msg("%s test: failed to create new process %s\n",
+ test_name, strerror(errno));
+
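+	/*
+	 * The child stops itself twice: the first stop is observed via
+	 * WSTOPPED and resumed with SIGCONT (observed via WCONTINUED); the
+	 * second stop is observed via WUNTRACED (an alias for WSTOPPED)
+	 * before the child is killed.
+	 */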
+ if (pid == 0) {
+ kill(getpid(), SIGSTOP);
+ kill(getpid(), SIGSTOP);
+ exit(EXIT_SUCCESS);
+ }
+
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to wait on WSTOPPED process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ if (info.si_signo != SIGCHLD)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_signo, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_code != CLD_STOPPED)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_code, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_pid != parent_tid)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_pid, parent_tid, pidfd,
+ strerror(errno));
+
+ ret = sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to send signal to process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to wait WCONTINUED on process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ if (info.si_signo != SIGCHLD)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_signo, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_code != CLD_CONTINUED)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_code, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_pid != parent_tid)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_pid, parent_tid, pidfd,
+ strerror(errno));
+
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WUNTRACED, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to wait on WUNTRACED process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ if (info.si_signo != SIGCHLD)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_signo, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_code != CLD_STOPPED)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_code, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_pid != parent_tid)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_pid, parent_tid, pidfd,
+ strerror(errno));
+
+ ret = sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to send SIGKILL to process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: failed to wait on WEXITED process with pid %d and pidfd %d: %s\n",
+ test_name, parent_tid, pidfd, strerror(errno));
+
+ if (info.si_signo != SIGCHLD)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_signo value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_signo, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_code != CLD_KILLED)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_code value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_code, parent_tid, pidfd,
+ strerror(errno));
+
+ if (info.si_pid != parent_tid)
+ ksft_exit_fail_msg(
+ "%s test: unexpected si_pid value %d received after waiting on process with pid %d and pidfd %d: %s\n",
+ test_name, info.si_pid, parent_tid, pidfd,
+ strerror(errno));
+
+ close(pidfd);
+
+ ksft_test_result_pass("%s test: Passed\n", test_name);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ ksft_set_plan(2);
+
+ test_pidfd_wait_simple();
+ test_pidfd_wait_states();
+
+ return ksft_exit_pass();
+}
cmdlist.insert(0, self.args.NAMES['NS'])
cmdlist.insert(0, 'exec')
cmdlist.insert(0, 'netns')
- cmdlist.insert(0, 'ip')
+ cmdlist.insert(0, self.args.NAMES['IP'])
else:
pass
return command
def _ports_create(self):
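+        # $IP expands from self.args.NAMES['IP'], so tests can point at an
+        # alternate ip(8) binary rather than a hard-coded 'ip'.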
- cmd = 'ip link add $DEV0 type veth peer name $DEV1'
+ cmd = '$IP link add $DEV0 type veth peer name $DEV1'
self._exec_cmd('pre', cmd)
- cmd = 'ip link set $DEV0 up'
+ cmd = '$IP link set $DEV0 up'
self._exec_cmd('pre', cmd)
if not self.args.namespace:
- cmd = 'ip link set $DEV1 up'
+ cmd = '$IP link set $DEV1 up'
self._exec_cmd('pre', cmd)
def _ports_destroy(self):
- cmd = 'ip link del $DEV0'
+ cmd = '$IP link del $DEV0'
self._exec_cmd('post', cmd)
def _ns_create(self):
'''
self._ports_create()
if self.args.namespace:
- cmd = 'ip netns add {}'.format(self.args.NAMES['NS'])
+ cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
self._exec_cmd('pre', cmd)
- cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
+ cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
self._exec_cmd('pre', cmd)
- cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
+ cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
self._exec_cmd('pre', cmd)
if self.args.device:
- cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
+ cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
self._exec_cmd('pre', cmd)
- cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
+ cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
self._exec_cmd('pre', cmd)
def _ns_destroy(self):
devices as well)
'''
if self.args.namespace:
- cmd = 'ip netns delete {}'.format(self.args.NAMES['NS'])
+ cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
self._exec_cmd('post', cmd)
def _exec_cmd(self, stage, command):
"teardown": [
"$TC actions flush action skbedit"
]
+ },
+ {
+ "id": "630c",
+ "name": "Add batch of 32 skbedit actions with all parameters and cookie",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "^[ \t]+index [0-9]+ ref",
+ "matchCount": "32",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
+ },
+ {
+ "id": "706d",
+ "name": "Delete batch of 32 skbedit actions with all parameters",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ],
+ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
+ ],
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "^[ \t]+index [0-9]+ ref",
+ "matchCount": "0",
+ "teardown": []
}
]
unsigned int len;
int mask;
+ /* Detect an already handled MMIO return */
+ if (unlikely(!vcpu->mmio_needed))
+ return 0;
+
+ vcpu->mmio_needed = 0;
+
if (!run->mmio.is_write) {
len = run->mmio.len;
if (len > sizeof(unsigned long))
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
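+	/* Flag the in-flight MMIO exit so kvm_handle_mmio_return() completes it once. */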
+ vcpu->mmio_needed = 1;
if (!ret) {
/* We handled the access successfully in the kernel. */
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"
irq->vcpu = NULL;
irq->target_vcpu = vcpu0;
kref_init(&irq->refcount);
- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ switch (dist->vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->targets = 0;
irq->group = 0;
- } else {
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->mpidr = 0;
irq->group = 1;
+ break;
+		default:
+			kfree(dist->spis);
+			/* Clear the pointer so a later vgic teardown can't double-free it. */
+			dist->spis = NULL;
+			return -EINVAL;
}
}
return 0;
irq->intid = i;
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
- irq->targets = 1U << vcpu->vcpu_id;
kref_init(&irq->refcount);
if (vgic_irq_is_sgi(i)) {
/* SGIs */
/* PPIs */
irq->config = VGIC_CONFIG_LEVEL;
}
-
- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
- irq->group = 1;
- else
- irq->group = 0;
}
if (!irqchip_in_kernel(vcpu->kvm))
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ switch (dist->vgic_model) {
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1;
- else
+ irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ break;
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->group = 0;
+ irq->targets = 1U << idx;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
}
}
vgic_irq_set_phys_active(irq, true);
}
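+/*
+ * On a GICv2, SGI pending state is per-source and managed via
+ * GICD_{S,C}PENDSGIR, so the SGI bits of GICD_I{S,C}PENDR are
+ * write-ignored; the handlers below use this predicate to skip them.
+ */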
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+ return (vgic_irq_is_sgi(irq->intid) &&
+ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ /* GICD_ISPENDR0 SGI bits are WI */
+ if (is_vgic_v2_sgi(vcpu, irq)) {
+ vgic_put_irq(vcpu->kvm, irq);
+ continue;
+ }
+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw)
vgic_hw_irq_spending(vcpu, irq, is_uaccess);
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ /* GICD_ICPENDR0 SGI bits are WI */
+ if (is_vgic_v2_sgi(vcpu, irq)) {
+ vgic_put_irq(vcpu->kvm, irq);
+ continue;
+ }
+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw)
if (vgic_irq_is_sgi(irq->intid)) {
u32 src = ffs(irq->source);
- BUG_ON(!src);
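+		/*
+		 * A pending SGI with no recorded source is a bookkeeping
+		 * error; warn (ratelimited) and skip it rather than BUG()ing.
+		 */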
+ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+ irq->intid))
+ return;
+
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
if (irq->source) {
model == KVM_DEV_TYPE_ARM_VGIC_V2) {
u32 src = ffs(irq->source);
- BUG_ON(!src);
+ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+ irq->intid))
+ return;
+
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
if (irq->source) {
bool penda, pendb;
int ret;
+ /*
+ * list_sort may call this function with the same element when
+ * the list is fairly long.
+ */
+ if (unlikely(irqa == irqb))
+ return 0;
+
raw_spin_lock(&irqa->irq_lock);
raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);